From 269cbea0f19bc4440736793b360b9327ac83e7f5 Mon Sep 17 00:00:00 2001 From: Alex Le Date: Tue, 6 Dec 2022 15:26:38 -0800 Subject: [PATCH 001/133] Partitioning compaction for Cortex Signed-off-by: Alex Le --- pkg/compactor/block_visit_marker.go | 136 ++- pkg/compactor/block_visit_marker_test.go | 70 +- pkg/compactor/blocks_cleaner.go | 159 ++- pkg/compactor/blocks_cleaner_test.go | 10 +- pkg/compactor/compactor.go | 143 ++- pkg/compactor/compactor_test.go | 342 +++++-- .../partition_compaction_complete_checker.go | 67 ++ ...tition_compaction_complete_checker_test.go | 379 +++++++ pkg/compactor/partitioned_group_info.go | 143 +++ pkg/compactor/partitioned_group_info_test.go | 159 +++ pkg/compactor/shuffle_sharding_grouper.go | 438 ++++++-- .../shuffle_sharding_grouper_test.go | 942 ++++++++++++++++-- pkg/compactor/shuffle_sharding_planner.go | 22 +- .../shuffle_sharding_planner_test.go | 116 ++- pkg/storage/tsdb/bucketindex/index.go | 8 + .../prometheus/prometheus/storage/merge.go | 21 +- .../prometheus/prometheus/tsdb/compact.go | 34 +- .../prometheus/tsdb/index/postings.go | 51 + .../thanos-io/thanos/pkg/block/block.go | 10 + .../thanos-io/thanos/pkg/block/fetcher.go | 4 + .../thanos/pkg/block/partition_info.go | 79 ++ .../thanos-io/thanos/pkg/compact/compact.go | 138 ++- .../thanos-io/thanos/pkg/compact/planner.go | 8 + 23 files changed, 3050 insertions(+), 429 deletions(-) create mode 100644 pkg/compactor/partition_compaction_complete_checker.go create mode 100644 pkg/compactor/partition_compaction_complete_checker_test.go create mode 100644 pkg/compactor/partitioned_group_info.go create mode 100644 pkg/compactor/partitioned_group_info_test.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go diff --git a/pkg/compactor/block_visit_marker.go b/pkg/compactor/block_visit_marker.go index 2e43dca6c08..9ccf9515749 100644 --- a/pkg/compactor/block_visit_marker.go +++ b/pkg/compactor/block_visit_marker.go @@ -21,8 +21,10 @@ import ( ) const ( - // BlockVisitMarkerFile is the known json filename for representing the most recent compactor visit. - BlockVisitMarkerFile = "visit-mark.json" + // BlockVisitMarkerFileSuffix is the known suffix of json filename for representing the most recent compactor visit. + BlockVisitMarkerFileSuffix = "-visit-mark.json" + // BlockVisitMarkerFilePrefix is the known prefix of json filename for representing the most recent compactor visit. + BlockVisitMarkerFilePrefix = "partition-" // VisitMarkerVersion1 is the current supported version of visit-mark file. VisitMarkerVersion1 = 1 ) @@ -32,24 +34,42 @@ var ( ErrorUnmarshalBlockVisitMarker = errors.New("unmarshal block visit marker JSON") ) +type VisitStatus string + +const ( + Pending VisitStatus = "pending" + Completed VisitStatus = "completed" +) + type BlockVisitMarker struct { - CompactorID string `json:"compactorID"` + CompactorID string `json:"compactorID"` + Status VisitStatus `json:"status"` + PartitionedGroupID uint32 `json:"partitionedGroupID"` + PartitionID int `json:"partitionID"` // VisitTime is a unix timestamp of when the block was visited (mark updated). VisitTime int64 `json:"visitTime"` // Version of the file. 
Version int `json:"version"` } -func (b *BlockVisitMarker) isVisited(blockVisitMarkerTimeout time.Duration) bool { - return time.Now().Before(time.Unix(b.VisitTime, 0).Add(blockVisitMarkerTimeout)) +func (b *BlockVisitMarker) isVisited(blockVisitMarkerTimeout time.Duration, partitionID int) bool { + return b.isCompleted() || partitionID == b.PartitionID && time.Now().Before(time.Unix(b.VisitTime, 0).Add(blockVisitMarkerTimeout)) +} + +func (b *BlockVisitMarker) isVisitedByCompactor(blockVisitMarkerTimeout time.Duration, partitionID int, compactorID string) bool { + return b.CompactorID == compactorID && b.isVisited(blockVisitMarkerTimeout, partitionID) } -func (b *BlockVisitMarker) isVisitedByCompactor(blockVisitMarkerTimeout time.Duration, compactorID string) bool { - return b.CompactorID == compactorID && time.Now().Before(time.Unix(b.VisitTime, 0).Add(blockVisitMarkerTimeout)) +func (b *BlockVisitMarker) isCompleted() bool { + return b.Status == Completed } -func ReadBlockVisitMarker(ctx context.Context, bkt objstore.InstrumentedBucketReader, logger log.Logger, blockID string, blockVisitMarkerReadFailed prometheus.Counter) (*BlockVisitMarker, error) { - visitMarkerFile := path.Join(blockID, BlockVisitMarkerFile) +func getBlockVisitMarkerFile(blockID string, partitionID int) string { + return path.Join(blockID, fmt.Sprintf("%s%d%s", BlockVisitMarkerFilePrefix, partitionID, BlockVisitMarkerFileSuffix)) +} + +func ReadBlockVisitMarker(ctx context.Context, bkt objstore.InstrumentedBucketReader, logger log.Logger, blockID string, partitionID int, blockVisitMarkerReadFailed prometheus.Counter) (*BlockVisitMarker, error) { + visitMarkerFile := getBlockVisitMarkerFile(blockID, partitionID) visitMarkerFileReader, err := bkt.ReaderWithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, visitMarkerFile) if err != nil { if bkt.IsObjNotFoundErr(err) { @@ -75,8 +95,8 @@ func ReadBlockVisitMarker(ctx context.Context, bkt objstore.InstrumentedBucketRe return &blockVisitMarker, nil } -func UpdateBlockVisitMarker(ctx context.Context, bkt objstore.Bucket, blockID string, reader io.Reader, blockVisitMarkerWriteFailed prometheus.Counter) error { - blockVisitMarkerFilePath := path.Join(blockID, BlockVisitMarkerFile) +func UpdateBlockVisitMarker(ctx context.Context, bkt objstore.Bucket, blockID string, partitionID int, reader io.Reader, blockVisitMarkerWriteFailed prometheus.Counter) error { + blockVisitMarkerFilePath := getBlockVisitMarkerFile(blockID, partitionID) if err := bkt.Upload(ctx, blockVisitMarkerFilePath, reader); err != nil { blockVisitMarkerWriteFailed.Inc() return err @@ -84,6 +104,14 @@ func UpdateBlockVisitMarker(ctx context.Context, bkt objstore.Bucket, blockID st return nil } +func generateBlocksInfo(blocks []*metadata.Meta) string { + var blockIds []string + for _, block := range blocks { + blockIds = append(blockIds, block.ULID.String()) + } + return strings.Join(blockIds, ",") +} + func markBlocksVisited( ctx context.Context, bkt objstore.Bucket, @@ -100,38 +128,96 @@ func markBlocksVisited( reader := bytes.NewReader(visitMarkerFileContent) for _, block := range blocks { blockID := block.ULID.String() - if err := UpdateBlockVisitMarker(ctx, bkt, blockID, reader, blockVisitMarkerWriteFailed); err != nil { - level.Error(logger).Log("msg", "unable to upsert visit marker file content for block", "blockID", blockID, "err", err) + if err := UpdateBlockVisitMarker(ctx, bkt, blockID, marker.PartitionID, reader, blockVisitMarkerWriteFailed); err != nil { + level.Error(logger).Log("msg", "unable to upsert 
visit marker file content for block", "partition_id", marker.PartitionID, "block_id", blockID, "err", err) } reader.Reset(visitMarkerFileContent) } + level.Debug(logger).Log("msg", "marked block visited", "partition_id", marker.PartitionID, "blocks", generateBlocksInfo(blocks)) } -func markBlocksVisitedHeartBeat(ctx context.Context, bkt objstore.Bucket, logger log.Logger, blocks []*metadata.Meta, compactorID string, blockVisitMarkerFileUpdateInterval time.Duration, blockVisitMarkerWriteFailed prometheus.Counter) { - var blockIds []string - for _, block := range blocks { - blockIds = append(blockIds, block.ULID.String()) - } - blocksInfo := strings.Join(blockIds, ",") - level.Info(logger).Log("msg", fmt.Sprintf("start heart beat for blocks: %s", blocksInfo)) +func markBlocksVisitedHeartBeat( + ctx context.Context, + bkt objstore.Bucket, + logger log.Logger, + blocks []*metadata.Meta, + partitionedGroupID uint32, + partitionID int, + compactorID string, + blockVisitMarkerFileUpdateInterval time.Duration, + blockVisitMarkerWriteFailed prometheus.Counter, + errChan chan error, +) { + blocksInfo := generateBlocksInfo(blocks) + level.Info(logger).Log("msg", "start visit marker heart beat", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "blocks", blocksInfo) ticker := time.NewTicker(blockVisitMarkerFileUpdateInterval) defer ticker.Stop() + isComplete := false heartBeat: for { - level.Debug(logger).Log("msg", fmt.Sprintf("heart beat for blocks: %s", blocksInfo)) + level.Debug(logger).Log("msg", "visit marker heart beat", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "blocks", blocksInfo) blockVisitMarker := BlockVisitMarker{ - VisitTime: time.Now().Unix(), - CompactorID: compactorID, - Version: VisitMarkerVersion1, + VisitTime: time.Now().Unix(), + CompactorID: compactorID, + Status: Pending, + PartitionedGroupID: partitionedGroupID, + PartitionID: partitionID, + Version: VisitMarkerVersion1, } markBlocksVisited(ctx, bkt, logger, blocks, blockVisitMarker, blockVisitMarkerWriteFailed) select { case <-ctx.Done(): + level.Warn(logger).Log("msg", "visit marker heart beat got cancelled", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "blocks", blocksInfo) break heartBeat case <-ticker.C: continue + case err := <-errChan: + isComplete = err == nil + if err != nil { + level.Warn(logger).Log("msg", "stop visit marker heart beat due to error", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "blocks", blocksInfo, "err", err) + } + break heartBeat + } + } + if isComplete { + level.Info(logger).Log("msg", "update visit marker to completed status", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "blocks", blocksInfo) + markBlocksVisitMarkerCompleted(context.Background(), bkt, logger, blocks, partitionedGroupID, partitionID, compactorID, blockVisitMarkerWriteFailed) + } + level.Info(logger).Log("msg", "stop visit marker heart beat", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "blocks", blocksInfo) +} + +func markBlocksVisitMarkerCompleted( + ctx context.Context, + bkt objstore.Bucket, + logger log.Logger, + blocks []*metadata.Meta, + partitionedGroupID uint32, + partitionID int, + compactorID string, + blockVisitMarkerWriteFailed prometheus.Counter, +) { + blockVisitMarker := BlockVisitMarker{ + VisitTime: time.Now().Unix(), + CompactorID: compactorID, + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: partitionID, + Version: 
VisitMarkerVersion1, + } + visitMarkerFileContent, err := json.Marshal(blockVisitMarker) + if err != nil { + blockVisitMarkerWriteFailed.Inc() + return + } + reader := bytes.NewReader(visitMarkerFileContent) + for _, block := range blocks { + blockID := block.ULID.String() + if err := UpdateBlockVisitMarker(ctx, bkt, blockID, blockVisitMarker.PartitionID, reader, blockVisitMarkerWriteFailed); err != nil { + level.Error(logger).Log("msg", "unable to upsert completed visit marker file content for block", "partitioned_group_id", blockVisitMarker.PartitionedGroupID, "partition_id", blockVisitMarker.PartitionID, "block_id", blockID, "err", err) + } else { + level.Info(logger).Log("msg", "block partition is completed", "partitioned_group_id", blockVisitMarker.PartitionedGroupID, "partition_id", blockVisitMarker.PartitionID, "block_id", blockID) } + reader.Reset(visitMarkerFileContent) } - level.Info(logger).Log("msg", fmt.Sprintf("stop heart beat for blocks: %s", blocksInfo)) } diff --git a/pkg/compactor/block_visit_marker_test.go b/pkg/compactor/block_visit_marker_test.go index 18b7c8e1b42..6e973aa36dc 100644 --- a/pkg/compactor/block_visit_marker_test.go +++ b/pkg/compactor/block_visit_marker_test.go @@ -2,6 +2,7 @@ package compactor import ( "context" + "fmt" "testing" "time" @@ -85,10 +86,77 @@ func TestMarkBlocksVisited(t *testing.T) { logger := log.NewNopLogger() markBlocksVisited(ctx, bkt, logger, tcase.blocks, tcase.visitMarker, dummyCounter) for _, meta := range tcase.blocks { - res, err := ReadBlockVisitMarker(ctx, objstore.WithNoopInstr(bkt), logger, meta.ULID.String(), dummyCounter) + res, err := ReadBlockVisitMarker(ctx, objstore.WithNoopInstr(bkt), logger, meta.ULID.String(), tcase.visitMarker.PartitionID, dummyCounter) require.NoError(t, err) require.Equal(t, tcase.visitMarker, *res) } }) } } + +func TestMarkBlockVisitedHeartBeat(t *testing.T) { + partitionedGroupID := uint32(12345) + partitionID := 0 + compactorID := "test-compactor" + for _, tcase := range []struct { + name string + isCancelled bool + compactionErr error + expectedStatus VisitStatus + }{ + { + name: "heart beat got cancelled", + isCancelled: true, + compactionErr: nil, + expectedStatus: Pending, + }, + { + name: "heart beat complete without error", + isCancelled: false, + compactionErr: nil, + expectedStatus: Completed, + }, + { + name: "heart beat stopped due to compaction error", + isCancelled: false, + compactionErr: fmt.Errorf("some compaction failure"), + expectedStatus: Pending, + }, + } { + t.Run(tcase.name, func(t *testing.T) { + ulid0 := ulid.MustNew(uint64(time.Now().UnixMilli()+0), nil) + ulid1 := ulid.MustNew(uint64(time.Now().UnixMilli()+1), nil) + blocks := []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: ulid0, + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: ulid1, + }, + }, + } + ctx, cancel := context.WithCancel(context.Background()) + dummyCounter := prometheus.NewCounter(prometheus.CounterOpts{}) + bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + logger := log.NewNopLogger() + errChan := make(chan error, 1) + go markBlocksVisitedHeartBeat(ctx, objstore.WithNoopInstr(bkt), logger, blocks, partitionedGroupID, partitionID, compactorID, time.Second, dummyCounter, errChan) + time.Sleep(2 * time.Second) + if tcase.isCancelled { + cancel() + } else { + errChan <- tcase.compactionErr + defer cancel() + } + time.Sleep(2 * time.Second) + for _, meta := range blocks { + res, err := ReadBlockVisitMarker(ctx, objstore.WithNoopInstr(bkt), logger, meta.ULID.String(), partitionID, 
dummyCounter) + require.NoError(t, err) + require.Equal(t, tcase.expectedStatus, res.Status) + } + }) + } +} diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index d9cf7ff2353..3e429b67707 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -2,7 +2,10 @@ package compactor import ( "context" + "encoding/json" "fmt" + "io" + "path" "time" "github.com/go-kit/log" @@ -21,6 +24,7 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/runutil" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -57,9 +61,11 @@ type BlocksCleaner struct { tenantBlocksMarkedForNoCompaction *prometheus.GaugeVec tenantPartialBlocks *prometheus.GaugeVec tenantBucketIndexLastUpdate *prometheus.GaugeVec + compactorPartitionError *prometheus.CounterVec + partitionedGroupInfoReadFailed prometheus.Counter } -func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *cortex_tsdb.UsersScanner, cfgProvider ConfigProvider, logger log.Logger, reg prometheus.Registerer) *BlocksCleaner { +func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *cortex_tsdb.UsersScanner, cfgProvider ConfigProvider, logger log.Logger, reg prometheus.Registerer, partitionedGroupInfoReadFailed prometheus.Counter) *BlocksCleaner { c := &BlocksCleaner{ cfg: cfg, bucketClient: bucketClient, @@ -119,6 +125,12 @@ func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, use Name: "cortex_bucket_index_last_successful_update_timestamp_seconds", Help: "Timestamp of the last successful update of a tenant's bucket index.", }, []string{"user"}), + compactorPartitionError: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: compactorPartitionErrorCountName, + Help: compactorPartitionErrorCountHelp, + ConstLabels: prometheus.Labels{"reason": "parent-block-mismatch"}, + }, []string{"user"}), + partitionedGroupInfoReadFailed: partitionedGroupInfoReadFailed, } c.Service = services.NewTimerService(cfg.CleanupInterval, c.starting, c.ticker, nil) @@ -283,6 +295,13 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userID level.Info(userLogger).Log("msg", "deleted files under "+block.DebugMetas+" for tenant marked for deletion", "count", deleted) } + // Clean up partitioned group info files + if deleted, err := bucket.DeletePrefix(ctx, userBucket, PartitionedGroupDirectory, userLogger); err != nil { + return errors.Wrap(err, "failed to delete "+PartitionedGroupDirectory) + } else if deleted > 0 { + level.Info(userLogger).Log("msg", "deleted files under "+PartitionedGroupDirectory+" for tenant marked for deletion", "count", deleted) + } + // Tenant deletion mark file is inside Markers as well. 
if deleted, err := bucket.DeletePrefix(ctx, userBucket, bucketindex.MarkersPathname, userLogger); err != nil { return errors.Wrap(err, "failed to delete marker files") @@ -373,6 +392,8 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string, firstRun b return err } + c.cleanPartitionedGroupInfo(ctx, userBucket, userLogger, userID, idx) + c.tenantBlocks.WithLabelValues(userID).Set(float64(len(idx.Blocks))) c.tenantBlocksMarkedForDelete.WithLabelValues(userID).Set(float64(len(idx.BlockDeletionMarks))) c.tenantBlocksMarkedForNoCompaction.WithLabelValues(userID).Set(float64(totalBlocksBlocksMarkedForNoCompaction)) @@ -382,6 +403,125 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string, firstRun b return nil } +func (c *BlocksCleaner) findResultBlocksForPartitionedGroup(ctx context.Context, userBucket objstore.InstrumentedBucket, userLogger log.Logger, index *bucketindex.Index, partitionedGroupInfo *PartitionedGroupInfo) map[int]ulid.ULID { + partitionedGroupID := partitionedGroupInfo.PartitionedGroupID + deletionMarkMap := index.BlockDeletionMarks.GetULIDSet() + var possibleResultBlocks []ulid.ULID + for _, b := range index.Blocks { + if b.MinTime >= partitionedGroupInfo.RangeStart && b.MaxTime <= partitionedGroupInfo.RangeEnd { + if _, ok := deletionMarkMap[b.ID]; !ok { + level.Info(userLogger).Log("msg", "found possible result block", "partitioned_group_id", partitionedGroupID, "block", b.ID.String()) + possibleResultBlocks = append(possibleResultBlocks, b.ID) + } + } + } + + resultBlocks := make(map[int]ulid.ULID) + for _, b := range possibleResultBlocks { + reader, err := userBucket.Get(ctx, path.Join(b.String(), block.PartitionInfoFilename)) + if err != nil { + level.Info(userLogger).Log("msg", "unable to get partition info for block", "partitioned_group_id", partitionedGroupID, "block", b.String()) + continue + } + partitionInfo, err := block.ReadPartitionInfo(reader) + if err != nil { + level.Info(userLogger).Log("msg", "unable to parse partition info for block", "partitioned_group_id", partitionedGroupID, "block", b.String()) + continue + } + if partitionInfo.PartitionedGroupID == partitionedGroupID { + level.Info(userLogger).Log("msg", "found result block", "partitioned_group_id", partitionedGroupID, "partition_id", partitionInfo.PartitionID, "block", b.String()) + resultBlocks[partitionInfo.PartitionID] = b + } + level.Info(userLogger).Log("msg", fmt.Sprintf("block does not belong to this partitioned group: %d", partitionedGroupID), "partitioned_group_id", partitionInfo.PartitionedGroupID, "partition_id", partitionInfo.PartitionID, "block", b.String()) + } + + return resultBlocks +} + +func (c *BlocksCleaner) validatePartitionedResultBlock(ctx context.Context, userBucket objstore.InstrumentedBucket, userLogger log.Logger, userID string, resultBlock ulid.ULID, partition Partition, partitionedGroupID uint32) error { + meta, err := readMeta(ctx, userBucket, userLogger, resultBlock) + if err != nil { + level.Warn(userLogger).Log("msg", "unable to read meta of result block", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String()) + return err + } + expectedSourceBlocks := partition.getBlocksSet() + if len(expectedSourceBlocks) != len(meta.Compaction.Parents) { + c.compactorPartitionError.WithLabelValues(userID).Inc() + level.Warn(userLogger).Log("msg", "result block has different number of parent blocks as partitioned group info", "partitioned_group_id", partitionedGroupID, "partition_id", 
partition.PartitionID, "block", resultBlock.String()) + return fmt.Errorf("result block %s has different number of parent blocks as partitioned group info with partitioned group id %d, partition id %d", resultBlock.String(), partitionedGroupID, partition.PartitionID) + } + for _, parentBlock := range meta.Compaction.Parents { + if _, ok := expectedSourceBlocks[parentBlock.ULID]; !ok { + c.compactorPartitionError.WithLabelValues(userID).Inc() + level.Warn(userLogger).Log("msg", "parent blocks of result block does not match partitioned group info", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String()) + return fmt.Errorf("parent blocks of result block %s does not match partitioned group info with partitioned group id %d, partition id %d", resultBlock.String(), partitionedGroupID, partition.PartitionID) + } + } + return nil +} + +func (c *BlocksCleaner) cleanPartitionedGroupInfo(ctx context.Context, userBucket objstore.InstrumentedBucket, userLogger log.Logger, userID string, index *bucketindex.Index) { + var deletePartitionedGroupInfo []string + userBucket.Iter(ctx, PartitionedGroupDirectory, func(file string) error { + partitionedGroupInfo, err := ReadPartitionedGroupInfoFile(ctx, userBucket, userLogger, file, c.partitionedGroupInfoReadFailed) + if err != nil { + level.Warn(userLogger).Log("msg", "failed to read partitioned group info", "partitioned_group_info", file) + return nil + } + resultBlocks := c.findResultBlocksForPartitionedGroup(ctx, userBucket, userLogger, index, partitionedGroupInfo) + partitionedGroupID := partitionedGroupInfo.PartitionedGroupID + for _, partition := range partitionedGroupInfo.Partitions { + if resultBlock, ok := resultBlocks[partition.PartitionID]; !ok { + level.Info(userLogger).Log("msg", "unable to find result block for partition in partitioned group", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID) + return nil + } else { + err := c.validatePartitionedResultBlock(ctx, userBucket, userLogger, userID, resultBlock, partition, partitionedGroupID) + if err != nil { + level.Warn(userLogger).Log("msg", "validate result block failed", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String(), "err", err) + return nil + } + level.Info(userLogger).Log("msg", "result block has expected parent blocks", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String()) + } + } + + // since the partitioned group were all complete, we can make sure + // all source blocks would be deleted. 
+ blocks := partitionedGroupInfo.getAllBlocks() + for _, blockID := range blocks { + metaExists, err := userBucket.Exists(ctx, path.Join(blockID.String(), metadata.MetaFilename)) + if err != nil { + level.Info(userLogger).Log("msg", "block already deleted", "partitioned_group_id", partitionedGroupID, "block", blockID.String()) + continue + } + if metaExists { + deletionMarkerExists, err := userBucket.Exists(ctx, path.Join(blockID.String(), metadata.DeletionMarkFilename)) + if err == nil && deletionMarkerExists { + level.Info(userLogger).Log("msg", "block already marked for deletion", "partitioned_group_id", partitionedGroupID, "block", blockID.String()) + continue + } + if err := block.MarkForDeletion(ctx, userLogger, userBucket, blockID, "delete block during partitioned group completion check", c.blocksMarkedForDeletion); err != nil { + level.Warn(userLogger).Log("msg", "unable to mark block for deletion", "partitioned_group_id", partitionedGroupID, "block", blockID.String()) + // if one block can not be marked for deletion, we should + // skip delete this partitioned group. next iteration + // would try it again. + return nil + } + level.Info(userLogger).Log("msg", "marked block for deletion during partitioned group info clean up", "partitioned_group_id", partitionedGroupID, "block", blockID.String()) + } + } + level.Info(userLogger).Log("msg", "partitioned group info can be cleaned up", "partitioned_group_id", partitionedGroupID) + deletePartitionedGroupInfo = append(deletePartitionedGroupInfo, file) + return nil + }) + for _, partitionedGroupInfoFile := range deletePartitionedGroupInfo { + if err := userBucket.Delete(ctx, partitionedGroupInfoFile); err != nil { + level.Warn(userLogger).Log("msg", "failed to delete partitioned group info", "partitioned_group_info", partitionedGroupInfoFile, "err", err) + } else { + level.Info(userLogger).Log("msg", "deleted partitioned group info", "partitioned_group_info", partitionedGroupInfoFile) + } + } +} + // cleanUserPartialBlocks delete partial blocks which are safe to be deleted. The provided partials map // is updated accordingly. 
func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map[ulid.ULID]error, idx *bucketindex.Index, userBucket objstore.InstrumentedBucket, userLogger log.Logger) { @@ -460,3 +600,20 @@ func listBlocksOutsideRetentionPeriod(idx *bucketindex.Index, threshold time.Tim return } + +func readMeta(ctx context.Context, userBucket objstore.InstrumentedBucket, userLogger log.Logger, blockID ulid.ULID) (*metadata.Meta, error) { + metaReader, err := userBucket.Get(ctx, path.Join(blockID.String(), block.MetaFilename)) + if err != nil { + return nil, err + } + defer runutil.CloseWithLogOnErr(userLogger, metaReader, "close meta reader") + b, err := io.ReadAll(metaReader) + if err != nil { + return nil, err + } + meta := metadata.Meta{} + if err = json.Unmarshal(b, &meta); err != nil { + return nil, err + } + return &meta, nil +} diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 8220b9286dc..d320dc40ab9 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -122,7 +122,7 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions scanner := tsdb.NewUsersScanner(bucketClient, tsdb.AllUsers, logger) cfgProvider := newMockConfigProvider() - cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, reg) + cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, reg, prometheus.NewCounter(prometheus.CounterOpts{})) require.NoError(t, services.StartAndAwaitRunning(ctx, cleaner)) defer services.StopAndAwaitTerminated(ctx, cleaner) //nolint:errcheck @@ -265,7 +265,7 @@ func TestBlocksCleaner_ShouldContinueOnBlockDeletionFailure(t *testing.T) { scanner := tsdb.NewUsersScanner(bucketClient, tsdb.AllUsers, logger) cfgProvider := newMockConfigProvider() - cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, nil) + cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, nil, prometheus.NewCounter(prometheus.CounterOpts{})) require.NoError(t, services.StartAndAwaitRunning(ctx, cleaner)) defer services.StopAndAwaitTerminated(ctx, cleaner) //nolint:errcheck @@ -325,7 +325,7 @@ func TestBlocksCleaner_ShouldRebuildBucketIndexOnCorruptedOne(t *testing.T) { scanner := tsdb.NewUsersScanner(bucketClient, tsdb.AllUsers, logger) cfgProvider := newMockConfigProvider() - cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, nil) + cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, nil, prometheus.NewCounter(prometheus.CounterOpts{})) require.NoError(t, services.StartAndAwaitRunning(ctx, cleaner)) defer services.StopAndAwaitTerminated(ctx, cleaner) //nolint:errcheck @@ -376,7 +376,7 @@ func TestBlocksCleaner_ShouldRemoveMetricsForTenantsNotBelongingAnymoreToTheShar scanner := tsdb.NewUsersScanner(bucketClient, tsdb.AllUsers, logger) cfgProvider := newMockConfigProvider() - cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, reg) + cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, reg, prometheus.NewCounter(prometheus.CounterOpts{})) require.NoError(t, cleaner.cleanUsers(ctx, true)) assert.NoError(t, prom_testutil.GatherAndCompare(reg, strings.NewReader(` @@ -507,7 +507,7 @@ func TestBlocksCleaner_ShouldRemoveBlocksOutsideRetentionPeriod(t *testing.T) { scanner := tsdb.NewUsersScanner(bucketClient, tsdb.AllUsers, logger) cfgProvider := newMockConfigProvider() - cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, 
cfgProvider, logger, reg) + cleaner := NewBlocksCleaner(cfg, bucketClient, scanner, cfgProvider, logger, reg, prometheus.NewCounter(prometheus.CounterOpts{})) assertBlockExists := func(user string, block ulid.ULID, expectExists bool) { exists, err := bucketClient.Exists(ctx, path.Join(user, block.String(), metadata.MetaFilename)) diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 75732e22000..5eb45e1aafb 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -4,6 +4,7 @@ import ( "context" "flag" "fmt" + "github.com/oklog/ulid" "hash/fnv" "math/rand" "os" @@ -40,8 +41,10 @@ const ( // ringKey is the key under which we store the compactors ring in the KVStore. ringKey = "compactor" - blocksMarkedForDeletionName = "cortex_compactor_blocks_marked_for_deletion_total" - blocksMarkedForDeletionHelp = "Total number of blocks marked for deletion in compactor." + blocksMarkedForDeletionName = "cortex_compactor_blocks_marked_for_deletion_total" + blocksMarkedForDeletionHelp = "Total number of blocks marked for deletion in compactor." + compactorPartitionErrorCountName = "cortex_compactor_partition_error" + compactorPartitionErrorCountHelp = "Count of errors happened during partitioning compaction." ) var ( @@ -52,7 +55,7 @@ var ( errInvalidShardingStrategy = errors.New("invalid sharding strategy") errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") - DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, _ prometheus.Gauge, _ prometheus.Counter, _ prometheus.Counter, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string) compact.Grouper { + DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, _ prometheus.Gauge, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string) compact.Grouper { return compact.NewDefaultGrouper( logger, bkt, @@ -67,7 +70,7 @@ var ( cfg.BlocksFetchConcurrency) } - ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, remainingPlannedCompactions prometheus.Gauge, blockVisitMarkerReadFailed prometheus.Counter, blockVisitMarkerWriteFailed prometheus.Counter, ring *ring.Ring, ringLifecycle *ring.Lifecycler, limits Limits, userID string) compact.Grouper { + ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, remainingPlannedCompactions prometheus.Gauge, blockVisitMarkerReadFailed prometheus.Counter, blockVisitMarkerWriteFailed prometheus.Counter, partitionedGroupInfoReadFailed prometheus.Counter, partitionedGroupInfoWriteFailed prometheus.Counter, ring *ring.Ring, ringLifecycle *ring.Lifecycler, limits Limits, userID string) compact.Grouper { return NewShuffleShardingGrouper( ctx, logger, @@ -91,7 +94,9 @@ var ( cfg.CompactionConcurrency, 
cfg.BlockVisitMarkerTimeout, blockVisitMarkerReadFailed, - blockVisitMarkerWriteFailed) + blockVisitMarkerWriteFailed, + partitionedGroupInfoReadFailed, + partitionedGroupInfoWriteFailed) } DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, PlannerFactory, error) { @@ -119,6 +124,14 @@ var ( } return compactor, plannerFactory, nil } + + DefaultCompactionCompleteCheckerFactory = func(_ context.Context, _ objstore.InstrumentedBucket, _ log.Logger, _ prometheus.Counter, _ prometheus.Counter) compact.ExtraCompactionCompleteChecker { + return compact.DefaultCompactionCompleteChecker{} + } + + PartitionCompactionCompleteCheckerFactory = func(ctx context.Context, bkt objstore.InstrumentedBucket, logger log.Logger, blockVisitMarkerReadFailed prometheus.Counter, partitionedGroupInfoWriteFailed prometheus.Counter) compact.ExtraCompactionCompleteChecker { + return NewPartitionCompactionCompleteChecker(ctx, bkt, logger, blockVisitMarkerReadFailed, partitionedGroupInfoWriteFailed) + } ) // BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks. @@ -134,6 +147,8 @@ type BlocksGrouperFactory func( remainingPlannedCompactions prometheus.Gauge, blockVisitMarkerReadFailed prometheus.Counter, blockVisitMarkerWriteFailed prometheus.Counter, + partitionedGroupInfoReadFailed prometheus.Counter, + partitionedGroupInfoWriteFailed prometheus.Counter, ring *ring.Ring, ringLifecycler *ring.Lifecycler, limit Limits, @@ -159,6 +174,14 @@ type PlannerFactory func( blockVisitMarkerWriteFailed prometheus.Counter, ) compact.Planner +type CompactionCompleteCheckerFactory func( + ctx context.Context, + bkt objstore.InstrumentedBucket, + logger log.Logger, + blockVisitMarkerReadFailed prometheus.Counter, + partitionedGroupInfoReadFailed prometheus.Counter, +) compact.ExtraCompactionCompleteChecker + // Limits defines limits used by the Compactor. type Limits interface { CompactorTenantShardSize(userID string) int @@ -206,6 +229,10 @@ type Config struct { // Block visit marker file config BlockVisitMarkerTimeout time.Duration `yaml:"block_visit_marker_timeout"` BlockVisitMarkerFileUpdateInterval time.Duration `yaml:"block_visit_marker_file_update_interval"` + + // Partitioning config + PartitionIndexSizeLimitInBytes int64 `yaml:"partition_index_size_limit_in_bytes"` + PartitionSeriesCountLimit int64 `yaml:"partition_series_count_limit"` } // RegisterFlags registers the Compactor flags. @@ -240,8 +267,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.") f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. 
If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.") - f.DurationVar(&cfg.BlockVisitMarkerTimeout, "compactor.block-visit-marker-timeout", 5*time.Minute, "How long block visit marker file should be considered as expired and able to be picked up by compactor again.") - f.DurationVar(&cfg.BlockVisitMarkerFileUpdateInterval, "compactor.block-visit-marker-file-update-interval", 1*time.Minute, "How frequently block visit marker file should be updated duration compaction.") + f.Int64Var(&cfg.PartitionIndexSizeLimitInBytes, "compactor.partition-index-size-limit-in-bytes", 0, "Index size limit in bytes for each compaction partition. 0 means no limit") + f.Int64Var(&cfg.PartitionSeriesCountLimit, "compactor.partition-series-count-limit", 0, "Time series count limit for each compaction partition. 0 means no limit") } func (cfg *Config) Validate(limits validation.Limits) error { @@ -302,6 +329,8 @@ type Compactor struct { blocksPlannerFactory PlannerFactory + compactionCompleteCheckerFactory CompactionCompleteCheckerFactory + // Client used to run operations on the bucket storing blocks. bucketClient objstore.Bucket @@ -312,22 +341,24 @@ type Compactor struct { ringSubservicesWatcher *services.FailureWatcher // Metrics. - compactionRunsStarted prometheus.Counter - compactionRunsInterrupted prometheus.Counter - compactionRunsCompleted prometheus.Counter - compactionRunsFailed prometheus.Counter - compactionRunsLastSuccess prometheus.Gauge - compactionRunDiscoveredTenants prometheus.Gauge - compactionRunSkippedTenants prometheus.Gauge - compactionRunSucceededTenants prometheus.Gauge - compactionRunFailedTenants prometheus.Gauge - compactionRunInterval prometheus.Gauge - blocksMarkedForDeletion prometheus.Counter - blocksMarkedForNoCompaction prometheus.Counter - garbageCollectedBlocks prometheus.Counter - remainingPlannedCompactions prometheus.Gauge - blockVisitMarkerReadFailed prometheus.Counter - blockVisitMarkerWriteFailed prometheus.Counter + compactionRunsStarted prometheus.Counter + compactionRunsInterrupted prometheus.Counter + compactionRunsCompleted prometheus.Counter + compactionRunsFailed prometheus.Counter + compactionRunsLastSuccess prometheus.Gauge + compactionRunDiscoveredTenants prometheus.Gauge + compactionRunSkippedTenants prometheus.Gauge + compactionRunSucceededTenants prometheus.Gauge + compactionRunFailedTenants prometheus.Gauge + compactionRunInterval prometheus.Gauge + blocksMarkedForDeletion prometheus.Counter + blocksMarkedForNoCompaction prometheus.Counter + garbageCollectedBlocks prometheus.Counter + remainingPlannedCompactions prometheus.Gauge + blockVisitMarkerReadFailed prometheus.Counter + blockVisitMarkerWriteFailed prometheus.Counter + partitionedGroupInfoReadFailed prometheus.Counter + partitionedGroupInfoWriteFailed prometheus.Counter // TSDB syncer metrics syncerMetrics *syncerMetrics @@ -357,7 +388,14 @@ func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfi } } - cortexCompactor, err := newCompactor(compactorCfg, storageCfg, cfgProvider, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, limits) + var compactionCompleteCheckerFactory CompactionCompleteCheckerFactory + if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { + compactionCompleteCheckerFactory = PartitionCompactionCompleteCheckerFactory + } else { + compactionCompleteCheckerFactory = DefaultCompactionCompleteCheckerFactory + } + + 
cortexCompactor, err := newCompactor(compactorCfg, storageCfg, cfgProvider, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, compactionCompleteCheckerFactory, limits) if err != nil { return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") } @@ -374,6 +412,7 @@ func newCompactor( bucketClientFactory func(ctx context.Context) (objstore.Bucket, error), blocksGrouperFactory BlocksGrouperFactory, blocksCompactorFactory BlocksCompactorFactory, + compactionCompleteCheckerFactory CompactionCompleteCheckerFactory, limits Limits, ) (*Compactor, error) { var remainingPlannedCompactions prometheus.Gauge @@ -384,17 +423,18 @@ func newCompactor( }) } c := &Compactor{ - compactorCfg: compactorCfg, - storageCfg: storageCfg, - cfgProvider: cfgProvider, - parentLogger: logger, - logger: log.With(logger, "component", "compactor"), - registerer: registerer, - syncerMetrics: newSyncerMetrics(registerer), - bucketClientFactory: bucketClientFactory, - blocksGrouperFactory: blocksGrouperFactory, - blocksCompactorFactory: blocksCompactorFactory, - allowedTenants: util.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants), + compactorCfg: compactorCfg, + storageCfg: storageCfg, + cfgProvider: cfgProvider, + parentLogger: logger, + logger: log.With(logger, "component", "compactor"), + registerer: registerer, + syncerMetrics: newSyncerMetrics(registerer), + bucketClientFactory: bucketClientFactory, + blocksGrouperFactory: blocksGrouperFactory, + blocksCompactorFactory: blocksCompactorFactory, + compactionCompleteCheckerFactory: compactionCompleteCheckerFactory, + allowedTenants: util.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants), compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_runs_started_total", @@ -457,6 +497,14 @@ func newCompactor( Name: "cortex_compactor_block_visit_marker_write_failed", Help: "Number of block visit marker file failed to be written.", }), + partitionedGroupInfoReadFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_partitioned_group_info_read_failed", + Help: "Number of partitioned group info file failed to be read.", + }), + partitionedGroupInfoWriteFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_partitioned_group_info_write_failed", + Help: "Number of partitioned group info file failed to be written.", + }), remainingPlannedCompactions: remainingPlannedCompactions, limits: limits, } @@ -505,7 +553,7 @@ func (c *Compactor) starting(ctx context.Context) error { CleanupConcurrency: c.compactorCfg.CleanupConcurrency, BlockDeletionMarksMigrationEnabled: c.compactorCfg.BlockDeletionMarksMigrationEnabled, TenantCleanupDelay: c.compactorCfg.TenantCleanupDelay, - }, c.bucketClient, c.usersScanner, c.cfgProvider, c.parentLogger, c.registerer) + }, c.bucketClient, c.usersScanner, c.cfgProvider, c.parentLogger, c.registerer, c.partitionedGroupInfoReadFailed) // Initialize the compactors ring if sharding is enabled. if c.compactorCfg.ShardingEnabled { @@ -760,7 +808,7 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { // Filters out duplicate blocks that can be formed from two or more overlapping // blocks that fully submatches the source blocks of the older blocks. 
- deduplicateBlocksFilter := block.NewDeduplicateFilter(c.compactorCfg.BlockSyncConcurrency) + deduplicateBlocksFilter := &DisabledDeduplicateFilter{} // While fetching blocks, we filter out blocks that were marked for deletion by using IgnoreDeletionMarkFilter. // No delay is used -- all blocks with deletion marker are ignored, and not considered for compaction. @@ -809,14 +857,13 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { return errors.Wrap(err, "failed to create syncer") } - currentCtx, cancel := context.WithCancel(ctx) - defer cancel() - compactor, err := compact.NewBucketCompactor( + compactor, err := compact.NewBucketCompactorWithCompleteChecker( ulogger, syncer, - c.blocksGrouperFactory(currentCtx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.blocksMarkedForNoCompaction, c.garbageCollectedBlocks, c.remainingPlannedCompactions, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed, c.ring, c.ringLifecycler, c.limits, userID), - c.blocksPlannerFactory(currentCtx, bucket, ulogger, c.compactorCfg, noCompactMarkerFilter, c.ringLifecycler, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed), + c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.blocksMarkedForNoCompaction, c.garbageCollectedBlocks, c.remainingPlannedCompactions, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed, c.partitionedGroupInfoReadFailed, c.partitionedGroupInfoWriteFailed, c.ring, c.ringLifecycler, c.limits, userID), + c.blocksPlannerFactory(ctx, bucket, ulogger, c.compactorCfg, noCompactMarkerFilter, c.ringLifecycler, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed), c.blocksCompactor, + c.compactionCompleteCheckerFactory(ctx, bucket, ulogger, c.blockVisitMarkerReadFailed, c.partitionedGroupInfoReadFailed), path.Join(c.compactorCfg.DataDir, "compact"), bucket, c.compactorCfg.CompactionConcurrency, @@ -949,3 +996,15 @@ func (c *Compactor) listTenantsWithMetaSyncDirectories() map[string]struct{} { return result } + +type DisabledDeduplicateFilter struct { +} + +func (f *DisabledDeduplicateFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced block.GaugeVec, modified block.GaugeVec) error { + // don't do any deduplicate filtering + return nil +} + +func (f *DisabledDeduplicateFilter) DuplicateIDs() []ulid.ULID { + return nil +} diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 52e4cf38284..048249cae1c 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/testutil" @@ -158,7 +159,7 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) cfg := prepareConfig() - c, _, _, logs, registry := prepare(t, cfg, bucketClient, nil) + c, _, _, logs, registry := prepare(t, cfg, bucketClient, nil, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. 
@@ -308,7 +309,7 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", nil, errors.New("failed to iterate the bucket")) - c, _, _, logs, registry := prepare(t, prepareConfig(), bucketClient, nil) + c, _, _, logs, registry := prepare(t, prepareConfig(), bucketClient, nil, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. @@ -456,6 +457,7 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( t.Parallel() userID := "test-user" + partitionedGroupID := getPartitionedGroupID(userID) bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{userID}, nil) bucketClient.MockIter(userID+"/", []string{userID + "/01DTVP434PA9VFXSW2JKB3392D", userID + "/01FN6CDF3PNEWWRY5MPGJPE3EX"}, nil) @@ -464,18 +466,22 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) - bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", "", nil) - bucketClient.MockUpload(userID+"/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", nil) + bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload(userID+"/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) bucketClient.MockGet(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json", mockBlockMetaJSON("01FN6CDF3PNEWWRY5MPGJPE3EX"), nil) bucketClient.MockGet(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/no-compact-mark.json", "", nil) bucketClient.MockGet(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/deletion-mark.json", "", nil) - bucketClient.MockGet(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", "", nil) - bucketClient.MockUpload(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", nil) + bucketClient.MockGet(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload(userID+"/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) + bucketClient.MockGet(userID+"/partitioned-groups/"+partitionedGroupID+".json", "", nil) + bucketClient.MockUpload(userID+"/partitioned-groups/"+partitionedGroupID+".json", nil) + bucketClient.MockIter(userID+"/"+PartitionedGroupDirectory, nil, nil) - c, _, tsdbPlannerMock, _, registry := prepare(t, prepareConfig(), bucketClient, nil) + c, _, tsdbPlannerMock, _, registry := prepare(t, prepareConfig(), bucketClient, nil, nil) tsdbPlannerMock.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, errors.New("Failed to plan")) + tsdbPlannerMock.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, errors.New("Failed to plan")) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. 
@@ -507,6 +513,8 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { t.Parallel() + partitionedGroupID1 := getPartitionedGroupID("user-1") + partitionedGroupID2 := getPartitionedGroupID("user-2") // Mock the bucket to contain two users, each one with one block. bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) @@ -519,33 +527,40 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json", mockBlockMetaJSON("01FN6CDF3PNEWWRY5MPGJPE3EX"), nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", "", nil) + bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", "", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/visit-mark.json", "", nil) + bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json", mockBlockMetaJSON("01FN3V83ABR9992RF8WRJZ76ZQ"), nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/visit-mark.json", "", nil) + bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", "", nil) bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil) bucketClient.MockGet("user-2/bucket-index.json.gz", "", nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockIter("user-2/markers/", nil, nil) bucketClient.MockUpload("user-1/bucket-index.json.gz", nil) bucketClient.MockUpload("user-2/bucket-index.json.gz", nil) + bucketClient.MockGet("user-1/partitioned-groups/"+partitionedGroupID1+".json", "", nil) + bucketClient.MockUpload("user-1/partitioned-groups/"+partitionedGroupID1+".json", nil) + bucketClient.MockGet("user-2/partitioned-groups/"+partitionedGroupID2+".json", "", nil) + bucketClient.MockUpload("user-2/partitioned-groups/"+partitionedGroupID2+".json", nil) + bucketClient.MockIter("user-1/"+PartitionedGroupDirectory, nil, nil) + bucketClient.MockIter("user-2/"+PartitionedGroupDirectory, nil, nil) - c, _, tsdbPlanner, logs, registry := prepare(t, prepareConfig(), bucketClient, nil) + c, _, tsdbPlanner, logs, registry := prepare(t, prepareConfig(), bucketClient, nil, nil) // Mock the planner as if there's no compaction to do, // 
in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -674,8 +689,9 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { bucketClient.MockDelete("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", nil) bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil) bucketClient.MockUpload("user-1/bucket-index.json.gz", nil) + bucketClient.MockIter("user-1/"+PartitionedGroupDirectory, nil, nil) - c, _, tsdbPlanner, logs, registry := prepare(t, cfg, bucketClient, nil) + c, _, tsdbPlanner, logs, registry := prepare(t, cfg, bucketClient, nil, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -762,6 +778,8 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { t.Parallel() + partitionedGroupID1 := getPartitionedGroupID("user-1") + partitionedGroupID2 := getPartitionedGroupID("user-2") // Mock the bucket to contain two users, each one with one block. bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) @@ -774,24 +792,24 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", mockNoCompactBlockJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) - bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", "", nil) - bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json", mockBlockMetaJSON("01FN6CDF3PNEWWRY5MPGJPE3EX"), nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/no-compact-mark.json", mockNoCompactBlockJSON("01FN6CDF3PNEWWRY5MPGJPE3EX"), nil) - bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", "", nil) - bucketClient.MockUpload("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", nil) + bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/visit-mark.json", "", nil) - bucketClient.MockUpload("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/visit-mark.json", nil) + bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", "", nil) + 
bucketClient.MockUpload("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json", mockBlockMetaJSON("01FN3V83ABR9992RF8WRJZ76ZQ"), nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/visit-mark.json", "", nil) - bucketClient.MockUpload("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/visit-mark.json", nil) + bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil) bucketClient.MockGet("user-2/bucket-index.json.gz", "", nil) @@ -799,10 +817,17 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { bucketClient.MockIter("user-2/markers/", nil, nil) bucketClient.MockUpload("user-1/bucket-index.json.gz", nil) bucketClient.MockUpload("user-2/bucket-index.json.gz", nil) + bucketClient.MockGet("user-1/partitioned-groups/"+partitionedGroupID1+".json", "", nil) + bucketClient.MockUpload("user-1/partitioned-groups/"+partitionedGroupID1+".json", nil) + bucketClient.MockGet("user-2/partitioned-groups/"+partitionedGroupID2+".json", "", nil) + bucketClient.MockUpload("user-2/partitioned-groups/"+partitionedGroupID2+".json", nil) + bucketClient.MockIter("user-1/"+PartitionedGroupDirectory, nil, nil) + bucketClient.MockIter("user-2/"+PartitionedGroupDirectory, nil, nil) - c, _, tsdbPlanner, _, registry := prepare(t, prepareConfig(), bucketClient, nil) + c, _, tsdbPlanner, _, registry := prepare(t, prepareConfig(), bucketClient, nil, nil) tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -833,6 +858,7 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) cfg.DeletionDelay = 10 * time.Minute // Delete block after 10 minutes cfg.TenantCleanupDelay = 10 * time.Minute // To make sure it's not 0. + partitionedGroupID1 := getPartitionedGroupID("user-1") // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{"user-1"}, nil) @@ -843,21 +869,25 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) bucketClient.MockIter("user-1/01DTVP434PA9VFXSW2JKB3392D", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTVP434PA9VFXSW2JKB3392D/index"}, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/index", "some index content", nil) - bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", "", nil) - bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) bucketClient.MockExists("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", false, nil) bucketClient.MockDelete("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", nil) bucketClient.MockDelete("user-1/01DTVP434PA9VFXSW2JKB3392D/index", nil) bucketClient.MockDelete("user-1/bucket-index.json.gz", nil) + bucketClient.MockGet("user-1/partitioned-groups/"+partitionedGroupID1+".json", "", nil) + bucketClient.MockUpload("user-1/partitioned-groups/"+partitionedGroupID1+".json", nil) + bucketClient.MockIter("user-1/"+PartitionedGroupDirectory, nil, nil) - c, _, tsdbPlanner, logs, registry := prepare(t, cfg, bucketClient, nil) + c, _, tsdbPlanner, logs, registry := prepare(t, cfg, bucketClient, nil, nil) // Mock the planner as if there's no compaction to do, // in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). 
tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -950,11 +980,12 @@ func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { cfg := prepareConfig() cfg.SkipBlocksWithOutOfOrderChunksEnabled = true - c, tsdbCompac, tsdbPlanner, _, registry := prepare(t, cfg, bucketClient, nil) + c, tsdbCompac, tsdbPlanner, _, registry := prepare(t, cfg, bucketClient, nil, nil) - tsdbCompac.On("Compact", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(b1, nil) + tsdbCompac.On("Compact", mock.Anything, mock.Anything, mock.Anything).Return(b1, nil) + tsdbCompac.On("CompactWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(b1, nil) - tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{ + mockPlannedBlocks := []*metadata.Meta{ { BlockMeta: tsdb.BlockMeta{ ULID: b1, @@ -969,7 +1000,9 @@ func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { MaxTime: 30, }, }, - }, nil) + } + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return(mockPlannedBlocks, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(mockPlannedBlocks, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -993,6 +1026,8 @@ func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunning(t *testing.T) { t.Parallel() + partitionedGroupID1 := getPartitionedGroupID("user-1") + partitionedGroupID2 := getPartitionedGroupID("user-2") // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) @@ -1005,27 +1040,37 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", "", nil) - bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/"+block.PartitionInfoFilename, "", nil) + bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json", mockBlockMetaJSON("01FN6CDF3PNEWWRY5MPGJPE3EX"), nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", "", nil) - bucketClient.MockUpload("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/visit-mark.json", nil) + bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", "", nil) + bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/"+block.PartitionInfoFilename, "", nil) + bucketClient.MockUpload("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/visit-mark.json", "", nil) - bucketClient.MockUpload("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/visit-mark.json", nil) + bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", nil) + bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/"+block.PartitionInfoFilename, "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json", mockBlockMetaJSON("01FN3V83ABR9992RF8WRJZ76ZQ"), nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/no-compact-mark.json", "", nil) - bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/visit-mark.json", "", nil) - bucketClient.MockUpload("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/visit-mark.json", nil) + bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", nil) + bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/"+block.PartitionInfoFilename, "", nil) bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil) bucketClient.MockGet("user-2/bucket-index.json.gz", "", nil) bucketClient.MockUpload("user-1/bucket-index.json.gz", nil) bucketClient.MockUpload("user-2/bucket-index.json.gz", nil) + bucketClient.MockGet("user-1/partitioned-groups/"+partitionedGroupID1+".json", "", 
nil) + bucketClient.MockUpload("user-1/partitioned-groups/"+partitionedGroupID1+".json", nil) + bucketClient.MockGet("user-2/partitioned-groups/"+partitionedGroupID2+".json", "", nil) + bucketClient.MockUpload("user-2/partitioned-groups/"+partitionedGroupID2+".json", nil) + bucketClient.MockIter("user-1/"+PartitionedGroupDirectory, nil, nil) + bucketClient.MockIter("user-2/"+PartitionedGroupDirectory, nil, nil) ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) @@ -1036,13 +1081,14 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni cfg.ShardingRing.InstanceAddr = "1.2.3.4" cfg.ShardingRing.KVStore.Mock = ringStore - c, _, tsdbPlanner, logs, _ := prepare(t, cfg, bucketClient, nil) + c, _, tsdbPlanner, logs, _ := prepare(t, cfg, bucketClient, nil, nil) // Mock the planner as if there's no compaction to do, // in order to simplify tests (all in all, we just want to // test our logic and not TSDB compactor which we expect to // be already tested). tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) @@ -1097,16 +1143,21 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", userIDs, nil) for _, userID := range userIDs { + partitionedGroupID := getPartitionedGroupID(userID) bucketClient.MockIter(userID+"/", []string{userID + "/01DTVP434PA9VFXSW2JKB3392D"}, nil) bucketClient.MockIter(userID+"/markers/", nil, nil) bucketClient.MockExists(path.Join(userID, cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) - bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", "", nil) - bucketClient.MockUpload(userID+"/01DTVP434PA9VFXSW2JKB3392D/visit-mark.json", nil) + bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) + bucketClient.MockUpload(userID+"/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) + bucketClient.MockGet(userID+"/01DTW0ZCPDDNV4BV83Q2SV4QAZ/"+block.PartitionInfoFilename, "", nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) + bucketClient.MockGet(userID+"/partitioned-groups/"+partitionedGroupID+".json", "", nil) + bucketClient.MockUpload(userID+"/partitioned-groups/"+partitionedGroupID+".json", nil) + bucketClient.MockIter(userID+"/"+PartitionedGroupDirectory, nil, nil) } // Create a shared KV Store @@ -1126,7 +1177,7 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM cfg.ShardingRing.WaitStabilityMaxDuration = 10 * time.Second cfg.ShardingRing.KVStore.Mock = kvstore - c, _, tsdbPlanner, l, _ := prepare(t, cfg, bucketClient, nil) + c, _, tsdbPlanner, l, _ := prepare(t, cfg, bucketClient, nil, nil) defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck compactors = append(compactors, c) @@ -1137,6 +1188,7 @@ func 
TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM // test our logic and not TSDB compactor which we expect to // be already tested). tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) } // Start all compactors @@ -1206,23 +1258,29 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit blockDirectory := []string{} for blockID, blockTimes := range blocks { + groupHash := hashGroup(userID, blockTimes["startTime"], blockTimes["endTime"]) blockVisitMarker := BlockVisitMarker{ - CompactorID: "test-compactor", - VisitTime: time.Now().Unix(), - Version: VisitMarkerVersion1, + CompactorID: "test-compactor", + VisitTime: time.Now().Unix(), + PartitionedGroupID: groupHash, + PartitionID: 0, + Status: Pending, + Version: VisitMarkerVersion1, } visitMarkerFileContent, _ := json.Marshal(blockVisitMarker) bucketClient.MockGet(userID+"/"+blockID+"/meta.json", mockBlockMetaJSONWithTime(blockID, userID, blockTimes["startTime"], blockTimes["endTime"]), nil) bucketClient.MockGet(userID+"/"+blockID+"/deletion-mark.json", "", nil) bucketClient.MockGet(userID+"/"+blockID+"/no-compact-mark.json", "", nil) - bucketClient.MockGetTimes(userID+"/"+blockID+"/visit-mark.json", "", nil, 1) - bucketClient.MockGet(userID+"/"+blockID+"/visit-mark.json", string(visitMarkerFileContent), nil) - bucketClient.MockUpload(userID+"/"+blockID+"/visit-mark.json", nil) + bucketClient.MockGetTimes(userID+"/"+blockID+"/partition-0-visit-mark.json", "", nil, 1) + bucketClient.MockGet(userID+"/"+blockID+"/partition-0-visit-mark.json", string(visitMarkerFileContent), nil) + bucketClient.MockUpload(userID+"/"+blockID+"/partition-0-visit-mark.json", nil) + bucketClient.MockGet(userID+"/"+blockID+"/"+block.PartitionInfoFilename, "", nil) blockDirectory = append(blockDirectory, userID+"/"+blockID) // Get all of the unique group hashes so that they can be used to ensure all groups were compacted - groupHash := hashGroup(userID, blockTimes["startTime"], blockTimes["endTime"]) groupHashes[groupHash]++ + bucketClient.MockGet(userID+"/partitioned-groups/"+fmt.Sprint(groupHash)+".json", "", nil) + bucketClient.MockUpload(userID+"/partitioned-groups/"+fmt.Sprint(groupHash)+".json", nil) } bucketClient.MockIter(userID+"/", blockDirectory, nil) @@ -1230,6 +1288,7 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit bucketClient.MockExists(path.Join(userID, cortex_tsdb.TenantDeletionMarkPath), false, nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) + bucketClient.MockIter(userID+"/"+PartitionedGroupDirectory, nil, nil) } // Create a shared KV Store @@ -1249,12 +1308,13 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit cfg.ShardingRing.WaitStabilityMinDuration = 3 * time.Second cfg.ShardingRing.WaitStabilityMaxDuration = 10 * time.Second cfg.ShardingRing.KVStore.Mock = kvstore + cfg.BlockVisitMarkerTimeout = 5 * time.Minute limits := &validation.Limits{} flagext.DefaultValues(limits) limits.CompactorTenantShardSize = 3 - c, _, tsdbPlanner, l, _ := prepare(t, cfg, bucketClient, limits) + c, _, tsdbPlanner, l, _ := prepare(t, cfg, bucketClient, limits, nil) defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck compactors = append(compactors, c) @@ -1265,6 +1325,7 @@ func 
TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit // test our logic and not TSDB compactor which we expect to // be already tested). tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) } // Start all compactors @@ -1510,7 +1571,7 @@ func prepareConfig() Config { return compactorCfg } -func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket, limits *validation.Limits) (*Compactor, *tsdbCompactorMock, *tsdbPlannerMock, *concurrency.SyncBuffer, prometheus.Gatherer) { +func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket, limits *validation.Limits, tsdbGrouper *tsdbGrouperMock) (*Compactor, *tsdbCompactorMock, *tsdbPlannerMock, *concurrency.SyncBuffer, prometheus.Gatherer) { storageCfg := cortex_tsdb.BlocksStorageConfig{} flagext.DefaultValues(&storageCfg) @@ -1547,13 +1608,26 @@ func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket, li } var blocksGrouperFactory BlocksGrouperFactory + if tsdbGrouper != nil { + blocksGrouperFactory = func(_ context.Context, _ Config, _ objstore.InstrumentedBucket, _ log.Logger, _ prometheus.Registerer, _, _, _ prometheus.Counter, _ prometheus.Gauge, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string) compact.Grouper { + return tsdbGrouper + } + } else { + if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { + blocksGrouperFactory = ShuffleShardingGrouperFactory + } else { + blocksGrouperFactory = DefaultBlocksGrouperFactory + } + } + + var compactionCompleteCheckerFactory CompactionCompleteCheckerFactory if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { - blocksGrouperFactory = ShuffleShardingGrouperFactory + compactionCompleteCheckerFactory = PartitionCompactionCompleteCheckerFactory } else { - blocksGrouperFactory = DefaultBlocksGrouperFactory + compactionCompleteCheckerFactory = DefaultCompactionCompleteCheckerFactory } - c, err := newCompactor(compactorCfg, storageCfg, overrides, logger, registry, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, overrides) + c, err := newCompactor(compactorCfg, storageCfg, overrides, logger, registry, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, compactionCompleteCheckerFactory, overrides) require.NoError(t, err) return c, tsdbCompactor, tsdbPlanner, logs, registry @@ -1578,6 +1652,11 @@ func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Blo return args.Get(0).(ulid.ULID), args.Error(1) } +func (m *tsdbCompactorMock) CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionId int) (ulid.ULID, error) { + args := m.Called(dest, dirs, open, partitionCount, partitionId) + return args.Get(0).(ulid.ULID), args.Error(1) +} + type tsdbPlannerMock struct { mock.Mock noCompactMarkFilters []*compact.GatherNoCompactionMarkFilter @@ -1587,6 +1666,10 @@ func (m *tsdbPlannerMock) Plan(ctx context.Context, metasByMinTime []*metadata.M args := m.Called(ctx, metasByMinTime) return args.Get(0).([]*metadata.Meta), args.Error(1) } +func (m *tsdbPlannerMock) PlanWithPartition(ctx context.Context, metasByMinTime []*metadata.Meta, partitionID int, errChan chan error) ([]*metadata.Meta, error) { + args := m.Called(ctx, metasByMinTime, partitionID, errChan) + return 
args.Get(0).([]*metadata.Meta), args.Error(1) +} func (m *tsdbPlannerMock) getNoCompactBlocks() []string { @@ -1601,17 +1684,70 @@ func (m *tsdbPlannerMock) getNoCompactBlocks() []string { return result } -func mockBlockMetaJSON(id string) string { - meta := tsdb.BlockMeta{ +type tsdbGrouperMock struct { + mock.Mock +} + +func (m *tsdbGrouperMock) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*compact.Group, err error) { + args := m.Called(blocks) + return args.Get(0).([]*compact.Group), args.Error(1) +} + +var ( + BlockMinTime = int64(1574776800000) + BlockMaxTime = int64(1574784000000) +) + +func getPartitionedGroupID(userID string) string { + return fmt.Sprint(hashGroup(userID, BlockMinTime, BlockMaxTime)) +} + +func mockBlockGroup(userID string, ids []string, bkt *bucket.ClientMock) *compact.Group { + dummyCounter := prometheus.NewCounter(prometheus.CounterOpts{}) + group, _ := compact.NewGroup( + log.NewNopLogger(), + bkt, + getPartitionedGroupID(userID), + nil, + 0, + true, + true, + dummyCounter, + dummyCounter, + dummyCounter, + dummyCounter, + dummyCounter, + dummyCounter, + dummyCounter, + dummyCounter, + metadata.NoneFunc, + 1, + 1, + ) + for _, id := range ids { + meta := mockBlockMeta(id) + group.AppendMeta(&metadata.Meta{ + BlockMeta: meta, + }) + } + return group +} + +func mockBlockMeta(id string) tsdb.BlockMeta { + return tsdb.BlockMeta{ Version: 1, ULID: ulid.MustParse(id), - MinTime: 1574776800000, - MaxTime: 1574784000000, + MinTime: BlockMinTime, + MaxTime: BlockMaxTime, Compaction: tsdb.BlockMetaCompaction{ Level: 1, Sources: []ulid.ULID{ulid.MustParse(id)}, }, } +} + +func mockBlockMetaJSON(id string) string { + meta := mockBlockMeta(id) content, err := json.Marshal(meta) if err != nil { @@ -1714,7 +1850,7 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { cfg.ShardingRing.KVStore.Mock = kvstore // Each compactor will get its own temp dir for storing local files. - c, _, tsdbPlanner, _, _ := prepare(t, cfg, inmem, nil) + c, _, tsdbPlanner, _, _ := prepare(t, cfg, inmem, nil, nil) t.Cleanup(func() { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c)) }) @@ -1726,6 +1862,7 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { // test our logic and not TSDB compactor which we expect to // be already tested). tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) } require.Equal(t, 2, len(compactors)) @@ -1783,7 +1920,7 @@ func TestCompactor_ShouldFailCompactionOnTimeout(t *testing.T) { // Set ObservePeriod to longer than the timeout period to mock a timeout while waiting on ring to become ACTIVE cfg.ShardingRing.ObservePeriod = time.Second * 10 - c, _, _, logs, _ := prepare(t, cfg, bucketClient, nil) + c, _, _, logs, _ := prepare(t, cfg, bucketClient, nil, nil) // Try to start the compactor with a bad consul kv-store. 
The err := services.StartAndAwaitRunning(context.Background(), c) @@ -1797,63 +1934,50 @@ func TestCompactor_ShouldFailCompactionOnTimeout(t *testing.T) { }, removeIgnoredLogs(strings.Split(strings.TrimSpace(logs.String()), "\n"))) } -func TestCompactor_ShouldNotTreatInterruptionsAsErrors(t *testing.T) { - bucketClient := objstore.NewInMemBucket() - id := ulid.MustNew(ulid.Now(), rand.Reader) - require.NoError(t, bucketClient.Upload(context.Background(), "user-1/"+id.String()+"/meta.json", strings.NewReader(mockBlockMetaJSON(id.String())))) +func TestCompactor_ShouldNotHangIfPlannerReturnNothing(t *testing.T) { + t.Parallel() - b1 := createTSDBBlock(t, bucketClient, "user-1", 10, 20, map[string]string{"__name__": "Teste"}) - b2 := createTSDBBlock(t, bucketClient, "user-1", 20, 30, map[string]string{"__name__": "Teste"}) + bucketClient := &bucket.ClientMock{} - c, tsdbCompactor, tsdbPlanner, logs, registry := prepare(t, prepareConfig(), bucketClient, nil) + bucketClient.MockIter("", []string{"user-1"}, nil) + bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ"}, nil) + bucketClient.MockIter("user-1/markers/", nil, nil) + bucketClient.MockExists(path.Join("user-1", cortex_tsdb.TenantDeletionMarkPath), false, nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil) + bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", nil) + bucketClient.MockGet("user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", "", nil) + bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil) + bucketClient.MockUpload("user-1/bucket-index.json.gz", nil) + bucketClient.MockIter("user-1/"+PartitionedGroupDirectory, nil, nil) - ctx, cancel := context.WithCancel(context.Background()) - tsdbCompactor.On("Compact", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ulid.ULID{}, context.Canceled).Run(func(args mock.Arguments) { - cancel() - }) - tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{ - { - BlockMeta: tsdb.BlockMeta{ - ULID: b1, - MinTime: 10, - MaxTime: 20, - }, - }, - { - BlockMeta: tsdb.BlockMeta{ - ULID: b2, - MinTime: 20, - MaxTime: 30, - }, - }, - }, nil) - require.NoError(t, services.StartAndAwaitRunning(ctx, c)) + ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) + t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - cortex_testutil.Poll(t, 1*time.Second, 1.0, func() interface{} { - return prom_testutil.ToFloat64(c.compactionRunsInterrupted) - }) + cfg := prepareConfig() + cfg.ShardingEnabled = true + cfg.ShardingRing.InstanceID = "compactor-1" + cfg.ShardingRing.InstanceAddr = "1.2.3.4" + cfg.ShardingRing.KVStore.Mock = ringStore - require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c)) + tsdbGrouper := tsdbGrouperMock{} + mockGroups := []*compact.Group{mockBlockGroup("user-1", []string{"01DTVP434PA9VFXSW2JKB3392D", "01DTW0ZCPDDNV4BV83Q2SV4QAZ"}, 
bucketClient)} + tsdbGrouper.On("Groups", mock.Anything).Return(mockGroups, nil) - assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` - # TYPE cortex_compactor_runs_completed_total counter - # HELP cortex_compactor_runs_completed_total Total number of compaction runs successfully completed. - cortex_compactor_runs_completed_total 0 + c, _, tsdbPlanner, _, _ := prepare(t, cfg, bucketClient, nil, &tsdbGrouper) + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) + tsdbPlanner.On("PlanWithPartition", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]*metadata.Meta{}, nil) - # TYPE cortex_compactor_runs_interrupted_total counter - # HELP cortex_compactor_runs_interrupted_total Total number of compaction runs interrupted. - cortex_compactor_runs_interrupted_total 1 + require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - # TYPE cortex_compactor_runs_failed_total counter - # HELP cortex_compactor_runs_failed_total Total number of compaction runs failed. - cortex_compactor_runs_failed_total 0 - `), - "cortex_compactor_runs_completed_total", - "cortex_compactor_runs_interrupted_total", - "cortex_compactor_runs_failed_total", - )) + // Wait until a run has completed. + cortex_testutil.Poll(t, 5*time.Second, 1.0, func() interface{} { + return prom_testutil.ToFloat64(c.compactionRunsCompleted) + }) - lines := strings.Split(logs.String(), "\n") - require.Contains(t, lines, `level=info component=compactor msg="interrupting compaction of user blocks" user=user-1`) - require.NotContains(t, logs.String(), `level=error`) + require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c)) } diff --git a/pkg/compactor/partition_compaction_complete_checker.go b/pkg/compactor/partition_compaction_complete_checker.go new file mode 100644 index 00000000000..1f33856d82c --- /dev/null +++ b/pkg/compactor/partition_compaction_complete_checker.go @@ -0,0 +1,67 @@ +package compactor + +import ( + "context" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/compact" +) + +type PartitionCompactionCompleteChecker struct { + ctx context.Context + bkt objstore.InstrumentedBucket + logger log.Logger + blockVisitMarkerReadFailed prometheus.Counter + partitionedGroupInfoReadFailed prometheus.Counter +} + +func NewPartitionCompactionCompleteChecker( + ctx context.Context, + bkt objstore.InstrumentedBucket, + logger log.Logger, + blockVisitMarkerReadFailed prometheus.Counter, + partitionedGroupInfoReadFailed prometheus.Counter, +) *PartitionCompactionCompleteChecker { + return &PartitionCompactionCompleteChecker{ + ctx: ctx, + bkt: bkt, + logger: logger, + blockVisitMarkerReadFailed: blockVisitMarkerReadFailed, + partitionedGroupInfoReadFailed: partitionedGroupInfoReadFailed, + } +} + +func (p *PartitionCompactionCompleteChecker) IsComplete(group *compact.Group, blockID ulid.ULID) bool { + partitionedGroupID := group.PartitionedGroupID() + currentPartitionID := group.PartitionID() + partitionedGroupInfo, err := ReadPartitionedGroupInfo(p.ctx, p.bkt, p.logger, partitionedGroupID, p.partitionedGroupInfoReadFailed) + if err != nil { + level.Warn(p.logger).Log("msg", "unable to read partitioned group info", "partitioned_group_id", partitionedGroupID, "block_id", blockID, "err", err) + return false + } + return p.IsPartitionedBlockComplete(partitionedGroupInfo, 
currentPartitionID, blockID) +} + +func (p *PartitionCompactionCompleteChecker) IsPartitionedBlockComplete(partitionedGroupInfo *PartitionedGroupInfo, currentPartitionID int, blockID ulid.ULID) bool { + partitionedGroupID := partitionedGroupInfo.PartitionedGroupID + for _, partitionID := range partitionedGroupInfo.getPartitionIDsByBlock(blockID) { + // Skip current partition ID since current one is completed + if partitionID != currentPartitionID { + blockVisitMarker, err := ReadBlockVisitMarker(p.ctx, p.bkt, p.logger, blockID.String(), partitionID, p.blockVisitMarkerReadFailed) + if err != nil { + level.Warn(p.logger).Log("msg", "unable to read all visit markers for block", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "block_id", blockID, "err", err) + return false + } + if !blockVisitMarker.isCompleted() { + level.Warn(p.logger).Log("msg", "block has incomplete partition", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "block_id", blockID) + return false + } + } + } + level.Info(p.logger).Log("msg", "block has all partitions completed", "partitioned_group_id", partitionedGroupID, "block_id", blockID) + return true +} diff --git a/pkg/compactor/partition_compaction_complete_checker_test.go b/pkg/compactor/partition_compaction_complete_checker_test.go new file mode 100644 index 00000000000..1c65ccfba11 --- /dev/null +++ b/pkg/compactor/partition_compaction_complete_checker_test.go @@ -0,0 +1,379 @@ +package compactor + +import ( + "context" + "encoding/json" + "github.com/cortexproject/cortex/pkg/storage/bucket" + "github.com/go-kit/log" + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/compact" + "testing" + "time" +) + +func TestPartitionCompactionCompleteChecker(t *testing.T) { + ulid0 := ulid.MustNew(0, nil) + ulid1 := ulid.MustNew(1, nil) + ulid2 := ulid.MustNew(2, nil) + + rangeStart := (1 * time.Hour).Milliseconds() + rangeEnd := (2 * time.Hour).Milliseconds() + partitionedGroupID := uint32(12345) + compactorID := "compactor1" + timeBefore1h := time.Now().Add(-1 * time.Hour).Unix() + timeNow := time.Now().Unix() + + for _, tcase := range []struct { + name string + partitionedGroupInfo PartitionedGroupInfo + blocks map[ulid.ULID]struct { + expectComplete bool + visitMarkers []BlockVisitMarker + } + }{ + { + name: "all partitions are complete 1", + partitionedGroupInfo: PartitionedGroupInfo{ + PartitionedGroupID: partitionedGroupID, + PartitionCount: 2, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + ulid1, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid0, + ulid2, + }, + }, + }, + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Version: VisitMarkerVersion1, + }, + blocks: map[ulid.ULID]struct { + expectComplete bool + visitMarkers []BlockVisitMarker + }{ + ulid0: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 0, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid1: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 0, + CompactorID: 
compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid2: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + }, + }, + }, + }, + { + name: "all partitions are complete 2", + partitionedGroupInfo: PartitionedGroupInfo{ + PartitionedGroupID: partitionedGroupID, + PartitionCount: 3, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid1, + }, + }, + { + PartitionID: 2, + Blocks: []ulid.ULID{ + ulid2, + }, + }, + }, + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Version: VisitMarkerVersion1, + }, + blocks: map[ulid.ULID]struct { + expectComplete bool + visitMarkers []BlockVisitMarker + }{ + ulid0: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 0, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid1: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid2: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 2, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + }, + }, + }, + }, + { + name: "not all partitions are complete 1", + partitionedGroupInfo: PartitionedGroupInfo{ + PartitionedGroupID: partitionedGroupID, + PartitionCount: 3, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid0, + ulid1, + }, + }, + { + PartitionID: 2, + Blocks: []ulid.ULID{ + ulid2, + }, + }, + }, + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Version: VisitMarkerVersion1, + }, + blocks: map[ulid.ULID]struct { + expectComplete bool + visitMarkers []BlockVisitMarker + }{ + ulid0: { + expectComplete: false, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 0, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + { + Status: Pending, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid1: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid2: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 2, + CompactorID: compactorID, + VisitTime: timeBefore1h, + Version: VisitMarkerVersion1, + }, + }, + }, + }, + }, + { + name: "not all partitions are complete 2", + partitionedGroupInfo: PartitionedGroupInfo{ + PartitionedGroupID: partitionedGroupID, + PartitionCount: 3, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid0, + ulid1, + }, + }, + { + PartitionID: 2, 
+ Blocks: []ulid.ULID{ + ulid2, + }, + }, + }, + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Version: VisitMarkerVersion1, + }, + blocks: map[ulid.ULID]struct { + expectComplete bool + visitMarkers []BlockVisitMarker + }{ + ulid0: { + expectComplete: false, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 0, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + { + Status: Pending, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid1: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 1, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + }, + }, + ulid2: { + expectComplete: true, + visitMarkers: []BlockVisitMarker{ + { + Status: Completed, + PartitionedGroupID: partitionedGroupID, + PartitionID: 2, + CompactorID: compactorID, + VisitTime: timeNow, + Version: VisitMarkerVersion1, + }, + }, + }, + }, + }, + } { + t.Run(tcase.name, func(t *testing.T) { + bkt := &bucket.ClientMock{} + partitionedGroupInfoFileContent, _ := json.Marshal(tcase.partitionedGroupInfo) + bkt.MockGet(getPartitionedGroupFile(partitionedGroupID), string(partitionedGroupInfoFileContent), nil) + checker := NewPartitionCompactionCompleteChecker( + context.Background(), + objstore.WithNoopInstr(bkt), + log.NewNopLogger(), + prometheus.NewCounter(prometheus.CounterOpts{}), + prometheus.NewCounter(prometheus.CounterOpts{}), + ) + group := compact.Group{} + // set partitionID to -1 so it will go through all partitionIDs when checking + group.SetPartitionInfo(tcase.partitionedGroupInfo.PartitionedGroupID, tcase.partitionedGroupInfo.PartitionCount, -1) + for blockID, blockTCase := range tcase.blocks { + for _, visitMarker := range blockTCase.visitMarkers { + visitMarkerFileContent, _ := json.Marshal(visitMarker) + bkt.MockGet(getBlockVisitMarkerFile(blockID.String(), visitMarker.PartitionID), string(visitMarkerFileContent), nil) + } + require.Equal(t, blockTCase.expectComplete, checker.IsComplete(&group, blockID)) + } + }) + } +} diff --git a/pkg/compactor/partitioned_group_info.go b/pkg/compactor/partitioned_group_info.go new file mode 100644 index 00000000000..3b054ffa6f6 --- /dev/null +++ b/pkg/compactor/partitioned_group_info.go @@ -0,0 +1,143 @@ +package compactor + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/cortexproject/cortex/pkg/util/runutil" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + "io" + "path" + "strings" +) + +const ( + PartitionedGroupDirectory = "partitioned-groups" + PartitionedGroupInfoVersion1 = 1 +) + +var ( + ErrorPartitionedGroupInfoNotFound = errors.New("partitioned group info not found") + ErrorUnmarshalPartitionedGroupInfo = errors.New("unmarshal partitioned group info JSON") +) + +type Partition struct { + PartitionID int `json:"partitionID"` + Blocks []ulid.ULID `json:"blocks"` +} + +func (p *Partition) getBlocksSet() map[ulid.ULID]struct{} { + res := make(map[ulid.ULID]struct{}) + for _, blockID := range p.Blocks { + res[blockID] = struct{}{} + } + return res +} + +type PartitionedGroupInfo struct { + PartitionedGroupID uint32 `json:"partitionedGroupID"` + PartitionCount int 
`json:"partitionCount"` + Partitions []Partition `json:"partitions"` + RangeStart int64 `json:"rangeStart"` + RangeEnd int64 `json:"rangeEnd"` + // Version of the file. + Version int `json:"version"` +} + +func (p *PartitionedGroupInfo) getPartitionIDsByBlock(blockID ulid.ULID) []int { + var partitionIDs []int +partitionLoop: + for _, partition := range p.Partitions { + for _, block := range partition.Blocks { + if block == blockID { + partitionIDs = append(partitionIDs, partition.PartitionID) + continue partitionLoop + } + } + } + return partitionIDs +} + +func (p *PartitionedGroupInfo) getAllBlocks() []ulid.ULID { + uniqueBlocks := make(map[ulid.ULID]struct{}) + for _, partition := range p.Partitions { + for _, block := range partition.Blocks { + uniqueBlocks[block] = struct{}{} + } + } + blocks := make([]ulid.ULID, len(uniqueBlocks)) + i := 0 + for block, _ := range uniqueBlocks { + blocks[i] = block + i++ + } + return blocks +} + +func (p PartitionedGroupInfo) String() string { + var partitions []string + for _, partition := range p.Partitions { + partitions = append(partitions, fmt.Sprintf("(PartitionID: %d, Blocks: %s)", partition.PartitionID, partition.Blocks)) + } + return fmt.Sprintf("{PartitionedGroupID: %d, PartitionCount: %d, Partitions: %s}", p.PartitionedGroupID, p.PartitionCount, strings.Join(partitions, ", ")) +} + +func getPartitionedGroupFile(partitionedGroupID uint32) string { + return path.Join(PartitionedGroupDirectory, fmt.Sprintf("%d.json", partitionedGroupID)) +} + +func ReadPartitionedGroupInfo(ctx context.Context, bkt objstore.InstrumentedBucketReader, logger log.Logger, partitionedGroupID uint32, partitionedGroupInfoReadFailed prometheus.Counter) (*PartitionedGroupInfo, error) { + return ReadPartitionedGroupInfoFile(ctx, bkt, logger, getPartitionedGroupFile(partitionedGroupID), partitionedGroupInfoReadFailed) +} + +func ReadPartitionedGroupInfoFile(ctx context.Context, bkt objstore.InstrumentedBucketReader, logger log.Logger, partitionedGroupFile string, partitionedGroupInfoReadFailed prometheus.Counter) (*PartitionedGroupInfo, error) { + partitionedGroupReader, err := bkt.ReaderWithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, partitionedGroupFile) + if err != nil { + if bkt.IsObjNotFoundErr(err) { + return nil, errors.Wrapf(ErrorPartitionedGroupInfoNotFound, "partitioned group file: %s", partitionedGroupReader) + } + partitionedGroupInfoReadFailed.Inc() + return nil, errors.Wrapf(err, "get partitioned group file: %s", partitionedGroupReader) + } + defer runutil.CloseWithLogOnErr(logger, partitionedGroupReader, "close partitioned group reader") + p, err := io.ReadAll(partitionedGroupReader) + if err != nil { + partitionedGroupInfoReadFailed.Inc() + return nil, errors.Wrapf(err, "read partitioned group file: %s", partitionedGroupFile) + } + partitionedGroupInfo := PartitionedGroupInfo{} + if err = json.Unmarshal(p, &partitionedGroupInfo); err != nil { + partitionedGroupInfoReadFailed.Inc() + return nil, errors.Wrapf(ErrorUnmarshalPartitionedGroupInfo, "partitioned group file: %s, error: %v", partitionedGroupFile, err.Error()) + } + if partitionedGroupInfo.Version != VisitMarkerVersion1 { + partitionedGroupInfoReadFailed.Inc() + return nil, errors.Errorf("unexpected partitioned group file version %d, expected %d", partitionedGroupInfo.Version, VisitMarkerVersion1) + } + return &partitionedGroupInfo, nil +} + +func UpdatePartitionedGroupInfo(ctx context.Context, bkt objstore.InstrumentedBucket, logger log.Logger, partitionedGroupInfo PartitionedGroupInfo, 
partitionedGroupInfoReadFailed prometheus.Counter, partitionedGroupInfoWriteFailed prometheus.Counter) (*PartitionedGroupInfo, error) { + existingPartitionedGroup, err := ReadPartitionedGroupInfo(ctx, bkt, logger, partitionedGroupInfo.PartitionedGroupID, partitionedGroupInfoReadFailed) + if existingPartitionedGroup != nil { + level.Warn(logger).Log("msg", "partitioned group info already exists", "partitioned_group_id", partitionedGroupInfo.PartitionedGroupID) + return existingPartitionedGroup, nil + } + partitionedGroupFile := getPartitionedGroupFile(partitionedGroupInfo.PartitionedGroupID) + partitionedGroupInfoContent, err := json.Marshal(partitionedGroupInfo) + if err != nil { + partitionedGroupInfoWriteFailed.Inc() + return nil, err + } + reader := bytes.NewReader(partitionedGroupInfoContent) + if err := bkt.Upload(ctx, partitionedGroupFile, reader); err != nil { + return nil, err + } + return &partitionedGroupInfo, nil +} diff --git a/pkg/compactor/partitioned_group_info_test.go b/pkg/compactor/partitioned_group_info_test.go new file mode 100644 index 00000000000..f8bee62d173 --- /dev/null +++ b/pkg/compactor/partitioned_group_info_test.go @@ -0,0 +1,159 @@ +package compactor + +import ( + "context" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/thanos-io/objstore" + + cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" +) + +func TestPartitionedGroupInfo(t *testing.T) { + ulid0 := ulid.MustNew(0, nil) + ulid1 := ulid.MustNew(1, nil) + ulid2 := ulid.MustNew(2, nil) + rangeStart := (1 * time.Hour).Milliseconds() + rangeEnd := (2 * time.Hour).Milliseconds() + partitionedGroupID := uint32(12345) + for _, tcase := range []struct { + name string + partitionedGroupInfo PartitionedGroupInfo + }{ + { + name: "write partitioned group info 1", + partitionedGroupInfo: PartitionedGroupInfo{ + PartitionedGroupID: partitionedGroupID, + PartitionCount: 2, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + ulid1, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid0, + ulid2, + }, + }, + }, + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Version: VisitMarkerVersion1, + }, + }, + { + name: "write partitioned group info 2", + partitionedGroupInfo: PartitionedGroupInfo{ + PartitionedGroupID: partitionedGroupID, + PartitionCount: 3, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid1, + }, + }, + { + PartitionID: 2, + Blocks: []ulid.ULID{ + ulid2, + }, + }, + }, + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Version: VisitMarkerVersion1, + }, + }, + } { + t.Run(tcase.name, func(t *testing.T) { + ctx := context.Background() + dummyReadCounter := prometheus.NewCounter(prometheus.CounterOpts{}) + dummyWriteCounter := prometheus.NewCounter(prometheus.CounterOpts{}) + testBkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + bkt := objstore.WithNoopInstr(testBkt) + logger := log.NewNopLogger() + writeRes, err := UpdatePartitionedGroupInfo(ctx, bkt, logger, tcase.partitionedGroupInfo, dummyReadCounter, dummyWriteCounter) + require.NoError(t, err) + require.Equal(t, tcase.partitionedGroupInfo, *writeRes) + readRes, err := ReadPartitionedGroupInfo(ctx, bkt, logger, tcase.partitionedGroupInfo.PartitionedGroupID, dummyReadCounter) + require.NoError(t, err) + require.Equal(t, tcase.partitionedGroupInfo, *readRes) + }) + } +} 
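
A minimal sketch, not part of this patch, of what the round-trip test above persists: assuming the JSON tags declared on PartitionedGroupInfo and Partition in partitioned_group_info.go, plus encoding/json and fmt imports, the first test case would end up under partitioned-groups/12345.json roughly as shown in the comment below. The helper name examplePartitionedGroupInfoJSON is hypothetical and only illustrates the file layout.

func examplePartitionedGroupInfoJSON() {
	// Build the same value the "write partitioned group info 1" case uses.
	info := PartitionedGroupInfo{
		PartitionedGroupID: 12345,
		PartitionCount:     2,
		Partitions: []Partition{
			{PartitionID: 0, Blocks: []ulid.ULID{ulid.MustNew(0, nil), ulid.MustNew(1, nil)}},
			{PartitionID: 1, Blocks: []ulid.ULID{ulid.MustNew(0, nil), ulid.MustNew(2, nil)}},
		},
		RangeStart: (1 * time.Hour).Milliseconds(), // 3600000
		RangeEnd:   (2 * time.Hour).Milliseconds(), // 7200000
		Version:    PartitionedGroupInfoVersion1,
	}
	out, _ := json.MarshalIndent(info, "", "  ")
	// Expected shape (block ULIDs are rendered in their canonical string form):
	// {
	//   "partitionedGroupID": 12345,
	//   "partitionCount": 2,
	//   "partitions": [
	//     {"partitionID": 0, "blocks": ["...", "..."]},
	//     {"partitionID": 1, "blocks": ["...", "..."]}
	//   ],
	//   "rangeStart": 3600000,
	//   "rangeEnd": 7200000,
	//   "version": 1
	// }
	fmt.Println(string(out))
}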
+ +func TestGetPartitionIDsByBlock(t *testing.T) { + ulid0 := ulid.MustNew(0, nil) + ulid1 := ulid.MustNew(1, nil) + ulid2 := ulid.MustNew(2, nil) + ulid3 := ulid.MustNew(3, nil) + partitionedGroupInfo := PartitionedGroupInfo{ + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + Partitions: []Partition{ + { + PartitionID: 0, + Blocks: []ulid.ULID{ + ulid0, + ulid1, + }, + }, + { + PartitionID: 1, + Blocks: []ulid.ULID{ + ulid0, + ulid2, + }, + }, + { + PartitionID: 2, + Blocks: []ulid.ULID{ + ulid0, + ulid1, + ulid2, + ulid3, + }, + }, + }, + RangeStart: (1 * time.Hour).Milliseconds(), + RangeEnd: (2 * time.Hour).Milliseconds(), + Version: VisitMarkerVersion1, + } + + res0 := partitionedGroupInfo.getPartitionIDsByBlock(ulid0) + require.Equal(t, 3, len(res0)) + require.Contains(t, res0, 0) + require.Contains(t, res0, 1) + require.Contains(t, res0, 2) + + res1 := partitionedGroupInfo.getPartitionIDsByBlock(ulid1) + require.Equal(t, 2, len(res1)) + require.Contains(t, res1, 0) + require.Contains(t, res1, 2) + + res2 := partitionedGroupInfo.getPartitionIDsByBlock(ulid2) + require.Equal(t, 2, len(res2)) + require.Contains(t, res2, 1) + require.Contains(t, res2, 2) + + res3 := partitionedGroupInfo.getPartitionIDsByBlock(ulid3) + require.Equal(t, 1, len(res3)) + require.Contains(t, res3, 2) +} diff --git a/pkg/compactor/shuffle_sharding_grouper.go b/pkg/compactor/shuffle_sharding_grouper.go index 3ed5c3bdeee..636acf9eda7 100644 --- a/pkg/compactor/shuffle_sharding_grouper.go +++ b/pkg/compactor/shuffle_sharding_grouper.go @@ -3,7 +3,10 @@ package compactor import ( "context" "fmt" + thanosblock "github.com/thanos-io/thanos/pkg/block" "hash/fnv" + "math" + "path" "sort" "strings" "time" @@ -50,9 +53,13 @@ type ShuffleShardingGrouper struct { ringLifecyclerAddr string ringLifecyclerID string - blockVisitMarkerTimeout time.Duration - blockVisitMarkerReadFailed prometheus.Counter - blockVisitMarkerWriteFailed prometheus.Counter + blockVisitMarkerTimeout time.Duration + blockVisitMarkerReadFailed prometheus.Counter + blockVisitMarkerWriteFailed prometheus.Counter + partitionedGroupInfoReadFailed prometheus.Counter + partitionedGroupInfoWriteFailed prometheus.Counter + + blockPartitionInfoMap map[ulid.ULID]*thanosblock.PartitionInfo } func NewShuffleShardingGrouper( @@ -79,6 +86,8 @@ func NewShuffleShardingGrouper( blockVisitMarkerTimeout time.Duration, blockVisitMarkerReadFailed prometheus.Counter, blockVisitMarkerWriteFailed prometheus.Counter, + partitionedGroupInfoReadFailed prometheus.Counter, + partitionedGroupInfoWriteFailed prometheus.Counter, ) *ShuffleShardingGrouper { if logger == nil { logger = log.NewNopLogger() @@ -117,18 +126,21 @@ func NewShuffleShardingGrouper( Name: "thanos_compact_group_vertical_compactions_total", Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.", }, []string{"group"}), - compactorCfg: compactorCfg, - ring: ring, - ringLifecyclerAddr: ringLifecyclerAddr, - ringLifecyclerID: ringLifecyclerID, - limits: limits, - userID: userID, - blockFilesConcurrency: blockFilesConcurrency, - blocksFetchConcurrency: blocksFetchConcurrency, - compactionConcurrency: compactionConcurrency, - blockVisitMarkerTimeout: blockVisitMarkerTimeout, - blockVisitMarkerReadFailed: blockVisitMarkerReadFailed, - blockVisitMarkerWriteFailed: blockVisitMarkerWriteFailed, + compactorCfg: compactorCfg, + ring: ring, + ringLifecyclerAddr: ringLifecyclerAddr, + ringLifecyclerID: ringLifecyclerID, + limits: limits, + userID: userID, + 
blockFilesConcurrency: blockFilesConcurrency, + blocksFetchConcurrency: blocksFetchConcurrency, + compactionConcurrency: compactionConcurrency, + blockVisitMarkerTimeout: blockVisitMarkerTimeout, + blockVisitMarkerReadFailed: blockVisitMarkerReadFailed, + blockVisitMarkerWriteFailed: blockVisitMarkerWriteFailed, + partitionedGroupInfoReadFailed: partitionedGroupInfoReadFailed, + partitionedGroupInfoWriteFailed: partitionedGroupInfoWriteFailed, + blockPartitionInfoMap: make(map[ulid.ULID]*thanosblock.PartitionInfo), } } @@ -140,6 +152,11 @@ func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (re for _, b := range blocks { key := b.Thanos.GroupKey() mainGroups[key] = append(mainGroups[key], b) + _, err := g.getBlockPartitionInfo(b.ULID) // this will put fetched partition info in g.blockPartitionInfoMap + if err != nil { + level.Warn(g.logger).Log("msg", "unable to get block partition info", "block", b.ULID.String(), "err", err) + } + } // For each group, we have to further split it into set of blocks @@ -164,7 +181,7 @@ func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (re var groups []blocksGroup for _, mainBlocks := range mainGroups { - groups = append(groups, groupBlocksByCompactableRanges(mainBlocks, g.compactorCfg.BlockRanges.ToMilliseconds())...) + groups = append(groups, groupBlocksByCompactableRanges(mainBlocks, g.blockPartitionInfoMap, g.compactorCfg.BlockRanges.ToMilliseconds())...) } // Ensure groups are sorted by smallest range, oldest min time first. The rationale @@ -174,23 +191,23 @@ func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (re sort.SliceStable(groups, func(i, j int) bool { iGroup := groups[i] jGroup := groups[j] - iMinTime := iGroup.minTime() - iMaxTime := iGroup.maxTime() - jMinTime := jGroup.minTime() - jMaxTime := jGroup.maxTime() - iLength := iMaxTime - iMinTime - jLength := jMaxTime - jMinTime + iRangeStart := iGroup.rangeStart + iRangeEnd := iGroup.rangeEnd + jRangeStart := jGroup.rangeStart + jRangeEnd := jGroup.rangeEnd + iLength := iRangeEnd - iRangeStart + jLength := jRangeEnd - jRangeStart if iLength != jLength { return iLength < jLength } - if iMinTime != jMinTime { - return iMinTime < jMinTime + if iRangeStart != jRangeStart { + return iRangeStart < jRangeStart } - iGroupHash := hashGroup(g.userID, iGroup.rangeStart, iGroup.rangeEnd) + iGroupHash := hashGroup(g.userID, iRangeStart, iRangeEnd) iGroupKey := createGroupKey(iGroupHash, iGroup) - jGroupHash := hashGroup(g.userID, jGroup.rangeStart, jGroup.rangeEnd) + jGroupHash := hashGroup(g.userID, jRangeStart, jRangeEnd) jGroupKey := createGroupKey(jGroupHash, jGroup) // Guarantee stable sort for tests. 
return iGroupKey < jGroupKey @@ -212,85 +229,284 @@ mainLoop: groupHash := hashGroup(g.userID, group.rangeStart, group.rangeEnd) - if isVisited, err := g.isGroupVisited(group.blocks, g.ringLifecyclerID); err != nil { - level.Warn(g.logger).Log("msg", "unable to check if blocks in group are visited", "group hash", groupHash, "err", err, "group", group.String()) - continue - } else if isVisited { - level.Info(g.logger).Log("msg", "skipping group because at least one block in group is visited", "group_hash", groupHash) + partitionedGroupInfo, err := g.generatePartitionBlockGroup(group, groupHash) + if err != nil { + level.Warn(g.logger).Log("msg", "unable to update partitioned group info", "partitioned_group_id", groupHash, "err", err) continue } + level.Debug(g.logger).Log("msg", "generated partitioned groups", "groups", partitionedGroupInfo) + + partitionedGroupID := partitionedGroupInfo.PartitionedGroupID + partitionCount := partitionedGroupInfo.PartitionCount + for _, partition := range partitionedGroupInfo.Partitions { + partitionID := partition.PartitionID + partitionedGroup, err := createBlocksGroup(blocks, partition.Blocks, partitionedGroupInfo.RangeStart, partitionedGroupInfo.RangeEnd) + if err != nil { + level.Error(g.logger).Log("msg", "unable to create partitioned group", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "err", err) + continue + } + if isVisited, err := g.isGroupVisited(partitionedGroup.blocks, partitionID, g.ringLifecyclerID); err != nil { + level.Warn(g.logger).Log("msg", "unable to check if blocks in partition are visited", "group hash", groupHash, "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "err", err, "group", group.String()) + continue + } else if isVisited { + level.Info(g.logger).Log("msg", "skipping group because at least one block in partition is visited", "group_hash", groupHash, "partitioned_group_id", partitionedGroupID, "partition_id", partitionID) + continue + } - remainingCompactions++ - groupKey := createGroupKey(groupHash, group) + remainingCompactions++ + partitionedGroupKey := createGroupKeyWithPartitionID(groupHash, partitionID, *partitionedGroup) + + level.Info(g.logger).Log("msg", "found compactable group for user", "group_hash", groupHash, "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "partition_count", partitionCount, "group", partitionedGroup.String()) + blockVisitMarker := BlockVisitMarker{ + VisitTime: time.Now().Unix(), + CompactorID: g.ringLifecyclerID, + Status: Pending, + PartitionedGroupID: partitionedGroupID, + PartitionID: partitionID, + Version: VisitMarkerVersion1, + } + markBlocksVisited(g.ctx, g.bkt, g.logger, partitionedGroup.blocks, blockVisitMarker, g.blockVisitMarkerWriteFailed) + + resolution := partitionedGroup.blocks[0].Thanos.Downsample.Resolution + externalLabels := labels.FromMap(partitionedGroup.blocks[0].Thanos.Labels) + thanosGroup, err := compact.NewGroup( + log.With(g.logger, "groupKey", partitionedGroupKey, "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "partition_count", partitionCount, "rangeStart", partitionedGroup.rangeStartTime().String(), "rangeEnd", partitionedGroup.rangeEndTime().String(), "externalLabels", externalLabels, "downsampleResolution", resolution), + g.bkt, + partitionedGroupKey, + externalLabels, + resolution, + true, // No malformed index. + true, // Enable vertical compaction. 
+ g.compactions.WithLabelValues(partitionedGroupKey), + g.compactionRunsStarted.WithLabelValues(partitionedGroupKey), + g.compactionRunsCompleted.WithLabelValues(partitionedGroupKey), + g.compactionFailures.WithLabelValues(partitionedGroupKey), + g.verticalCompactions.WithLabelValues(partitionedGroupKey), + g.garbageCollectedBlocks, + g.blocksMarkedForDeletion, + g.blocksMarkedForNoCompact, + g.hashFunc, + g.blockFilesConcurrency, + g.blocksFetchConcurrency, + ) + if err != nil { + level.Error(g.logger).Log("msg", "failed to create partitioned group", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "partition_count", partitionCount, "blocks", partition.Blocks) + } - level.Info(g.logger).Log("msg", "found compactable group for user", "group_hash", groupHash, "group", group.String()) - blockVisitMarker := BlockVisitMarker{ - VisitTime: time.Now().Unix(), - CompactorID: g.ringLifecyclerID, - Version: VisitMarkerVersion1, + for _, m := range partitionedGroup.blocks { + if err := thanosGroup.AppendMeta(m); err != nil { + level.Error(g.logger).Log("msg", "failed to add block to partitioned group", "partitioned_group_id", partitionedGroupID, "partition_id", partitionID, "partition_count", partitionCount, "block", m.ULID) + } + } + thanosGroup.SetPartitionInfo(partitionedGroupID, partitionCount, partitionID) + + outGroups = append(outGroups, thanosGroup) + if len(outGroups) >= g.compactionConcurrency { + break mainLoop + } } - markBlocksVisited(g.ctx, g.bkt, g.logger, group.blocks, blockVisitMarker, g.blockVisitMarkerWriteFailed) - - // All the blocks within the same group have the same downsample - // resolution and external labels. - resolution := group.blocks[0].Thanos.Downsample.Resolution - externalLabels := labels.FromMap(group.blocks[0].Thanos.Labels) - - thanosGroup, err := compact.NewGroup( - log.With(g.logger, "groupKey", groupKey, "rangeStart", group.rangeStartTime().String(), "rangeEnd", group.rangeEndTime().String(), "externalLabels", externalLabels, "downsampleResolution", resolution), - g.bkt, - groupKey, - externalLabels, - resolution, - false, // No malformed index. - true, // Enable vertical compaction. 
- g.compactions.WithLabelValues(groupKey), - g.compactionRunsStarted.WithLabelValues(groupKey), - g.compactionRunsCompleted.WithLabelValues(groupKey), - g.compactionFailures.WithLabelValues(groupKey), - g.verticalCompactions.WithLabelValues(groupKey), - g.garbageCollectedBlocks, - g.blocksMarkedForDeletion, - g.blocksMarkedForNoCompact, - g.hashFunc, - g.blockFilesConcurrency, - g.blocksFetchConcurrency, - ) - if err != nil { - return nil, errors.Wrap(err, "create compaction group") + } + + level.Info(g.logger).Log("msg", fmt.Sprintf("total groups for compaction: %d", len(outGroups))) + + return outGroups, nil +} + +func (g *ShuffleShardingGrouper) generatePartitionBlockGroup(group blocksGroup, groupHash uint32) (*PartitionedGroupInfo, error) { + partitionedGroupInfo, err := g.partitionBlockGroup(group, groupHash) + if err != nil { + return nil, err + } + updatedPartitionedGroupInfo, err := UpdatePartitionedGroupInfo(g.ctx, g.bkt, g.logger, *partitionedGroupInfo, g.partitionedGroupInfoReadFailed, g.partitionedGroupInfoWriteFailed) + if err != nil { + level.Warn(g.logger).Log("msg", "unable to update partitioned group info", "partitioned_group_id", partitionedGroupInfo.PartitionedGroupID, "err", err) + return nil, err + } + level.Debug(g.logger).Log("msg", "generated partitioned groups", "groups", updatedPartitionedGroupInfo) + return updatedPartitionedGroupInfo, nil +} + +func (g *ShuffleShardingGrouper) partitionBlockGroup(group blocksGroup, groupHash uint32) (*PartitionedGroupInfo, error) { + partitionCount := g.calculatePartitionCount(group) + blocksByMinTime := g.groupBlocksByMinTime(group) + partitionedGroups, err := g.partitionBlocksGroup(partitionCount, blocksByMinTime, group.rangeStart, group.rangeEnd) + if err != nil { + return nil, err + } + + var partitions []Partition + for partitionID, partitionedGroup := range partitionedGroups { + var blockIDs []ulid.ULID + for _, m := range partitionedGroup.blocks { + blockIDs = append(blockIDs, m.ULID) } + partitions = append(partitions, Partition{ + PartitionID: partitionID, + Blocks: blockIDs, + }) + } + partitionedGroupInfo := PartitionedGroupInfo{ + PartitionedGroupID: groupHash, + PartitionCount: partitionCount, + Partitions: partitions, + RangeStart: group.rangeStart, + RangeEnd: group.rangeEnd, + Version: PartitionedGroupInfoVersion1, + } + return &partitionedGroupInfo, nil +} - for _, m := range group.blocks { - if err := thanosGroup.AppendMeta(m); err != nil { - return nil, errors.Wrap(err, "add block to compaction group") +func (g *ShuffleShardingGrouper) calculatePartitionCount(group blocksGroup) int { + indexSizeLimit := g.compactorCfg.PartitionIndexSizeLimitInBytes + seriesCountLimit := g.compactorCfg.PartitionSeriesCountLimit + totalIndexSizeInBytes := int64(0) + totalSeriesCount := int64(0) + for _, block := range group.blocks { + blockFiles := block.Thanos.Files + totalSeriesCount += int64(block.Stats.NumSeries) + var indexFile *metadata.File + for _, file := range blockFiles { + if file.RelPath == thanosblock.IndexFilename { + indexFile = &file } } + if indexFile == nil { + level.Debug(g.logger).Log("msg", "unable to find index file in metadata", "block", block.ULID) + break + } + indexSize := indexFile.SizeBytes + totalIndexSizeInBytes += indexSize + } + partitionNumberBasedOnIndex := 1 + if indexSizeLimit > 0 && totalIndexSizeInBytes > indexSizeLimit { + partitionNumberBasedOnIndex = g.findNearestPartitionNumber(float64(totalIndexSizeInBytes), float64(indexSizeLimit)) + } + partitionNumberBasedOnSeries := 1 + if 
seriesCountLimit > 0 && totalSeriesCount > seriesCountLimit { + partitionNumberBasedOnSeries = g.findNearestPartitionNumber(float64(totalSeriesCount), float64(seriesCountLimit)) + } + partitionNumber := partitionNumberBasedOnIndex + if partitionNumberBasedOnSeries > partitionNumberBasedOnIndex { + partitionNumber = partitionNumberBasedOnSeries + } + level.Debug(g.logger).Log("msg", "calculated partition number for group", "group", group.String(), "partition_number", partitionNumber, "total_index_size", totalIndexSizeInBytes, "index_size_limit", indexSizeLimit, "total_series_count", totalSeriesCount, "series_count_limit", seriesCountLimit) + return partitionNumber +} + +func (g *ShuffleShardingGrouper) findNearestPartitionNumber(size float64, limit float64) int { + return int(math.Pow(2, math.Ceil(math.Log2(size/limit)))) +} - outGroups = append(outGroups, thanosGroup) - if len(outGroups) == g.compactionConcurrency { - break mainLoop +func (g *ShuffleShardingGrouper) groupBlocksByMinTime(group blocksGroup) map[int64][]*metadata.Meta { + blocksByMinTime := make(map[int64][]*metadata.Meta) + for _, block := range group.blocks { + blockRange := block.MaxTime - block.MinTime + minTime := block.MinTime + for _, tr := range g.compactorCfg.BlockRanges.ToMilliseconds() { + if blockRange <= tr { + minTime = tr * (block.MinTime / tr) + break + } } + blocksByMinTime[minTime] = append(blocksByMinTime[minTime], block) } + return blocksByMinTime +} - level.Info(g.logger).Log("msg", fmt.Sprintf("total groups for compaction: %d", len(outGroups))) +func getBlockPartitionInfoFile(blockID ulid.ULID) string { + return path.Join(blockID.String(), thanosblock.PartitionInfoFilename) +} - return outGroups, nil +func (g *ShuffleShardingGrouper) getBlockPartitionInfo(blockID ulid.ULID) (*thanosblock.PartitionInfo, error) { + if _, ok := g.blockPartitionInfoMap[blockID]; ok { + return g.blockPartitionInfoMap[blockID], nil + } + rc, err := g.bkt.ReaderWithExpectedErrs(g.bkt.IsObjNotFoundErr).Get(g.ctx, getBlockPartitionInfoFile(blockID)) + if err != nil { + if g.bkt.IsObjNotFoundErr(err) { + defaultPartitionInfo := &thanosblock.PartitionInfo{ + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + } + g.blockPartitionInfoMap[blockID] = defaultPartitionInfo + return defaultPartitionInfo, nil + } + return nil, err + } + partitionInfo, err := thanosblock.ReadPartitionInfo(rc) + if err != nil { + return nil, err + } + g.blockPartitionInfoMap[blockID] = partitionInfo + return partitionInfo, nil +} + +func (g *ShuffleShardingGrouper) partitionBlocksGroup(partitionCount int, blocksByMinTime map[int64][]*metadata.Meta, rangeStart int64, rangeEnd int64) (map[int]blocksGroup, error) { + partitionedGroups := make(map[int]blocksGroup) + addToPartitionedGroups := func(blocks []*metadata.Meta, partitionID int) { + if _, ok := partitionedGroups[partitionID]; !ok { + partitionedGroups[partitionID] = blocksGroup{ + rangeStart: rangeStart, + rangeEnd: rangeEnd, + blocks: []*metadata.Meta{}, + } + } + partitionedGroup := partitionedGroups[partitionID] + partitionedGroup.blocks = append(partitionedGroup.blocks, blocks...) 
+ partitionedGroups[partitionID] = partitionedGroup + } + + for _, blocksInSameTimeInterval := range blocksByMinTime { + numOfBlocks := len(blocksInSameTimeInterval) + numBlocksCheck := math.Log2(float64(numOfBlocks)) + if math.Ceil(numBlocksCheck) == math.Floor(numBlocksCheck) { + // Case that number of blocks in this time interval is 2^n, should + // use modulo calculation to find blocks for each partition ID. + for _, block := range blocksInSameTimeInterval { + partitionInfo, err := g.getBlockPartitionInfo(block.ULID) + if err != nil { + return nil, err + } + if numOfBlocks < partitionCount { + for partitionID := partitionInfo.PartitionID; partitionID < partitionCount; partitionID += numOfBlocks { + addToPartitionedGroups([]*metadata.Meta{block}, partitionID) + } + } else if numOfBlocks == partitionCount { + addToPartitionedGroups([]*metadata.Meta{block}, partitionInfo.PartitionID) + } else { + addToPartitionedGroups([]*metadata.Meta{block}, partitionInfo.PartitionID%partitionCount) + } + } + } else { + // Case that number of blocks in this time interval is not 2^n, should + // include all blocks in all partitions. + for partitionID := 0; partitionID < partitionCount; partitionID++ { + addToPartitionedGroups(blocksInSameTimeInterval, partitionID) + } + } + } + return partitionedGroups, nil } -func (g *ShuffleShardingGrouper) isGroupVisited(blocks []*metadata.Meta, compactorID string) (bool, error) { +func (g *ShuffleShardingGrouper) isGroupVisited(blocks []*metadata.Meta, partitionID int, compactorID string) (bool, error) { for _, block := range blocks { blockID := block.ULID.String() - blockVisitMarker, err := ReadBlockVisitMarker(g.ctx, g.bkt, g.logger, blockID, g.blockVisitMarkerReadFailed) + blockVisitMarker, err := ReadBlockVisitMarker(g.ctx, g.bkt, g.logger, blockID, partitionID, g.blockVisitMarkerReadFailed) if err != nil { if errors.Is(err, ErrorBlockVisitMarkerNotFound) { - level.Debug(g.logger).Log("msg", "no visit marker file for block", "blockID", blockID) + level.Warn(g.logger).Log("msg", "no visit marker file for block", "partition_id", partitionID, "block_id", blockID) continue } - level.Error(g.logger).Log("msg", "unable to read block visit marker file", "blockID", blockID, "err", err) + level.Error(g.logger).Log("msg", "unable to read block visit marker file", "partition_id", partitionID, "block_id", blockID, "err", err) return true, err } - if compactorID != blockVisitMarker.CompactorID && blockVisitMarker.isVisited(g.blockVisitMarkerTimeout) { - level.Debug(g.logger).Log("msg", fmt.Sprintf("visited block: %s", blockID)) + if blockVisitMarker.isCompleted() { + level.Info(g.logger).Log("msg", "block visit marker with partition ID is completed", "partition_id", partitionID, "block_id", blockID) + return true, nil + } + if compactorID != blockVisitMarker.CompactorID && blockVisitMarker.isVisited(g.blockVisitMarkerTimeout, partitionID) { + level.Info(g.logger).Log("msg", "visited block with partition ID", "partition_id", partitionID, "block_id", blockID) return true, nil } } @@ -324,6 +540,24 @@ func createGroupKey(groupHash uint32, group blocksGroup) string { return fmt.Sprintf("%v%s", groupHash, group.blocks[0].Thanos.GroupKey()) } +func createGroupKeyWithPartitionID(groupHash uint32, partitionID int, group blocksGroup) string { + return fmt.Sprintf("%v%d%s", groupHash, partitionID, group.blocks[0].Thanos.GroupKey()) +} + +func createBlocksGroup(blocks map[ulid.ULID]*metadata.Meta, blockIDs []ulid.ULID, rangeStart int64, rangeEnd int64) (*blocksGroup, error) { + var 
group blocksGroup
+	group.rangeStart = rangeStart
+	group.rangeEnd = rangeEnd
+	for _, blockID := range blockIDs {
+		if m, ok := blocks[blockID]; !ok {
+			return nil, errors.Errorf("block not found: %s", blockID)
+		} else {
+			group.blocks = append(group.blocks, m)
+		}
+	}
+	return &group, nil
+}
+
 // blocksGroup struct and functions copied and adjusted from https://github.com/cortexproject/cortex/pull/2616
 type blocksGroup struct {
 	rangeStart int64 // Included.
@@ -393,7 +627,7 @@ func (g blocksGroup) maxTime() int64 {
 // to smaller ranges. If a smaller range contains more than 1 block (and thus it should
 // be compacted), the larger range block group is not generated until each of its
 // smaller ranges have 1 block each at most.
-func groupBlocksByCompactableRanges(blocks []*metadata.Meta, ranges []int64) []blocksGroup {
+func groupBlocksByCompactableRanges(blocks []*metadata.Meta, blocksPartitionInfo map[ulid.ULID]*thanosblock.PartitionInfo, ranges []int64) []blocksGroup {
 	if len(blocks) == 0 {
 		return nil
 	}
@@ -421,7 +655,20 @@ func groupBlocksByCompactableRanges(blocks []*metadata.Meta, ranges []int64) []b
 			}
 		}
-		groups = append(groups, group)
+		firstBlockPartitionInfo, ok := blocksPartitionInfo[group.blocks[0].ULID]
+		if !ok {
+			continue nextGroup
+		}
+		for _, block := range group.blocks {
+			blockPartitionInfo, ok := blocksPartitionInfo[block.ULID]
+			if !ok {
+				continue nextGroup
+			}
+			if blockPartitionInfo.PartitionedGroupID <= 0 || blockPartitionInfo.PartitionedGroupID != firstBlockPartitionInfo.PartitionedGroupID {
+				groups = append(groups, group)
+				continue nextGroup
+			}
+		}
 	}
 }
@@ -439,10 +686,10 @@ func groupBlocksByCompactableRanges(blocks []*metadata.Meta, ranges []int64) []b
 	}
 	// If the group covers the full range, it's fine.
-	if group.maxTime()-group.minTime() == group.rangeLength() {
-		idx++
-		continue
-	}
+	//if group.maxTime()-group.minTime() == group.rangeLength() {
+	//	idx++
+	//	continue
+	//}
 	// If the group's maxTime is after 1 block range, we can compact assuming that
 	// all the required blocks have already been uploaded.
@@ -484,6 +731,11 @@ func groupBlocksByRange(blocks []*metadata.Meta, tr int64) []blocksGroup {
 			continue
 		}
+		if skipHighLevelBlock(m, tr) {
+			i++
+			continue
+		}
+
 		// Add all blocks to the current group that are within [t0, t0+tr].
 		for ; i < len(blocks); i++ {
 			// If the block does not start within this group, then we should break the iteration
@@ -498,6 +750,10 @@ func groupBlocksByRange(blocks []*metadata.Meta, tr int64) []blocksGroup {
 				continue
 			}
+			if skipHighLevelBlock(blocks[i], tr) {
+				continue
+			}
+
 			group.blocks = append(group.blocks, blocks[i])
 		}
@@ -509,6 +765,14 @@ func groupBlocksByRange(blocks []*metadata.Meta, tr int64) []blocksGroup {
 	return ret
 }
+func skipHighLevelBlock(block *metadata.Meta, tr int64) bool {
+	// Skip blocks whose rounded range equals tr and whose compaction level is > 1.
+	// Because tr is divisible by the previous tr, a block range falling in
+	// (tr/2, tr] is rounded up to tr.
+	blockRange := block.MaxTime - block.MinTime
+	return blockRange <= tr && blockRange > tr/2 && block.Compaction.Level > 1
+}
+
 func getRangeStart(m *metadata.Meta, tr int64) int64 {
 	// Compute start of aligned time range of size tr closest to the current block's start.
 	// This code has been copied from TSDB.
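The partition count used above is derived in calculatePartitionCount: whichever limit (index size or series count) is exceeded by the larger factor wins, and findNearestPartitionNumber rounds that factor up to the next power of two. A minimal, self-contained sketch of the arithmetic, assuming the totals and limits are passed in as plain int64 values instead of being read from block metadata and the compactor config:

package main

import (
	"fmt"
	"math"
)

// nextPowerOfTwoPartitions mirrors findNearestPartitionNumber: the smallest
// power of two that brings size/limit down to at most one unit per partition.
func nextPowerOfTwoPartitions(size, limit float64) int {
	return int(math.Pow(2, math.Ceil(math.Log2(size/limit))))
}

// partitionCount mirrors calculatePartitionCount: each limit that is set (> 0)
// and exceeded proposes a partition number, and the larger proposal wins.
func partitionCount(totalIndexBytes, indexLimit, totalSeries, seriesLimit int64) int {
	byIndex, bySeries := 1, 1
	if indexLimit > 0 && totalIndexBytes > indexLimit {
		byIndex = nextPowerOfTwoPartitions(float64(totalIndexBytes), float64(indexLimit))
	}
	if seriesLimit > 0 && totalSeries > seriesLimit {
		bySeries = nextPowerOfTwoPartitions(float64(totalSeries), float64(seriesLimit))
	}
	if bySeries > byIndex {
		return bySeries
	}
	return byIndex
}

func main() {
	// 14 bytes of index against a 64-byte limit: limit not exceeded, 1 partition.
	fmt.Println(partitionCount(14, 64, 0, 0)) // 1
	// 200 bytes against a 64-byte limit: 200/64 = 3.125, rounded up to 2^2 = 4 partitions.
	fmt.Println(partitionCount(200, 64, 0, 0)) // 4
}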
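partitionBlocksGroup then assigns blocks to those partitions one minTime bucket at a time: a bucket holding a power-of-two number of blocks is spread according to each block's previous PartitionID (fanned out when the bucket is smaller than the partition count, mapped one-to-one when equal, folded by modulo when larger), while any other bucket is copied into every partition. A rough illustration of the three cases, assuming plain ints stand in for the blocks and carry their previous partition IDs:

package main

import (
	"fmt"
	"math"
)

// assign mirrors the per-bucket logic of partitionBlocksGroup for a single
// minTime bucket whose blocks are represented by their previous partition IDs.
func assign(sourcePartitionIDs []int, partitionCount int) map[int][]int {
	out := make(map[int][]int)
	n := len(sourcePartitionIDs)
	log2n := math.Log2(float64(n))
	if math.Ceil(log2n) != math.Floor(log2n) {
		// Not 2^n blocks in this bucket: every partition receives all of them.
		for p := 0; p < partitionCount; p++ {
			out[p] = append(out[p], sourcePartitionIDs...)
		}
		return out
	}
	for _, id := range sourcePartitionIDs {
		switch {
		case n < partitionCount:
			// Fan the block out to every n-th partition starting at its previous ID.
			for p := id; p < partitionCount; p += n {
				out[p] = append(out[p], id)
			}
		case n == partitionCount:
			out[id] = append(out[id], id)
		default:
			out[id%partitionCount] = append(out[id%partitionCount], id)
		}
	}
	return out
}

func main() {
	// Eight blocks with previous partition IDs 0..7 folded into 4 partitions:
	// partition 0 gets 0 and 4, partition 1 gets 1 and 5, and so on.
	fmt.Println(assign([]int{0, 1, 2, 3, 4, 5, 6, 7}, 4))
	// Two blocks fanned out across 4 partitions: block 0 lands in 0 and 2,
	// block 1 lands in 1 and 3.
	fmt.Println(assign([]int{0, 1}, 4))
}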
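skipHighLevelBlock above keeps blocks that were already compacted for a range from being re-grouped at that same range: because each compaction range is a multiple of the previous one, a block spanning (tr/2, tr] is treated as already rounded to tr, and it is skipped whenever its compaction level is above 1. A small sketch of the predicate with hypothetical timestamps:

package main

import "fmt"

// shouldSkip mirrors skipHighLevelBlock using raw min/max timestamps and a
// compaction level instead of *metadata.Meta.
func shouldSkip(minTime, maxTime, tr int64, level int) bool {
	blockRange := maxTime - minTime
	return blockRange <= tr && blockRange > tr/2 && level > 1
}

func main() {
	twoHours := int64(2 * 60 * 60 * 1000)
	// A level-2 block spanning a full 2h range is skipped when grouping at tr=2h ...
	fmt.Println(shouldSkip(0, twoHours, twoHours, 2)) // true
	// ... but a level-1 block of the same span is kept,
	fmt.Println(shouldSkip(0, twoHours, twoHours, 1)) // false
	// ... and so is the level-2 block when grouping at the larger tr=12h range.
	fmt.Println(shouldSkip(0, twoHours, 6*twoHours, 2)) // false
}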
diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index d74224ed831..6a69b88f1cf 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/json" - "path" "testing" "time" @@ -17,10 +16,12 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" + thanosblock "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/storage/bucket" + cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -336,19 +337,15 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { Name: "cortex_compactor_remaining_planned_compactions", Help: "Total number of plans that remain to be compacted.", }) - blockVisitMarkerReadFailed := promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_block_visit_marker_read_failed", - Help: "Number of block visit marker file failed to be read.", - }) - blockVisitMarkerWriteFailed := promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_block_visit_marker_write_failed", - Help: "Number of block visit marker file failed to be written.", - }) + blockVisitMarkerReadFailed := prometheus.NewCounter(prometheus.CounterOpts{}) + blockVisitMarkerWriteFailed := prometheus.NewCounter(prometheus.CounterOpts{}) + partitionedGroupInfoReadFailed := prometheus.NewCounter(prometheus.CounterOpts{}) + partitionedGroupInfoWriteFailed := prometheus.NewCounter(prometheus.CounterOpts{}) bkt := &bucket.ClientMock{} blockVisitMarkerTimeout := 5 * time.Minute for _, visitedBlock := range testData.visitedBlocks { - visitMarkerFile := path.Join(visitedBlock.id.String(), BlockVisitMarkerFile) + visitMarkerFile := getBlockVisitMarkerFile(visitedBlock.id.String(), 0) expireTime := time.Now() if visitedBlock.isExpired { expireTime = expireTime.Add(-1 * blockVisitMarkerTimeout) @@ -361,6 +358,15 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { visitMarkerFileContent, _ := json.Marshal(blockVisitMarker) bkt.MockGet(visitMarkerFile, string(visitMarkerFileContent), nil) } + for _, block := range testData.blocks { + partitionInfo := thanosblock.PartitionInfo{ + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + } + partitionInfoContent, _ := json.Marshal(partitionInfo) + bkt.MockGet(getBlockPartitionInfoFile(block.ULID), string(partitionInfoContent), nil) + } bkt.MockUpload(mock.Anything, nil) bkt.MockGet(mock.Anything, "", nil) @@ -390,6 +396,8 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { blockVisitMarkerTimeout, blockVisitMarkerReadFailed, blockVisitMarkerWriteFailed, + partitionedGroupInfoReadFailed, + partitionedGroupInfoWriteFailed, ) actual, err := g.Groups(testData.blocks) require.NoError(t, err) @@ -406,180 +414,384 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { } func TestGroupBlocksByCompactableRanges(t *testing.T) { + block1Ulid := ulid.MustNew(1, nil) + block2Ulid := ulid.MustNew(2, nil) + block3Ulid := ulid.MustNew(3, nil) + block4Ulid := ulid.MustNew(4, nil) + block5Ulid := ulid.MustNew(5, nil) + block6Ulid := ulid.MustNew(6, nil) + block7Ulid := ulid.MustNew(7, nil) + block8Ulid := ulid.MustNew(8, nil) + block9Ulid := ulid.MustNew(9, nil) + + defaultPartitionInfo := 
&thanosblock.PartitionInfo{ + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + } + tests := map[string]struct { - ranges []int64 - blocks []*metadata.Meta - expected []blocksGroup + ranges []int64 + blocks []*metadata.Meta + blocksPartitionInfo map[ulid.ULID]*thanosblock.PartitionInfo + expected []blocksGroup }{ "no input blocks": { - ranges: []int64{20}, - blocks: nil, - expected: nil, + ranges: []int64{20}, + blocks: nil, + blocksPartitionInfo: nil, + expected: nil, }, "only 1 block in input": { ranges: []int64{20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, }, expected: nil, }, "only 1 block for each range (single range)": { ranges: []int64{20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 60}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, }, expected: nil, }, "only 1 block for each range (multiple ranges)": { ranges: []int64{10, 20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 60}}, }, expected: nil, }, "input blocks can be compacted on the 1st range only": { ranges: []int64{20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 25, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 50}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 50, MaxTime: 60}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 25, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 40, MaxTime: 50}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 50, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block7Ulid, MinTime: 60, MaxTime: 70}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, + block4Ulid: defaultPartitionInfo, + block5Ulid: defaultPartitionInfo, + block6Ulid: defaultPartitionInfo, + block7Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: 
tsdb.BlockMeta{MinTime: 25, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 25, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, }}, {rangeStart: 40, rangeEnd: 60, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 50}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 50, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 40, MaxTime: 50}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 50, MaxTime: 60}}, }}, }, }, "input blocks can be compacted on the 2nd range only": { ranges: []int64{10, 20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 70, MaxTime: 80}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, + block4Ulid: defaultPartitionInfo, + block5Ulid: defaultPartitionInfo, + block6Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 30, MaxTime: 40}}, }}, {rangeStart: 60, rangeEnd: 80, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 70, MaxTime: 80}}, }}, }, }, "input blocks can be compacted on a mix of 1st and 2nd ranges, guaranteeing no overlaps and giving preference to smaller ranges": { ranges: []int64{10, 20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 7, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 75, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 7, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: 
tsdb.BlockMeta{ULID: block5Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block7Ulid, MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block8Ulid, MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block9Ulid, MinTime: 75, MaxTime: 80}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, + block4Ulid: defaultPartitionInfo, + block5Ulid: defaultPartitionInfo, + block6Ulid: defaultPartitionInfo, + block7Ulid: defaultPartitionInfo, + block8Ulid: defaultPartitionInfo, + block9Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 0, rangeEnd: 10, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 7, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 7, MaxTime: 10}}, }}, {rangeStart: 70, rangeEnd: 80, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 75, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block8Ulid, MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block9Ulid, MinTime: 75, MaxTime: 80}}, }}, {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 30, MaxTime: 40}}, }}, }, }, "input blocks have already been compacted with the largest range": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 40, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, }, expected: nil, }, "input blocks match the largest range but can be compacted because overlapping": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 40, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, + block4Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 80, rangeEnd: 120, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 
120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, }}, }, }, "a block with time range crossing two 1st level ranges should be NOT considered for 1st level compaction": { ranges: []int64{20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, + block4Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, }}, }, }, "a block with time range crossing two 1st level ranges should BE considered for 2nd level compaction": { ranges: []int64{20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 40}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 0, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 40}}, }}, }, }, "a block with time range larger then the largest compaction range should NOT be considered for compaction": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 150}}, // This block is larger then the largest compaction range. 
- {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 30, MaxTime: 150}}, // This block is larger then the largest compaction range. + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 80, MaxTime: 120}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: defaultPartitionInfo, + block2Ulid: defaultPartitionInfo, + block3Ulid: defaultPartitionInfo, + block4Ulid: defaultPartitionInfo, + block5Ulid: defaultPartitionInfo, }, expected: []blocksGroup{ {rangeStart: 80, rangeEnd: 120, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 80, MaxTime: 120}}, + }}, + }, + }, + "a group with all blocks having same partitioned group id should be ignored": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: { + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 0, + }, + block2Ulid: { + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 1, + }, + block3Ulid: { + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 2, + }, + block4Ulid: defaultPartitionInfo, + block5Ulid: defaultPartitionInfo, + }, + expected: []blocksGroup{ + {rangeStart: 10, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, + }}, + }, + }, + "a group with all blocks having partitioned group id is 0 should not be ignored": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: { + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + }, + block2Ulid: { + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + }, + block3Ulid: { + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + }, + block4Ulid: defaultPartitionInfo, + block5Ulid: defaultPartitionInfo, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 10, 
blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10}}, + }}, + {rangeStart: 10, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, + }}, + }, + }, + "a group with blocks from two different partitioned groups": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + }, + blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ + block1Ulid: { + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 0, + }, + block2Ulid: { + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 1, + }, + block3Ulid: { + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 2, + }, + block4Ulid: { + PartitionedGroupID: uint32(54321), + PartitionCount: 2, + PartitionID: 0, + }, + block5Ulid: { + PartitionedGroupID: uint32(54321), + PartitionCount: 2, + PartitionID: 1, + }, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, }}, }, }, @@ -587,7 +799,7 @@ func TestGroupBlocksByCompactableRanges(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - assert.Equal(t, testData.expected, groupBlocksByCompactableRanges(testData.blocks, testData.ranges)) + assert.Equal(t, testData.expected, groupBlocksByCompactableRanges(testData.blocks, testData.blocksPartitionInfo, testData.ranges)) }) } } @@ -723,6 +935,558 @@ func TestBlocksGroup_overlaps(t *testing.T) { } } +func TestGroupPartitioning(t *testing.T) { + t0block1Ulid := ulid.MustNew(1, nil) + t0block2Ulid := ulid.MustNew(2, nil) + t0block3Ulid := ulid.MustNew(3, nil) + t1block1Ulid := ulid.MustNew(4, nil) + t1block2Ulid := ulid.MustNew(5, nil) + t2block1Ulid := ulid.MustNew(6, nil) + t2block2Ulid := ulid.MustNew(7, nil) + t2block3Ulid := ulid.MustNew(8, nil) + t2block4Ulid := ulid.MustNew(9, nil) + t3block1Ulid := ulid.MustNew(10, nil) + t3block2Ulid := ulid.MustNew(11, nil) + t3block3Ulid := ulid.MustNew(12, nil) + t3block4Ulid := ulid.MustNew(13, nil) + t3block5Ulid := ulid.MustNew(14, nil) + 
t3block6Ulid := ulid.MustNew(15, nil) + t3block7Ulid := ulid.MustNew(16, nil) + t3block8Ulid := ulid.MustNew(17, nil) + t4block1Ulid := ulid.MustNew(18, nil) + t5block1Ulid := ulid.MustNew(19, nil) + + blocks := + map[ulid.ULID]*metadata.Meta{ + t0block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t0block1Ulid, MinTime: 1 * time.Hour.Milliseconds(), MaxTime: 3 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t0block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t0block2Ulid, MinTime: 1 * time.Hour.Milliseconds(), MaxTime: 3 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t0block3Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t0block3Ulid, MinTime: 1 * time.Hour.Milliseconds(), MaxTime: 3 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t1block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t1block1Ulid, MinTime: 3 * time.Hour.Milliseconds(), MaxTime: 5 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t1block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t1block2Ulid, MinTime: 3 * time.Hour.Milliseconds(), MaxTime: 5 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block1Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block2Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block3Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block3Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block4Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block4Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block1Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block2Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block3Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block3Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block4Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block4Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block5Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block5Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block6Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block6Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, 
+ }, + t3block7Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block7Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block8Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block8Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t4block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t4block1Ulid, MinTime: 9 * time.Hour.Milliseconds(), MaxTime: 15 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t5block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t5block1Ulid, MinTime: 15 * time.Hour.Milliseconds(), MaxTime: 21 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + } + + testCompactorID := "test-compactor" + + tests := map[string]struct { + ranges []time.Duration + rangeStart int64 + rangeEnd int64 + indexSize int64 + indexLimit int64 + seriesCount int64 + seriesLimit int64 + blocks map[*metadata.Meta]int + expected struct { + partitionCount int + partitions map[int][]ulid.ULID + } + }{ + "test blocks generated by partition": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + indexSize: int64(14), + indexLimit: int64(64), + blocks: map[*metadata.Meta]int{ + blocks[t1block1Ulid]: 0, blocks[t1block2Ulid]: 1, + blocks[t2block1Ulid]: 0, blocks[t2block2Ulid]: 1, blocks[t2block3Ulid]: 2, blocks[t2block4Ulid]: 3, + blocks[t3block1Ulid]: 0, blocks[t3block2Ulid]: 1, blocks[t3block3Ulid]: 2, blocks[t3block4Ulid]: 3, + blocks[t3block5Ulid]: 4, blocks[t3block6Ulid]: 5, blocks[t3block7Ulid]: 6, blocks[t3block8Ulid]: 7}, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 4, + partitions: map[int][]ulid.ULID{ + 0: {t1block1Ulid, t2block1Ulid, t3block1Ulid, t3block5Ulid}, + 1: {t1block2Ulid, t2block2Ulid, t3block2Ulid, t3block6Ulid}, + 2: {t1block1Ulid, t2block3Ulid, t3block3Ulid, t3block7Ulid}, + 3: {t1block2Ulid, t2block4Ulid, t3block4Ulid, t3block8Ulid}, + }, + }, + }, + "test all level 1 blocks": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + indexSize: int64(30), + indexLimit: int64(64), + blocks: map[*metadata.Meta]int{ + blocks[t0block1Ulid]: 0, blocks[t0block2Ulid]: 0, blocks[t0block3Ulid]: 0, + }, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 2, + partitions: map[int][]ulid.ULID{ + 0: {t0block1Ulid, t0block2Ulid, t0block3Ulid}, + 1: {t0block1Ulid, t0block2Ulid, t0block3Ulid}, + }, + }, + }, + "test high level blocks generated without partitioning": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + indexSize: int64(50), + indexLimit: int64(64), + blocks: map[*metadata.Meta]int{ + blocks[t4block1Ulid]: 0, blocks[t5block1Ulid]: 0, + }, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 2, + partitions: map[int][]ulid.ULID{ + 0: {t4block1Ulid, t5block1Ulid}, + 1: {t4block1Ulid, t5block1Ulid}, + }, + }, + }, + "test blocks generated by partition with series limit": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * 
time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + seriesCount: int64(14), + seriesLimit: int64(64), + blocks: map[*metadata.Meta]int{ + blocks[t1block1Ulid]: 0, blocks[t1block2Ulid]: 1, + blocks[t2block1Ulid]: 0, blocks[t2block2Ulid]: 1, blocks[t2block3Ulid]: 2, blocks[t2block4Ulid]: 3, + blocks[t3block1Ulid]: 0, blocks[t3block2Ulid]: 1, blocks[t3block3Ulid]: 2, blocks[t3block4Ulid]: 3, + blocks[t3block5Ulid]: 4, blocks[t3block6Ulid]: 5, blocks[t3block7Ulid]: 6, blocks[t3block8Ulid]: 7}, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 4, + partitions: map[int][]ulid.ULID{ + 0: {t1block1Ulid, t2block1Ulid, t3block1Ulid, t3block5Ulid}, + 1: {t1block2Ulid, t2block2Ulid, t3block2Ulid, t3block6Ulid}, + 2: {t1block1Ulid, t2block3Ulid, t3block3Ulid, t3block7Ulid}, + 3: {t1block2Ulid, t2block4Ulid, t3block4Ulid, t3block8Ulid}, + }, + }, + }, + "test blocks generated by partition with both index and series limit set": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + indexSize: int64(1), + indexLimit: int64(64), + seriesCount: int64(14), + seriesLimit: int64(64), + blocks: map[*metadata.Meta]int{ + blocks[t1block1Ulid]: 0, blocks[t1block2Ulid]: 1, + blocks[t2block1Ulid]: 0, blocks[t2block2Ulid]: 1, blocks[t2block3Ulid]: 2, blocks[t2block4Ulid]: 3, + blocks[t3block1Ulid]: 0, blocks[t3block2Ulid]: 1, blocks[t3block3Ulid]: 2, blocks[t3block4Ulid]: 3, + blocks[t3block5Ulid]: 4, blocks[t3block6Ulid]: 5, blocks[t3block7Ulid]: 6, blocks[t3block8Ulid]: 7}, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 4, + partitions: map[int][]ulid.ULID{ + 0: {t1block1Ulid, t2block1Ulid, t3block1Ulid, t3block5Ulid}, + 1: {t1block2Ulid, t2block2Ulid, t3block2Ulid, t3block6Ulid}, + 2: {t1block1Ulid, t2block3Ulid, t3block3Ulid, t3block7Ulid}, + 3: {t1block2Ulid, t2block4Ulid, t3block4Ulid, t3block8Ulid}, + }, + }, + }, + "test blocks generated by partition with partition number equals to 1": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + blocks: map[*metadata.Meta]int{ + blocks[t1block1Ulid]: 0, blocks[t1block2Ulid]: 1, + blocks[t2block1Ulid]: 0, blocks[t2block2Ulid]: 1, blocks[t2block3Ulid]: 2, blocks[t2block4Ulid]: 3, + blocks[t3block1Ulid]: 0, blocks[t3block2Ulid]: 1, blocks[t3block3Ulid]: 2, blocks[t3block4Ulid]: 3, + blocks[t3block5Ulid]: 4, blocks[t3block6Ulid]: 5, blocks[t3block7Ulid]: 6, blocks[t3block8Ulid]: 7}, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 1, + partitions: map[int][]ulid.ULID{ + 0: {t1block1Ulid, t1block2Ulid, t2block1Ulid, t2block2Ulid, t2block3Ulid, t2block4Ulid, t3block1Ulid, + t3block2Ulid, t3block3Ulid, t3block4Ulid, t3block5Ulid, t3block6Ulid, t3block7Ulid, t3block8Ulid}, + }, + }, + }, + "test blocks generated by partition in random order": { + ranges: []time.Duration{2 * time.Hour, 6 * time.Hour}, + rangeStart: 1 * time.Hour.Milliseconds(), + rangeEnd: 9 * time.Hour.Milliseconds(), + indexSize: int64(14), + indexLimit: int64(64), + blocks: map[*metadata.Meta]int{ + blocks[t1block2Ulid]: 1, blocks[t1block1Ulid]: 0, + blocks[t2block4Ulid]: 3, blocks[t2block1Ulid]: 0, blocks[t2block3Ulid]: 2, blocks[t2block2Ulid]: 1, + blocks[t3block1Ulid]: 0, blocks[t3block6Ulid]: 5, blocks[t3block3Ulid]: 2, blocks[t3block5Ulid]: 4, + 
blocks[t3block2Ulid]: 1, blocks[t3block7Ulid]: 6, blocks[t3block4Ulid]: 3, blocks[t3block8Ulid]: 7}, + expected: struct { + partitionCount int + partitions map[int][]ulid.ULID + }{ + partitionCount: 4, + partitions: map[int][]ulid.ULID{ + 0: {t1block1Ulid, t2block1Ulid, t3block1Ulid, t3block5Ulid}, + 1: {t1block2Ulid, t2block2Ulid, t3block2Ulid, t3block6Ulid}, + 2: {t1block1Ulid, t2block3Ulid, t3block3Ulid, t3block7Ulid}, + 3: {t1block2Ulid, t2block4Ulid, t3block4Ulid, t3block8Ulid}, + }, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + compactorCfg := &Config{ + BlockRanges: testData.ranges, + PartitionIndexSizeLimitInBytes: testData.indexLimit, + PartitionSeriesCountLimit: testData.seriesLimit, + } + + limits := &validation.Limits{} + overrides, err := validation.NewOverrides(*limits, nil) + require.NoError(t, err) + + ring := &RingMock{} + + bkt := &bucket.ClientMock{} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + g := NewShuffleShardingGrouper( + ctx, + nil, + objstore.WithNoopInstr(bkt), + false, // Do not accept malformed indexes + true, // Enable vertical compaction + nil, + nil, + nil, + nil, + nil, + metadata.NoneFunc, + *compactorCfg, + ring, + "test-addr", + testCompactorID, + overrides, + "", + 10, + 3, + 1, + 5*time.Minute, + nil, + nil, + nil, + nil, + ) + var testBlocks []*metadata.Meta + for block, partitionID := range testData.blocks { + block.Thanos.Files = []metadata.File{ + {RelPath: thanosblock.IndexFilename, SizeBytes: testData.indexSize}, + } + block.Stats.NumSeries = uint64(testData.seriesCount) + testBlocks = append(testBlocks, block) + partitionInfo := thanosblock.PartitionInfo{ + PartitionID: partitionID, + } + partitionInfoContent, _ := json.Marshal(partitionInfo) + bkt.MockGet(getBlockPartitionInfoFile(block.ULID), string(partitionInfoContent), nil) + } + testGroup := blocksGroup{ + rangeStart: testData.rangeStart, + rangeEnd: testData.rangeEnd, + blocks: testBlocks, + } + actual, err := g.partitionBlockGroup(testGroup, uint32(0)) + require.NoError(t, err) + require.Equal(t, testData.expected.partitionCount, actual.PartitionCount) + require.Len(t, actual.Partitions, len(testData.expected.partitions)) + for _, actualPartition := range actual.Partitions { + actualPartitionID := actualPartition.PartitionID + require.ElementsMatch(t, testData.expected.partitions[actualPartitionID], actualPartition.Blocks) + } + }) + } +} + +func TestPartitionStrategyChange_shouldUseOriginalPartitionedGroup(t *testing.T) { + t1block1Ulid := ulid.MustNew(4, nil) + t1block2Ulid := ulid.MustNew(5, nil) + t2block1Ulid := ulid.MustNew(6, nil) + t2block2Ulid := ulid.MustNew(7, nil) + t2block3Ulid := ulid.MustNew(8, nil) + t2block4Ulid := ulid.MustNew(9, nil) + t3block1Ulid := ulid.MustNew(10, nil) + t3block2Ulid := ulid.MustNew(11, nil) + t3block3Ulid := ulid.MustNew(12, nil) + t3block4Ulid := ulid.MustNew(13, nil) + t3block5Ulid := ulid.MustNew(14, nil) + t3block6Ulid := ulid.MustNew(15, nil) + t3block7Ulid := ulid.MustNew(16, nil) + t3block8Ulid := ulid.MustNew(17, nil) + + blocks := + map[ulid.ULID]*metadata.Meta{ + t1block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t1block1Ulid, MinTime: 3 * time.Hour.Milliseconds(), MaxTime: 5 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t1block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t1block2Ulid, MinTime: 3 * time.Hour.Milliseconds(), MaxTime: 5 * time.Hour.Milliseconds()}, + Thanos: 
metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block1Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block2Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block3Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block3Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t2block4Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t2block4Ulid, MinTime: 5 * time.Hour.Milliseconds(), MaxTime: 7 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block1Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block1Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block2Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block2Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block3Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block3Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block4Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block4Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block5Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block5Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block6Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block6Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block7Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block7Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + t3block8Ulid: { + BlockMeta: tsdb.BlockMeta{ULID: t3block8Ulid, MinTime: 7 * time.Hour.Milliseconds(), MaxTime: 9 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"external": "1"}}, + }, + } + + partitionedGroupID := uint32(12345) + indexSize := int64(10) + seriesCount := int64(10) + testRanges := []time.Duration{2 * time.Hour, 6 * time.Hour} + testRangeStart := 1 * time.Hour.Milliseconds() + testRangeEnd := 9 * time.Hour.Milliseconds() + testBlocks := map[*metadata.Meta]int{ + blocks[t1block1Ulid]: 0, blocks[t1block2Ulid]: 1, + blocks[t2block1Ulid]: 0, blocks[t2block2Ulid]: 1, blocks[t2block3Ulid]: 2, blocks[t2block4Ulid]: 3, + blocks[t3block1Ulid]: 0, blocks[t3block2Ulid]: 1, blocks[t3block3Ulid]: 2, blocks[t3block4Ulid]: 3, + blocks[t3block5Ulid]: 4, blocks[t3block6Ulid]: 5, blocks[t3block7Ulid]: 6, blocks[t3block8Ulid]: 7, + } + bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + var updatedTestBlocks []*metadata.Meta + for block, partitionID := range testBlocks { + block.Thanos.Files = 
[]metadata.File{ + {RelPath: thanosblock.IndexFilename, SizeBytes: indexSize}, + } + block.Stats.NumSeries = uint64(seriesCount) + updatedTestBlocks = append(updatedTestBlocks, block) + partitionInfo := thanosblock.PartitionInfo{ + PartitionID: partitionID, + } + partitionInfoContent, _ := json.Marshal(partitionInfo) + bkt.Upload(context.Background(), getBlockPartitionInfoFile(block.ULID), bytes.NewReader(partitionInfoContent)) + } + testGroup := blocksGroup{ + rangeStart: testRangeStart, + rangeEnd: testRangeEnd, + blocks: updatedTestBlocks, + } + createGrouper := func(bkt objstore.Bucket, compactorCfg *Config) *ShuffleShardingGrouper { + limits := &validation.Limits{} + overrides, err := validation.NewOverrides(*limits, nil) + require.NoError(t, err) + + ring := &RingMock{} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return NewShuffleShardingGrouper( + ctx, + nil, + objstore.WithNoopInstr(bkt), + false, // Do not accept malformed indexes + true, // Enable vertical compaction + nil, + nil, + nil, + nil, + nil, + metadata.NoneFunc, + *compactorCfg, + ring, + "test-addr", + "test-compactor", + overrides, + "", + 10, + 3, + 1, + 5*time.Minute, + nil, + nil, + nil, + nil, + ) + } + + expectedPartitions := map[int][]ulid.ULID{ + 0: {t1block1Ulid, t2block1Ulid, t3block1Ulid, t3block5Ulid}, + 1: {t1block2Ulid, t2block2Ulid, t3block2Ulid, t3block6Ulid}, + 2: {t1block1Ulid, t2block3Ulid, t3block3Ulid, t3block7Ulid}, + 3: {t1block2Ulid, t2block4Ulid, t3block4Ulid, t3block8Ulid}, + } + + // test base case + compactorCfg1 := &Config{ + BlockRanges: testRanges, + PartitionIndexSizeLimitInBytes: int64(40), + } + grouper1 := createGrouper(bkt, compactorCfg1) + partitionedGroup1, err := grouper1.generatePartitionBlockGroup(testGroup, partitionedGroupID) + require.NoError(t, err) + require.Equal(t, 4, partitionedGroup1.PartitionCount) + require.Len(t, partitionedGroup1.Partitions, 4) + partitionMap := make(map[int][]ulid.ULID) + for _, partition := range partitionedGroup1.Partitions { + partitionID := partition.PartitionID + require.ElementsMatch(t, expectedPartitions[partitionID], partition.Blocks) + partitionMap[partitionID] = partition.Blocks + } + + // test limit increased + compactorCfg2 := &Config{ + BlockRanges: testRanges, + PartitionIndexSizeLimitInBytes: int64(80), + } + grouper2 := createGrouper(bkt, compactorCfg2) + partitionedGroup2, err := grouper2.generatePartitionBlockGroup(testGroup, partitionedGroupID) + require.NoError(t, err) + require.Equal(t, partitionedGroup1.PartitionCount, partitionedGroup2.PartitionCount) + require.Len(t, partitionedGroup2.Partitions, len(partitionedGroup1.Partitions)) + for _, partition := range partitionedGroup2.Partitions { + partitionID := partition.PartitionID + require.ElementsMatch(t, partitionMap[partitionID], partition.Blocks) + } + + // test limit decreased + compactorCfg3 := &Config{ + BlockRanges: testRanges, + PartitionIndexSizeLimitInBytes: int64(20), + } + grouper3 := createGrouper(bkt, compactorCfg3) + partitionedGroup3, err := grouper3.generatePartitionBlockGroup(testGroup, partitionedGroupID) + require.NoError(t, err) + require.Equal(t, partitionedGroup1.PartitionCount, partitionedGroup3.PartitionCount) + require.Len(t, partitionedGroup3.Partitions, len(partitionedGroup1.Partitions)) + for _, partition := range partitionedGroup3.Partitions { + partitionID := partition.PartitionID + require.ElementsMatch(t, partitionMap[partitionID], partition.Blocks) + } +} + type RingMock struct { mock.Mock } diff --git 
a/pkg/compactor/shuffle_sharding_planner.go b/pkg/compactor/shuffle_sharding_planner.go index edb579b8289..6d3305efe45 100644 --- a/pkg/compactor/shuffle_sharding_planner.go +++ b/pkg/compactor/shuffle_sharding_planner.go @@ -3,10 +3,12 @@ package compactor import ( "context" "fmt" + "github.com/go-kit/log/level" "time" "github.com/go-kit/log" "github.com/oklog/ulid" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -52,6 +54,10 @@ func NewShuffleShardingPlanner( } func (p *ShuffleShardingPlanner) Plan(_ context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + return nil, errors.New("not support without partitioning") +} + +func (p *ShuffleShardingPlanner) PlanWithPartition(_ context.Context, metasByMinTime []*metadata.Meta, partitionID int, errChan chan error) ([]*metadata.Meta, error) { // Ensure all blocks fits within the largest range. This is a double check // to ensure there's no bug in the previous blocks grouping, given this Plan() // is just a pass-through. @@ -62,6 +68,7 @@ func (p *ShuffleShardingPlanner) Plan(_ context.Context, metasByMinTime []*metad noCompactMarked := p.noCompBlocksFunc() resultMetas := make([]*metadata.Meta, 0, len(metasByMinTime)) + var partitionGroupID uint32 for _, b := range metasByMinTime { blockID := b.ULID.String() if _, excluded := noCompactMarked[b.ULID]; excluded { @@ -72,24 +79,29 @@ func (p *ShuffleShardingPlanner) Plan(_ context.Context, metasByMinTime []*metad return nil, fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", blockID, b.MinTime, b.MaxTime, rangeStart, rangeEnd) } - blockVisitMarker, err := ReadBlockVisitMarker(p.ctx, p.bkt, p.logger, blockID, p.blockVisitMarkerReadFailed) + blockVisitMarker, err := ReadBlockVisitMarker(p.ctx, p.bkt, p.logger, blockID, partitionID, p.blockVisitMarkerReadFailed) if err != nil { // shuffle_sharding_grouper should put visit marker file for blocks ready for // compaction. So error should be returned if visit marker file does not exist. 
- return nil, fmt.Errorf("unable to get visit marker file for block %s: %s", blockID, err.Error()) + return nil, fmt.Errorf("unable to get visit marker file for block %s with partition ID %d: %s", blockID, partitionID, err.Error()) + } + if blockVisitMarker.isCompleted() { + return nil, fmt.Errorf("block %s with partition ID %d is in completed status", blockID, partitionID) } - if !blockVisitMarker.isVisitedByCompactor(p.blockVisitMarkerTimeout, p.ringLifecyclerID) { - return nil, fmt.Errorf("block %s is not visited by current compactor %s", blockID, p.ringLifecyclerID) + if !blockVisitMarker.isVisitedByCompactor(p.blockVisitMarkerTimeout, partitionID, p.ringLifecyclerID) { + return nil, fmt.Errorf("block %s with partition ID %d is not visited by current compactor %s", blockID, partitionID, p.ringLifecyclerID) } + partitionGroupID = blockVisitMarker.PartitionedGroupID resultMetas = append(resultMetas, b) } if len(resultMetas) < 2 { + level.Info(p.logger).Log("msg", "result meta size is less than 2", "partitioned_group_id", partitionGroupID, "partition_id", partitionID, "size", len(resultMetas)) return nil, nil } - go markBlocksVisitedHeartBeat(p.ctx, p.bkt, p.logger, resultMetas, p.ringLifecyclerID, p.blockVisitMarkerFileUpdateInterval, p.blockVisitMarkerWriteFailed) + go markBlocksVisitedHeartBeat(p.ctx, p.bkt, p.logger, resultMetas, partitionGroupID, partitionID, p.ringLifecyclerID, p.blockVisitMarkerFileUpdateInterval, p.blockVisitMarkerWriteFailed, errChan) return resultMetas, nil } diff --git a/pkg/compactor/shuffle_sharding_planner_test.go b/pkg/compactor/shuffle_sharding_planner_test.go index e87ece0c2b7..5ca527dbc23 100644 --- a/pkg/compactor/shuffle_sharding_planner_test.go +++ b/pkg/compactor/shuffle_sharding_planner_test.go @@ -4,14 +4,12 @@ import ( "context" "encoding/json" "fmt" - "path" "testing" "time" "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -28,6 +26,7 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id ulid.ULID isExpired bool compactorID string + status VisitStatus } currentCompactor := "test-compactor" @@ -37,13 +36,17 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { block2ulid := ulid.MustNew(2, nil) block3ulid := ulid.MustNew(3, nil) + partitionID0 := 0 + tests := map[string]struct { - ranges []int64 - noCompactBlocks map[ulid.ULID]*metadata.NoCompactMark - blocks []*metadata.Meta - expected []*metadata.Meta - expectedErr error - visitedBlocks []VisitedBlock + ranges []int64 + noCompactBlocks map[ulid.ULID]*metadata.NoCompactMark + blocks []*metadata.Meta + expected []*metadata.Meta + expectedErr error + visitedBlocks []VisitedBlock + partitionGroupID uint32 + partitionID int }{ "test basic plan": { ranges: []int64{2 * time.Hour.Milliseconds()}, @@ -68,13 +71,17 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block2ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, }, + partitionGroupID: 12345, + partitionID: partitionID0, expected: []*metadata.Meta{ { BlockMeta: tsdb.BlockMeta{ @@ -115,14 +122,18 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block2ulid, isExpired: false, compactorID: 
currentCompactor, + status: Pending, }, }, - expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block2ulid.String(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds()), + partitionGroupID: 12345, + partitionID: partitionID0, + expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block2ulid.String(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds()), }, "test blocks outside largest range 1": { ranges: []int64{2 * time.Hour.Milliseconds()}, @@ -147,14 +158,18 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block2ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, }, - expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block1ulid.String(), 0*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds()), + partitionGroupID: 12345, + partitionID: partitionID0, + expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block1ulid.String(), 0*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds()), }, "test blocks outside largest range 2": { ranges: []int64{2 * time.Hour.Milliseconds()}, @@ -179,14 +194,18 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block2ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, }, - expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block2ulid.String(), 0*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds()), + partitionGroupID: 12345, + partitionID: partitionID0, + expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block2ulid.String(), 0*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds()), }, "test should skip blocks marked for no compact": { ranges: []int64{2 * time.Hour.Milliseconds()}, @@ -219,18 +238,23 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block2ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block3ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, }, + partitionGroupID: 12345, + partitionID: partitionID0, expected: []*metadata.Meta{ { BlockMeta: tsdb.BlockMeta{ @@ -272,14 +296,18 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, { id: block2ulid, isExpired: false, compactorID: currentCompactor, + status: Pending, }, }, - expected: []*metadata.Meta{}, + partitionGroupID: 12345, + partitionID: partitionID0, + expected: []*metadata.Meta{}, }, "test should not compact if visit marker file is not expired and visited by other compactor": { ranges: []int64{2 * time.Hour.Milliseconds()}, @@ -304,9 +332,12 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: false, compactorID: otherCompactor, + status: 
Pending, }, }, - expectedErr: fmt.Errorf("block %s is not visited by current compactor %s", block1ulid.String(), currentCompactor), + partitionGroupID: 12345, + partitionID: partitionID0, + expectedErr: fmt.Errorf("block %s with partition ID %d is not visited by current compactor %s", block1ulid.String(), partitionID0, currentCompactor), }, "test should not compact if visit marker file is expired": { ranges: []int64{2 * time.Hour.Milliseconds()}, @@ -331,9 +362,42 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { id: block1ulid, isExpired: true, compactorID: currentCompactor, + status: Pending, + }, + }, + partitionGroupID: 12345, + partitionID: partitionID0, + expectedErr: fmt.Errorf("block %s with partition ID %d is not visited by current compactor %s", block1ulid.String(), partitionID0, currentCompactor), + }, + "test should not compact if visit marker file has completed status": { + ranges: []int64{2 * time.Hour.Milliseconds()}, + blocks: []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: block1ulid, + MinTime: 1 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: block2ulid, + MinTime: 1 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + }, + visitedBlocks: []VisitedBlock{ + { + id: block1ulid, + isExpired: false, + compactorID: currentCompactor, + status: Completed, }, }, - expectedErr: fmt.Errorf("block %s is not visited by current compactor %s", block1ulid.String(), currentCompactor), + partitionGroupID: 12345, + partitionID: partitionID0, + expectedErr: fmt.Errorf("block %s with partition ID %d is in completed status", block1ulid.String(), partitionID0), }, } @@ -342,30 +406,26 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { t.Run(testName, func(t *testing.T) { bkt := &bucket.ClientMock{} for _, visitedBlock := range testData.visitedBlocks { - visitMarkerFile := path.Join(visitedBlock.id.String(), BlockVisitMarkerFile) + visitMarkerFile := getBlockVisitMarkerFile(visitedBlock.id.String(), testData.partitionID) expireTime := time.Now() if visitedBlock.isExpired { expireTime = expireTime.Add(-1 * blockVisitMarkerTimeout) } blockVisitMarker := BlockVisitMarker{ - CompactorID: visitedBlock.compactorID, - VisitTime: expireTime.Unix(), - Version: VisitMarkerVersion1, + CompactorID: visitedBlock.compactorID, + VisitTime: expireTime.Unix(), + Version: VisitMarkerVersion1, + Status: visitedBlock.status, + PartitionedGroupID: testData.partitionGroupID, + PartitionID: testData.partitionID, } visitMarkerFileContent, _ := json.Marshal(blockVisitMarker) bkt.MockGet(visitMarkerFile, string(visitMarkerFileContent), nil) } bkt.MockUpload(mock.Anything, nil) - registerer := prometheus.NewPedanticRegistry() - blockVisitMarkerReadFailed := promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_block_visit_marker_read_failed", - Help: "Number of block visit marker file failed to be read.", - }) - blockVisitMarkerWriteFailed := promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_block_visit_marker_write_failed", - Help: "Number of block visit marker file failed to be written.", - }) + blockVisitMarkerReadFailed := prometheus.NewCounter(prometheus.CounterOpts{}) + blockVisitMarkerWriteFailed := prometheus.NewCounter(prometheus.CounterOpts{}) logs := &concurrency.SyncBuffer{} logger := log.NewLogfmtLogger(logs) @@ -383,7 +443,7 @@ func TestShuffleShardingPlanner_Plan(t *testing.T) { blockVisitMarkerReadFailed, 
blockVisitMarkerWriteFailed, ) - actual, err := p.Plan(context.Background(), testData.blocks) + actual, err := p.PlanWithPartition(context.Background(), testData.blocks, testData.partitionID, make(chan error)) if testData.expectedErr != nil { assert.Equal(t, err, testData.expectedErr) diff --git a/pkg/storage/tsdb/bucketindex/index.go b/pkg/storage/tsdb/bucketindex/index.go index 5c5f6cb5d4b..6d93fa35e59 100644 --- a/pkg/storage/tsdb/bucketindex/index.go +++ b/pkg/storage/tsdb/bucketindex/index.go @@ -226,6 +226,14 @@ func (s BlockDeletionMarks) GetULIDs() []ulid.ULID { return ids } +func (s BlockDeletionMarks) GetULIDSet() map[ulid.ULID]struct{} { + res := make(map[ulid.ULID]struct{}) + for _, m := range s { + res[m.ID] = struct{}{} + } + return res +} + func (s BlockDeletionMarks) Clone() BlockDeletionMarks { clone := make(BlockDeletionMarks, len(s)) for i, m := range s { diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 2f175d3e7ec..059687b6c1e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -331,10 +331,23 @@ func (c *genericMergeSeriesSet) Next() bool { for { // Firstly advance all the current series sets. If any of them have run out, // we can drop them, otherwise they should be inserted back into the heap. - for _, set := range c.currentSets { - if set.Next() { - heap.Push(&c.heap, set) - } + ch := make(chan genericSeriesSet, len(c.currentSets)) + wg := sync.WaitGroup{} + for i := range c.currentSets { + wg.Add(1) + i := i + go func() { + defer wg.Done() + if c.currentSets[i].Next() { + ch <- c.currentSets[i] + } + }() + } + + wg.Wait() + close(ch) + for set := range ch { + heap.Push(&c.heap, set) } if len(c.heap) == 0 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 1d7e93f7f06..d8da21130a6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -17,10 +17,12 @@ import ( "context" "crypto/rand" "fmt" + "golang.org/x/sync/errgroup" "io" "os" "path/filepath" "sort" + "sync" "time" "github.com/go-kit/log" @@ -392,6 +394,10 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { + return c.CompactWithPartition(dest, dirs, open, 1, 0) +} + +func (c *LeveledCompactor) CompactWithPartition(dest string, dirs []string, open []*Block, partitionCount int, partitionId int) (uid ulid.ULID, err error) { var ( blocks []BlockReader bs []*Block @@ -435,7 +441,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u uid = ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, blocks...) + err = c.write(dest, meta, partitionId, partitionCount, blocks...) 
if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { @@ -501,7 +507,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p } } - err := c.write(dest, meta, b) + err := c.write(dest, meta, 0, 1, b) if err != nil { return uid, err } @@ -546,7 +552,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error { } // write creates a new block that is the union of the provided blocks into dir. -func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) { +func (c *LeveledCompactor) write(dest string, meta *BlockMeta, partitionId int, partitionCount int, blocks ...BlockReader) (err error) { dir := filepath.Join(dest, meta.ULID.String()) tmp := dir + tmpForCreationBlockDirSuffix var closers []io.Closer @@ -594,7 +600,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe } closers = append(closers, indexw) - if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil { + if err := c.populateBlock(blocks, meta, indexw, chunkw, partitionId, partitionCount); err != nil { return errors.Wrap(err, "populate block") } @@ -662,13 +668,14 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe // populateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. -func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { +func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, partitionId int, partitionCount int) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } var ( sets []storage.ChunkSeriesSet + setsMtx sync.Mutex symbols index.StringIter closers []io.Closer overlapping bool @@ -684,6 +691,8 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, c.metrics.populatingBlocks.Set(1) globalMaxt := blocks[0].Meta().MaxTime + g, _:= errgroup.WithContext(c.ctx) + g.SetLimit(8) for i, b := range blocks { select { case <-c.ctx.Done(): @@ -726,8 +735,16 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, return err } all = indexr.SortedPostings(all) - // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. - sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) + g.Go(func() error { + shardStart := time.Now() + shardedPosting := index.NewShardedPosting(all, uint64(partitionCount), uint64(partitionId), indexr.Series) + fmt.Printf("finished sharding, duration: %v\n", time.Since(shardStart)) + // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. 
+ setsMtx.Lock() + sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, shardedPosting, meta.MinTime, meta.MaxTime-1, false)) + setsMtx.Unlock() + return nil + }) syms := indexr.Symbols() if i == 0 { symbols = syms @@ -735,6 +752,9 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } symbols = NewMergedStringIter(symbols, syms) } + if err := g.Wait(); err != nil { + return err + } for symbols.Next() { if err := indexw.AddSymbol(symbols.At()); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 8df2bccf67e..e7d2115e9b9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -16,6 +16,7 @@ package index import ( "container/heap" "encoding/binary" + "fmt" "runtime" "sort" "sync" @@ -24,6 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunks" ) var allPostingsKey = labels.Label{} @@ -420,6 +422,55 @@ func (e errPostings) Seek(storage.SeriesRef) bool { return false } func (e errPostings) At() storage.SeriesRef { return 0 } func (e errPostings) Err() error { return e.err } +type shardedPostings struct { + //postings Postings + //labelsFn func(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error + series []storage.SeriesRef + cur storage.SeriesRef + initialized bool + + //bufChks []chunks.Meta + //bufLbls labels.Labels +} + +func NewShardedPosting(postings Postings, partitionCount uint64, partitionId uint64, labelsFn func(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error) *shardedPostings { + bufChks := make([]chunks.Meta, 0) + bufLbls := make(labels.Labels, 0) + series := make([]storage.SeriesRef, 0) + for postings.Next() { + err := labelsFn(postings.At(), &bufLbls, &bufChks) + if err != nil { + fmt.Printf("err: %v", err) + } + if bufLbls.Hash() % partitionCount == partitionId { + posting := postings.At() + series = append(series, posting) + } + } + return &shardedPostings{series: series, initialized: false} +} + +func (p *shardedPostings) Next() bool { + if len(p.series) > 0 { + p.cur, p.series = p.series[0], p.series[1:] + return true + } + return false +} + +func (p *shardedPostings) At() storage.SeriesRef { + return p.cur +} + +func (p *shardedPostings) Seek(v storage.SeriesRef) bool { + fmt.Println("ran seek") + return false +} + +func (p *shardedPostings) Err() error { + return nil +} + var emptyPostings = errPostings{} // EmptyPostings returns a postings list that's always empty. diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 7886e5d8693..6ccb7518e86 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -37,6 +37,8 @@ const ( IndexHeaderFilename = "index-header" // ChunksDirname is the known dir name for chunks with compressed samples. ChunksDirname = "chunks" + // PartitionInfoFilename is JSON filename for partition information. + PartitionInfoFilename = "partition-info.json" // DebugMetas is a directory for debug meta files that happen in the past. Useful for debugging. 
DebugMetas = "debug/metas" @@ -153,6 +155,14 @@ func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return cleanUp(logger, bkt, id, errors.Wrap(err, "upload index")) } + // level 1 blocks should not have partition info file + if meta.Compaction.Level > 1 { + if err := objstore.UploadFile(ctx, logger, bkt, filepath.Join(bdir, PartitionInfoFilename), path.Join(id.String(), PartitionInfoFilename)); err != nil { + // Don't call cleanUp here. Partition info file acts in a similar way as meta file. + return errors.Wrap(err, "upload partition info") + } + } + // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file to be pending uploads. if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), strings.NewReader(metaEncoded.String())); err != nil { // Don't call cleanUp here. Despite getting error, meta.json may have been uploaded in certain cases, diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 3a228f994f2..1a9e213847d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -576,6 +576,10 @@ func (f *LabelShardedMetaFilter) Filter(_ context.Context, metas map[ulid.ULID]* var _ MetadataFilter = &DeduplicateFilter{} +type IDeduplicateFilter interface { + DuplicateIDs() []ulid.ULID +} + // DeduplicateFilter is a BaseFetcher filter that filters out older blocks that have exactly the same data. // Not go-routine safe. type DeduplicateFilter struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go b/vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go new file mode 100644 index 00000000000..24602cdbd9c --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go @@ -0,0 +1,79 @@ +package block + +import ( + "encoding/json" + "io" + "os" + "path/filepath" + + "github.com/go-kit/log" + "github.com/prometheus/prometheus/tsdb/fileutil" + + "github.com/thanos-io/thanos/pkg/runutil" +) + +type PartitionInfo struct { + PartitionedGroupID uint32 `json:"partitionedGroupID"` + PartitionCount int `json:"partitionCount"` + PartitionID int `json:"partitionID"` +} + +// WriteToDir writes the encoded partition info into /partition-info.json. +func (p PartitionInfo) WriteToDir(logger log.Logger, dir string) error { + // Make any changes to the file appear atomic. + path := filepath.Join(dir, PartitionInfoFilename) + tmp := path + ".tmp" + + f, err := os.Create(tmp) + if err != nil { + return err + } + + if err := p.Write(f); err != nil { + runutil.CloseWithLogOnErr(logger, f, "close partition info") + return err + } + if err := f.Close(); err != nil { + return err + } + return renameFile(logger, tmp, path) +} + +// Write writes the given encoded partition info to writer. +func (p PartitionInfo) Write(w io.Writer) error { + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + return enc.Encode(&p) +} + +func renameFile(logger log.Logger, from, to string) error { + if err := os.RemoveAll(to); err != nil { + return err + } + if err := os.Rename(from, to); err != nil { + return err + } + + // Directory was renamed; sync parent dir to persist rename. 
+ pdir, err := fileutil.OpenDir(filepath.Dir(to)) + if err != nil { + return err + } + + if err = fileutil.Fdatasync(pdir); err != nil { + runutil.CloseWithLogOnErr(logger, pdir, "close dir") + return err + } + return pdir.Close() +} + +// Read the block partition info from the given reader. +func ReadPartitionInfo(rc io.ReadCloser) (_ *PartitionInfo, err error) { + defer runutil.ExhaustCloseWithErrCapture(&err, rc, "close partition info JSON") + + var p PartitionInfo + if err = json.NewDecoder(rc).Decode(&p); err != nil { + return nil, err + } + return &p, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index c6c36bf3cc1..4362b1533fb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -58,7 +58,7 @@ type Syncer struct { blocks map[ulid.ULID]*metadata.Meta partial map[ulid.ULID]error metrics *syncerMetrics - duplicateBlocksFilter *block.DeduplicateFilter + duplicateBlocksFilter block.IDeduplicateFilter ignoreDeletionMarkFilter *block.IgnoreDeletionMarkFilter } @@ -95,7 +95,7 @@ func newSyncerMetrics(reg prometheus.Registerer, blocksMarkedForDeletion, garbag // NewMetaSyncer returns a new Syncer for the given Bucket and directory. // Blocks must be at least as old as the sync delay for being considered. -func NewMetaSyncer(logger log.Logger, reg prometheus.Registerer, bkt objstore.Bucket, fetcher block.MetadataFetcher, duplicateBlocksFilter *block.DeduplicateFilter, ignoreDeletionMarkFilter *block.IgnoreDeletionMarkFilter, blocksMarkedForDeletion, garbageCollectedBlocks prometheus.Counter) (*Syncer, error) { +func NewMetaSyncer(logger log.Logger, reg prometheus.Registerer, bkt objstore.Bucket, fetcher block.MetadataFetcher, duplicateBlocksFilter block.IDeduplicateFilter, ignoreDeletionMarkFilter *block.IgnoreDeletionMarkFilter, blocksMarkedForDeletion, garbageCollectedBlocks prometheus.Counter) (*Syncer, error) { if logger == nil { logger = log.NewNopLogger() } @@ -177,7 +177,7 @@ func (s *Syncer) GarbageCollect(ctx context.Context) error { if _, exists := deletionMarkMap[id]; exists { continue } - garbageIDs = append(garbageIDs, id) + //garbageIDs = append(garbageIDs, id) } for _, id := range garbageIDs { @@ -350,6 +350,9 @@ type Group struct { hashFunc metadata.HashFunc blockFilesConcurrency int compactBlocksFetchConcurrency int + partitionedGroupID uint32 + partitionCount int + partitionID int } // NewGroup returns a new compaction group. @@ -490,6 +493,24 @@ func (cg *Group) Resolution() int64 { return cg.resolution } +func (cg *Group) PartitionedGroupID() uint32 { + return cg.partitionedGroupID +} + +func (cg *Group) PartitionCount() int { + return cg.partitionCount +} + +func (cg *Group) PartitionID() int { + return cg.partitionID +} + +func (cg *Group) SetPartitionInfo(partitionedGroupID uint32, partitionCount int, partitionID int) { + cg.partitionedGroupID = partitionedGroupID + cg.partitionCount = partitionCount + cg.partitionID = partitionID +} + // CompactProgressMetrics contains Prometheus metrics related to compaction progress. type CompactProgressMetrics struct { NumberOfCompactionRuns *prometheus.GaugeVec @@ -728,6 +749,19 @@ type Planner interface { // Plan returns a list of blocks that should be compacted into single one. // The blocks can be overlapping. The provided metadata has to be ordered by minTime. 
Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) + + PlanWithPartition(ctx context.Context, metasByMinTime []*metadata.Meta, partitionID int, errChan chan error) ([]*metadata.Meta, error) +} + +type ExtraCompactionCompleteChecker interface { + IsComplete(group *Group, blockID ulid.ULID) bool +} + +type DefaultCompactionCompleteChecker struct { +} + +func (c DefaultCompactionCompleteChecker) IsComplete(_ *Group, _ ulid.ULID) bool { + return true } // Compactor provides compaction against an underlying storage of time series data. @@ -747,11 +781,13 @@ type Compactor interface { // * The source dirs are marked Deletable. // * Returns empty ulid.ULID{}. Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) + + CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionId int) (ulid.ULID, error) } // Compact plans and runs a single compaction against the group. The compacted result // is uploaded into the bucket the blocks were retrieved from. -func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor) (shouldRerun bool, compID ulid.ULID, rerr error) { +func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker) (shouldRerun bool, compID ulid.ULID, rerr error) { cg.compactionRunsStarted.Inc() subDir := filepath.Join(dir, cg.Key()) @@ -771,10 +807,13 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp return false, ulid.ULID{}, errors.Wrap(err, "create compaction group dir") } + errChan := make(chan error, 1) err := tracing.DoInSpanWithErr(ctx, "compaction_group", func(ctx context.Context) (err error) { - shouldRerun, compID, err = cg.compact(ctx, subDir, planner, comp) + shouldRerun, compID, err = cg.compact(ctx, subDir, planner, comp, completeChecker, errChan) return err }, opentracing.Tags{"group.key": cg.Key()}) + errChan <- err + close(errChan) if err != nil { cg.compactionFailures.Inc() return false, ulid.ULID{}, err @@ -974,7 +1013,7 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, return nil } -func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor) (shouldRerun bool, compID ulid.ULID, _ error) { +func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker, errChan chan error) (shouldRerun bool, compID ulid.ULID, _ error) { cg.mtx.Lock() defer cg.mtx.Unlock() @@ -992,8 +1031,13 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp var toCompact []*metadata.Meta if err := tracing.DoInSpanWithErr(ctx, "compaction_planning", func(ctx context.Context) (e error) { - toCompact, e = planner.Plan(ctx, cg.metasByMinTime) - return e + if cg.partitionCount > 0 { + toCompact, e = planner.PlanWithPartition(ctx, cg.metasByMinTime, cg.partitionID, errChan) + return e + } else { + toCompact, e = planner.Plan(ctx, cg.metasByMinTime) + return e + } }); err != nil { return false, ulid.ULID{}, errors.Wrap(err, "plan compaction") } @@ -1006,7 +1050,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp // Due to #183 we verify that none of the blocks in the plan have overlapping sources. // This is one potential source of how we could end up with duplicated chunks. 
- uniqueSources := map[ulid.ULID]struct{}{} + //uniqueSources := map[ulid.ULID]struct{}{} // Once we have a plan we need to download the actual data. groupCompactionBegin := time.Now() @@ -1017,12 +1061,12 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp toCompactDirs := make([]string, 0, len(toCompact)) for _, m := range toCompact { bdir := filepath.Join(dir, m.ULID.String()) - for _, s := range m.Compaction.Sources { - if _, ok := uniqueSources[s]; ok { - return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", toCompact)) - } - uniqueSources[s] = struct{}{} - } + //for _, s := range m.Compaction.Sources { + // if _, ok := uniqueSources[s]; ok { + // return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", toCompact)) + // } + // uniqueSources[s] = struct{}{} + //} func(ctx context.Context, meta *metadata.Meta) { g.Go(func() error { if err := tracing.DoInSpanWithErr(ctx, "compaction_block_download", func(ctx context.Context) error { @@ -1072,7 +1116,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp begin = time.Now() if err := tracing.DoInSpanWithErr(ctx, "compaction", func(ctx context.Context) (e error) { - compID, e = comp.Compact(dir, toCompactDirs, nil) + compID, e = comp.CompactWithPartition(dir, toCompactDirs, nil, cg.partitionCount, cg.partitionID) return e }); err != nil { return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", toCompactDirs)) @@ -1082,7 +1126,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp level.Info(cg.logger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", sourceBlockStr) for _, meta := range toCompact { if meta.Stats.NumSamples == 0 { - if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String())); err != nil { + if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String()), completeChecker); err != nil { level.Warn(cg.logger).Log("msg", "failed to mark for deletion an empty block found during compaction", "block", meta.ULID) } } @@ -1110,6 +1154,15 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp return false, ulid.ULID{}, errors.Wrapf(err, "failed to finalize the block %s", bdir) } + partitionInfo := block.PartitionInfo{ + PartitionedGroupID: cg.partitionedGroupID, + PartitionCount: cg.partitionCount, + PartitionID: cg.partitionID, + } + if err := partitionInfo.WriteToDir(cg.logger, bdir); err != nil { + return false, ulid.ULID{}, errors.Wrapf(err, "failed to put partition info for the block %s", bdir) + } + if err = os.Remove(filepath.Join(bdir, "tombstones")); err != nil { return false, ulid.ULID{}, errors.Wrap(err, "remove tombstones") } @@ -1145,7 +1198,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp // Eventually the block we just uploaded should get synced into the group again (including sync-delay). 
for _, meta := range toCompact { err = tracing.DoInSpanWithErr(ctx, "compaction_block_delete", func(ctx context.Context) error { - return cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String())) + return cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String()), completeChecker) }, opentracing.Tags{"block.id": meta.ULID}) if err != nil { return false, ulid.ULID{}, retry(errors.Wrapf(err, "mark old block for deletion from bucket")) @@ -1158,17 +1211,19 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp return true, compID, nil } -func (cg *Group) deleteBlock(id ulid.ULID, bdir string) error { +func (cg *Group) deleteBlock(id ulid.ULID, bdir string, completeChecker ExtraCompactionCompleteChecker) error { if err := os.RemoveAll(bdir); err != nil { return errors.Wrapf(err, "remove old block dir %s", id) } - // Spawn a new context so we always mark a block for deletion in full on shutdown. - delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - level.Info(cg.logger).Log("msg", "marking compacted block for deletion", "old_block", id) - if err := block.MarkForDeletion(delCtx, cg.logger, cg.bkt, id, "source of compacted block", cg.blocksMarkedForDeletion); err != nil { - return errors.Wrapf(err, "mark block %s for deletion from bucket", id) + if completeChecker.IsComplete(cg, id) { + // Spawn a new context so we always mark a block for deletion in full on shutdown. + delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + level.Info(cg.logger).Log("msg", "marking compacted block for deletion", "old_block", id) + if err := block.MarkForDeletion(delCtx, cg.logger, cg.bkt, id, "source of compacted block", cg.blocksMarkedForDeletion); err != nil { + return errors.Wrapf(err, "mark block %s for deletion from bucket", id) + } } return nil } @@ -1180,6 +1235,7 @@ type BucketCompactor struct { grouper Grouper comp Compactor planner Planner + completeChecker ExtraCompactionCompleteChecker compactDir string bkt objstore.Bucket concurrency int @@ -1197,6 +1253,35 @@ func NewBucketCompactor( bkt objstore.Bucket, concurrency int, skipBlocksWithOutOfOrderChunks bool, +) (*BucketCompactor, error) { + if concurrency <= 0 { + return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) + } + return NewBucketCompactorWithCompleteChecker( + logger, + sy, + grouper, + planner, + comp, + DefaultCompactionCompleteChecker{}, + compactDir, + bkt, + concurrency, + skipBlocksWithOutOfOrderChunks, + ) +} + +func NewBucketCompactorWithCompleteChecker( + logger log.Logger, + sy *Syncer, + grouper Grouper, + planner Planner, + comp Compactor, + completeChecker ExtraCompactionCompleteChecker, + compactDir string, + bkt objstore.Bucket, + concurrency int, + skipBlocksWithOutOfOrderChunks bool, ) (*BucketCompactor, error) { if concurrency <= 0 { return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) @@ -1207,6 +1292,7 @@ func NewBucketCompactor( grouper: grouper, planner: planner, comp: comp, + completeChecker: completeChecker, compactDir: compactDir, bkt: bkt, concurrency: concurrency, @@ -1247,7 +1333,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { go func() { defer wg.Done() for g := range groupChan { - shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp) + shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp, c.completeChecker) if err 
== nil { if shouldRerunGroup { mtx.Lock() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go index 5c2a93df8dc..18c4e62ccde 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -93,6 +93,10 @@ func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompac return nil, nil } +func (p *tsdbBasedPlanner) PlanWithPartition(ctx context.Context, metasByMinTime []*metadata.Meta, partitionID int, errChan chan error) ([]*metadata.Meta, error) { + return nil, errors.New("not support with partitioning") +} + // selectMetas returns the dir metas that should be compacted into a single new block. // If only a single block range is configured, the result is always nil. // Copied and adjusted from https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L229. @@ -303,3 +307,7 @@ PlanLoop: return plan, nil } } + +func (t *largeTotalIndexSizeFilter) PlanWithPartition(ctx context.Context, metasByMinTime []*metadata.Meta, partitionID int, errChan chan error) ([]*metadata.Meta, error) { + return nil, errors.New("not support with partitioning") +} From f2e60cc732731c84afea0e2ff6d5f7df142dc637 Mon Sep 17 00:00:00 2001 From: Alex Le Date: Tue, 6 Dec 2022 16:43:47 -0800 Subject: [PATCH 002/133] Fixed lint Signed-off-by: Alex Le --- pkg/compactor/blocks_cleaner.go | 21 +++++++++++-------- pkg/compactor/compactor.go | 2 +- pkg/compactor/compactor_test.go | 9 +++++--- ...tition_compaction_complete_checker_test.go | 8 ++++--- pkg/compactor/partitioned_group_info.go | 14 +++++++------ pkg/compactor/shuffle_sharding_grouper.go | 13 +++--------- .../shuffle_sharding_grouper_test.go | 3 ++- pkg/compactor/shuffle_sharding_planner.go | 2 +- .../prometheus/prometheus/tsdb/compact.go | 4 ++-- .../thanos-io/thanos/pkg/compact/compact.go | 2 +- 10 files changed, 41 insertions(+), 37 deletions(-) diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index 3e429b67707..4c89b1dd20e 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -462,7 +462,7 @@ func (c *BlocksCleaner) validatePartitionedResultBlock(ctx context.Context, user func (c *BlocksCleaner) cleanPartitionedGroupInfo(ctx context.Context, userBucket objstore.InstrumentedBucket, userLogger log.Logger, userID string, index *bucketindex.Index) { var deletePartitionedGroupInfo []string - userBucket.Iter(ctx, PartitionedGroupDirectory, func(file string) error { + err := userBucket.Iter(ctx, PartitionedGroupDirectory, func(file string) error { partitionedGroupInfo, err := ReadPartitionedGroupInfoFile(ctx, userBucket, userLogger, file, c.partitionedGroupInfoReadFailed) if err != nil { level.Warn(userLogger).Log("msg", "failed to read partitioned group info", "partitioned_group_info", file) @@ -471,17 +471,17 @@ func (c *BlocksCleaner) cleanPartitionedGroupInfo(ctx context.Context, userBucke resultBlocks := c.findResultBlocksForPartitionedGroup(ctx, userBucket, userLogger, index, partitionedGroupInfo) partitionedGroupID := partitionedGroupInfo.PartitionedGroupID for _, partition := range partitionedGroupInfo.Partitions { - if resultBlock, ok := resultBlocks[partition.PartitionID]; !ok { + if _, ok := resultBlocks[partition.PartitionID]; !ok { level.Info(userLogger).Log("msg", "unable to find result block for partition in partitioned group", "partitioned_group_id", partitionedGroupID, 
"partition_id", partition.PartitionID) return nil - } else { - err := c.validatePartitionedResultBlock(ctx, userBucket, userLogger, userID, resultBlock, partition, partitionedGroupID) - if err != nil { - level.Warn(userLogger).Log("msg", "validate result block failed", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String(), "err", err) - return nil - } - level.Info(userLogger).Log("msg", "result block has expected parent blocks", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String()) } + resultBlock := resultBlocks[partition.PartitionID] + err := c.validatePartitionedResultBlock(ctx, userBucket, userLogger, userID, resultBlock, partition, partitionedGroupID) + if err != nil { + level.Warn(userLogger).Log("msg", "validate result block failed", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String(), "err", err) + return nil + } + level.Info(userLogger).Log("msg", "result block has expected parent blocks", "partitioned_group_id", partitionedGroupID, "partition_id", partition.PartitionID, "block", resultBlock.String()) } // since the partitioned group were all complete, we can make sure @@ -513,6 +513,9 @@ func (c *BlocksCleaner) cleanPartitionedGroupInfo(ctx context.Context, userBucke deletePartitionedGroupInfo = append(deletePartitionedGroupInfo, file) return nil }) + if err != nil { + level.Warn(userLogger).Log("msg", "error return when going through partitioned group directory", "err", err) + } for _, partitionedGroupInfoFile := range deletePartitionedGroupInfo { if err := userBucket.Delete(ctx, partitionedGroupInfoFile); err != nil { level.Warn(userLogger).Log("msg", "failed to delete partitioned group info", "partitioned_group_info", partitionedGroupInfoFile, "err", err) diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 5eb45e1aafb..9e956bd72b3 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -4,7 +4,6 @@ import ( "context" "flag" "fmt" - "github.com/oklog/ulid" "hash/fnv" "math/rand" "os" @@ -15,6 +14,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 048249cae1c..e440a4b623a 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1652,8 +1652,8 @@ func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Blo return args.Get(0).(ulid.ULID), args.Error(1) } -func (m *tsdbCompactorMock) CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionId int) (ulid.ULID, error) { - args := m.Called(dest, dirs, open, partitionCount, partitionId) +func (m *tsdbCompactorMock) CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionID int) (ulid.ULID, error) { + args := m.Called(dest, dirs, open, partitionCount, partitionID) return args.Get(0).(ulid.ULID), args.Error(1) } @@ -1726,9 +1726,12 @@ func mockBlockGroup(userID string, ids []string, bkt *bucket.ClientMock) *compac ) for _, id := range ids { meta := mockBlockMeta(id) - group.AppendMeta(&metadata.Meta{ + err := group.AppendMeta(&metadata.Meta{ BlockMeta: meta, }) + if err != nil { + continue + } } return group } diff --git 
a/pkg/compactor/partition_compaction_complete_checker_test.go b/pkg/compactor/partition_compaction_complete_checker_test.go index 1c65ccfba11..8d74c86ff43 100644 --- a/pkg/compactor/partition_compaction_complete_checker_test.go +++ b/pkg/compactor/partition_compaction_complete_checker_test.go @@ -3,15 +3,17 @@ package compactor import ( "context" "encoding/json" - "github.com/cortexproject/cortex/pkg/storage/bucket" + "testing" + "time" + "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/compact" - "testing" - "time" + + "github.com/cortexproject/cortex/pkg/storage/bucket" ) func TestPartitionCompactionCompleteChecker(t *testing.T) { diff --git a/pkg/compactor/partitioned_group_info.go b/pkg/compactor/partitioned_group_info.go index 3b054ffa6f6..c4d3a9201a1 100644 --- a/pkg/compactor/partitioned_group_info.go +++ b/pkg/compactor/partitioned_group_info.go @@ -5,16 +5,18 @@ import ( "context" "encoding/json" "fmt" - "github.com/cortexproject/cortex/pkg/util/runutil" + "io" + "path" + "strings" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" - "io" - "path" - "strings" + + "github.com/cortexproject/cortex/pkg/util/runutil" ) const ( @@ -73,7 +75,7 @@ func (p *PartitionedGroupInfo) getAllBlocks() []ulid.ULID { } blocks := make([]ulid.ULID, len(uniqueBlocks)) i := 0 - for block, _ := range uniqueBlocks { + for block := range uniqueBlocks { blocks[i] = block i++ } @@ -124,7 +126,7 @@ func ReadPartitionedGroupInfoFile(ctx context.Context, bkt objstore.Instrumented } func UpdatePartitionedGroupInfo(ctx context.Context, bkt objstore.InstrumentedBucket, logger log.Logger, partitionedGroupInfo PartitionedGroupInfo, partitionedGroupInfoReadFailed prometheus.Counter, partitionedGroupInfoWriteFailed prometheus.Counter) (*PartitionedGroupInfo, error) { - existingPartitionedGroup, err := ReadPartitionedGroupInfo(ctx, bkt, logger, partitionedGroupInfo.PartitionedGroupID, partitionedGroupInfoReadFailed) + existingPartitionedGroup, _ := ReadPartitionedGroupInfo(ctx, bkt, logger, partitionedGroupInfo.PartitionedGroupID, partitionedGroupInfoReadFailed) if existingPartitionedGroup != nil { level.Warn(logger).Log("msg", "partitioned group info already exists", "partitioned_group_id", partitionedGroupInfo.PartitionedGroupID) return existingPartitionedGroup, nil diff --git a/pkg/compactor/shuffle_sharding_grouper.go b/pkg/compactor/shuffle_sharding_grouper.go index 636acf9eda7..9e2eb0cf6ab 100644 --- a/pkg/compactor/shuffle_sharding_grouper.go +++ b/pkg/compactor/shuffle_sharding_grouper.go @@ -3,7 +3,6 @@ package compactor import ( "context" "fmt" - thanosblock "github.com/thanos-io/thanos/pkg/block" "hash/fnv" "math" "path" @@ -19,6 +18,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/objstore" + thanosblock "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" @@ -549,11 +549,10 @@ func createBlocksGroup(blocks map[ulid.ULID]*metadata.Meta, blockIDs []ulid.ULID group.rangeStart = rangeStart group.rangeEnd = rangeEnd for _, blockID := range blockIDs { - if m, ok := blocks[blockID]; !ok { + if _, ok := blocks[blockID]; !ok { return nil, 
errors.New(fmt.Sprintf("block not found: %s", blockID)) - } else { - group.blocks = append(group.blocks, m) } + group.blocks = append(group.blocks, blocks[blockID]) } return &group, nil } @@ -604,12 +603,6 @@ func (g blocksGroup) rangeLength() int64 { return g.rangeEnd - g.rangeStart } -// minTime returns the MinTime across all blocks in the group. -func (g blocksGroup) minTime() int64 { - // Blocks are expected to be sorted by MinTime. - return g.blocks[0].MinTime -} - // maxTime returns the MaxTime across all blocks in the group. func (g blocksGroup) maxTime() int64 { max := g.blocks[0].MaxTime diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index 6a69b88f1cf..b3716248f72 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -1387,7 +1387,8 @@ func TestPartitionStrategyChange_shouldUseOriginalPartitionedGroup(t *testing.T) PartitionID: partitionID, } partitionInfoContent, _ := json.Marshal(partitionInfo) - bkt.Upload(context.Background(), getBlockPartitionInfoFile(block.ULID), bytes.NewReader(partitionInfoContent)) + err := bkt.Upload(context.Background(), getBlockPartitionInfoFile(block.ULID), bytes.NewReader(partitionInfoContent)) + require.NoError(t, err) } testGroup := blocksGroup{ rangeStart: testRangeStart, diff --git a/pkg/compactor/shuffle_sharding_planner.go b/pkg/compactor/shuffle_sharding_planner.go index 6d3305efe45..f0f040dce21 100644 --- a/pkg/compactor/shuffle_sharding_planner.go +++ b/pkg/compactor/shuffle_sharding_planner.go @@ -3,10 +3,10 @@ package compactor import ( "context" "fmt" - "github.com/go-kit/log/level" "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index d8da21130a6..ddcefc20d79 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -397,7 +397,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u return c.CompactWithPartition(dest, dirs, open, 1, 0) } -func (c *LeveledCompactor) CompactWithPartition(dest string, dirs []string, open []*Block, partitionCount int, partitionId int) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) CompactWithPartition(dest string, dirs []string, open []*Block, partitionCount int, partitionID int) (uid ulid.ULID, err error) { var ( blocks []BlockReader bs []*Block @@ -441,7 +441,7 @@ func (c *LeveledCompactor) CompactWithPartition(dest string, dirs []string, open uid = ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, partitionId, partitionCount, blocks...) + err = c.write(dest, meta, partitionID, partitionCount, blocks...) if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 4362b1533fb..9f70700fecc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -782,7 +782,7 @@ type Compactor interface { // * Returns empty ulid.ULID{}. 
Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) - CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionId int) (ulid.ULID, error) + CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionID int) (ulid.ULID, error) } // Compact plans and runs a single compaction against the group. The compacted result From 5f37f268afade805a5efc13b0816279b1986bcbf Mon Sep 17 00:00:00 2001 From: Alex Le Date: Tue, 3 Jan 2023 11:58:05 -0800 Subject: [PATCH 003/133] Refactored thanos and prometheus changes Signed-off-by: Alex Le --- pkg/compactor/blocks_cleaner.go | 10 +- pkg/compactor/compactor.go | 6 +- pkg/compactor/compactor_test.go | 11 +- pkg/compactor/shuffle_sharding_grouper.go | 72 ++-- .../shuffle_sharding_grouper_test.go | 382 ++++++------------ .../prometheus/prometheus/tsdb/compact.go | 29 +- .../thanos-io/thanos/pkg/block/block.go | 10 - .../thanos/pkg/block/metadata/meta.go | 9 + .../thanos/pkg/block/partition_info.go | 79 ---- .../thanos-io/thanos/pkg/compact/compact.go | 67 ++- .../thanos/pkg/compact/sharded_postings.go | 69 ++++ 11 files changed, 314 insertions(+), 430 deletions(-) delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/sharded_postings.go diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index 4c89b1dd20e..655153a6595 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -418,16 +418,16 @@ func (c *BlocksCleaner) findResultBlocksForPartitionedGroup(ctx context.Context, resultBlocks := make(map[int]ulid.ULID) for _, b := range possibleResultBlocks { - reader, err := userBucket.Get(ctx, path.Join(b.String(), block.PartitionInfoFilename)) + meta, err := block.DownloadMeta(ctx, userLogger, userBucket, b) if err != nil { - level.Info(userLogger).Log("msg", "unable to get partition info for block", "partitioned_group_id", partitionedGroupID, "block", b.String()) + level.Info(userLogger).Log("msg", "unable to get meta for block", "partitioned_group_id", partitionedGroupID, "block", b.String()) continue } - partitionInfo, err := block.ReadPartitionInfo(reader) - if err != nil { - level.Info(userLogger).Log("msg", "unable to parse partition info for block", "partitioned_group_id", partitionedGroupID, "block", b.String()) + if meta.Thanos.PartitionInfo == nil { + level.Info(userLogger).Log("msg", "unable to get partition info for block", "partitioned_group_id", partitionedGroupID, "block", b.String()) continue } + partitionInfo := meta.Thanos.PartitionInfo if partitionInfo.PartitionedGroupID == partitionedGroupID { level.Info(userLogger).Log("msg", "found result block", "partitioned_group_id", partitionedGroupID, "partition_id", partitionInfo.PartitionID, "block", b.String()) resultBlocks[partitionInfo.PartitionID] = b diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 9e956bd72b3..28911c21158 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -267,6 +267,9 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. 
Subject to sharding.") f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.") + f.DurationVar(&cfg.BlockVisitMarkerTimeout, "compactor.block-visit-marker-timeout", 5*time.Minute, "How long block visit marker file should be considered as expired and able to be picked up by compactor again.") + f.DurationVar(&cfg.BlockVisitMarkerFileUpdateInterval, "compactor.block-visit-marker-file-update-interval", 1*time.Minute, "How frequently block visit marker file should be updated duration compaction.") + f.Int64Var(&cfg.PartitionIndexSizeLimitInBytes, "compactor.partition-index-size-limit-in-bytes", 0, "Index size limit in bytes for each compaction partition. 0 means no limit") f.Int64Var(&cfg.PartitionSeriesCountLimit, "compactor.partition-series-count-limit", 0, "Time series count limit for each compaction partition. 0 means no limit") } @@ -857,13 +860,14 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { return errors.Wrap(err, "failed to create syncer") } - compactor, err := compact.NewBucketCompactorWithCompleteChecker( + compactor, err := compact.NewBucketCompactorWithCheckerAndCallback( ulogger, syncer, c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.blocksMarkedForNoCompaction, c.garbageCollectedBlocks, c.remainingPlannedCompactions, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed, c.partitionedGroupInfoReadFailed, c.partitionedGroupInfoWriteFailed, c.ring, c.ringLifecycler, c.limits, userID), c.blocksPlannerFactory(ctx, bucket, ulogger, c.compactorCfg, noCompactMarkerFilter, c.ringLifecycler, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed), c.blocksCompactor, c.compactionCompleteCheckerFactory(ctx, bucket, ulogger, c.blockVisitMarkerReadFailed, c.partitionedGroupInfoReadFailed), + compact.NoopCompactionLifecycleCallback{}, path.Join(c.compactorCfg.DataDir, "compact"), bucket, c.compactorCfg.CompactionConcurrency, diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index e440a4b623a..a460938c783 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" - "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/testutil" @@ -1041,26 +1040,22 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) - bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/"+block.PartitionInfoFilename, "", nil) bucketClient.MockUpload("user-1/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json", mockBlockMetaJSON("01FN6CDF3PNEWWRY5MPGJPE3EX"), nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/no-compact-mark.json", "", nil) 
bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", "", nil) - bucketClient.MockGet("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/"+block.PartitionInfoFilename, "", nil) bucketClient.MockUpload("user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/partition-0-visit-mark.json", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", mockBlockMetaJSON("01DTW0ZCPDDNV4BV83Q2SV4QAZ"), nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/no-compact-mark.json", "", nil) bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", "", nil) bucketClient.MockUpload("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/partition-0-visit-mark.json", nil) - bucketClient.MockGet("user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/"+block.PartitionInfoFilename, "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json", mockBlockMetaJSON("01FN3V83ABR9992RF8WRJZ76ZQ"), nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/deletion-mark.json", "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/no-compact-mark.json", "", nil) bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", "", nil) bucketClient.MockUpload("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/partition-0-visit-mark.json", nil) - bucketClient.MockGet("user-2/01FN3V83ABR9992RF8WRJZ76ZQ/"+block.PartitionInfoFilename, "", nil) bucketClient.MockGet("user-1/bucket-index.json.gz", "", nil) bucketClient.MockGet("user-2/bucket-index.json.gz", "", nil) bucketClient.MockUpload("user-1/bucket-index.json.gz", nil) @@ -1152,7 +1147,6 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", "", nil) bucketClient.MockUpload(userID+"/01DTVP434PA9VFXSW2JKB3392D/partition-0-visit-mark.json", nil) - bucketClient.MockGet(userID+"/01DTW0ZCPDDNV4BV83Q2SV4QAZ/"+block.PartitionInfoFilename, "", nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) bucketClient.MockGet(userID+"/partitioned-groups/"+partitionedGroupID+".json", "", nil) @@ -1274,7 +1268,6 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit bucketClient.MockGetTimes(userID+"/"+blockID+"/partition-0-visit-mark.json", "", nil, 1) bucketClient.MockGet(userID+"/"+blockID+"/partition-0-visit-mark.json", string(visitMarkerFileContent), nil) bucketClient.MockUpload(userID+"/"+blockID+"/partition-0-visit-mark.json", nil) - bucketClient.MockGet(userID+"/"+blockID+"/"+block.PartitionInfoFilename, "", nil) blockDirectory = append(blockDirectory, userID+"/"+blockID) // Get all of the unique group hashes so that they can be used to ensure all groups were compacted @@ -1652,8 +1645,8 @@ func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Blo return args.Get(0).(ulid.ULID), args.Error(1) } -func (m *tsdbCompactorMock) CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionID int) (ulid.ULID, error) { - args := m.Called(dest, dirs, open, partitionCount, partitionID) +func (m *tsdbCompactorMock) CompactWithAdditionalPostings(dest string, dirs []string, open []*tsdb.Block, additionalPostingsProvider tsdb.AdditionalPostingsFunc) (ulid.ULID, error) { + args := m.Called(dest, dirs, open, 
additionalPostingsProvider) return args.Get(0).(ulid.ULID), args.Error(1) } diff --git a/pkg/compactor/shuffle_sharding_grouper.go b/pkg/compactor/shuffle_sharding_grouper.go index 9e2eb0cf6ab..cdb736bccf9 100644 --- a/pkg/compactor/shuffle_sharding_grouper.go +++ b/pkg/compactor/shuffle_sharding_grouper.go @@ -5,7 +5,6 @@ import ( "fmt" "hash/fnv" "math" - "path" "sort" "strings" "time" @@ -58,8 +57,6 @@ type ShuffleShardingGrouper struct { blockVisitMarkerWriteFailed prometheus.Counter partitionedGroupInfoReadFailed prometheus.Counter partitionedGroupInfoWriteFailed prometheus.Counter - - blockPartitionInfoMap map[ulid.ULID]*thanosblock.PartitionInfo } func NewShuffleShardingGrouper( @@ -140,7 +137,6 @@ func NewShuffleShardingGrouper( blockVisitMarkerWriteFailed: blockVisitMarkerWriteFailed, partitionedGroupInfoReadFailed: partitionedGroupInfoReadFailed, partitionedGroupInfoWriteFailed: partitionedGroupInfoWriteFailed, - blockPartitionInfoMap: make(map[ulid.ULID]*thanosblock.PartitionInfo), } } @@ -152,11 +148,6 @@ func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (re for _, b := range blocks { key := b.Thanos.GroupKey() mainGroups[key] = append(mainGroups[key], b) - _, err := g.getBlockPartitionInfo(b.ULID) // this will put fetched partition info in g.blockPartitionInfoMap - if err != nil { - level.Warn(g.logger).Log("msg", "unable to get block partition info", "block", b.ULID.String(), "err", err) - } - } // For each group, we have to further split it into set of blocks @@ -181,7 +172,7 @@ func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (re var groups []blocksGroup for _, mainBlocks := range mainGroups { - groups = append(groups, groupBlocksByCompactableRanges(mainBlocks, g.blockPartitionInfoMap, g.compactorCfg.BlockRanges.ToMilliseconds())...) + groups = append(groups, groupBlocksByCompactableRanges(mainBlocks, g.compactorCfg.BlockRanges.ToMilliseconds())...) } // Ensure groups are sorted by smallest range, oldest min time first. 
The rationale @@ -413,35 +404,6 @@ func (g *ShuffleShardingGrouper) groupBlocksByMinTime(group blocksGroup) map[int return blocksByMinTime } -func getBlockPartitionInfoFile(blockID ulid.ULID) string { - return path.Join(blockID.String(), thanosblock.PartitionInfoFilename) -} - -func (g *ShuffleShardingGrouper) getBlockPartitionInfo(blockID ulid.ULID) (*thanosblock.PartitionInfo, error) { - if _, ok := g.blockPartitionInfoMap[blockID]; ok { - return g.blockPartitionInfoMap[blockID], nil - } - rc, err := g.bkt.ReaderWithExpectedErrs(g.bkt.IsObjNotFoundErr).Get(g.ctx, getBlockPartitionInfoFile(blockID)) - if err != nil { - if g.bkt.IsObjNotFoundErr(err) { - defaultPartitionInfo := &thanosblock.PartitionInfo{ - PartitionedGroupID: 0, - PartitionCount: 1, - PartitionID: 0, - } - g.blockPartitionInfoMap[blockID] = defaultPartitionInfo - return defaultPartitionInfo, nil - } - return nil, err - } - partitionInfo, err := thanosblock.ReadPartitionInfo(rc) - if err != nil { - return nil, err - } - g.blockPartitionInfoMap[blockID] = partitionInfo - return partitionInfo, nil -} - func (g *ShuffleShardingGrouper) partitionBlocksGroup(partitionCount int, blocksByMinTime map[int64][]*metadata.Meta, rangeStart int64, rangeEnd int64) (map[int]blocksGroup, error) { partitionedGroups := make(map[int]blocksGroup) addToPartitionedGroups := func(blocks []*metadata.Meta, partitionID int) { @@ -464,9 +426,13 @@ func (g *ShuffleShardingGrouper) partitionBlocksGroup(partitionCount int, blocks // Case that number of blocks in this time interval is 2^n, should // use modulo calculation to find blocks for each partition ID. for _, block := range blocksInSameTimeInterval { - partitionInfo, err := g.getBlockPartitionInfo(block.ULID) - if err != nil { - return nil, err + partitionInfo := block.Thanos.PartitionInfo + if partitionInfo == nil { + // For legacy blocks with level > 1, treat PartitionID is always 0. + // So it can be included in every partition. + partitionInfo = &metadata.PartitionInfo{ + PartitionID: 0, + } } if numOfBlocks < partitionCount { for partitionID := partitionInfo.PartitionID; partitionID < partitionCount; partitionID += numOfBlocks { @@ -620,7 +586,7 @@ func (g blocksGroup) maxTime() int64 { // to smaller ranges. If a smaller range contains more than 1 block (and thus it should // be compacted), the larger range block group is not generated until each of its // smaller ranges have 1 block each at most. 
-func groupBlocksByCompactableRanges(blocks []*metadata.Meta, blocksPartitionInfo map[ulid.ULID]*thanosblock.PartitionInfo, ranges []int64) []blocksGroup { +func groupBlocksByCompactableRanges(blocks []*metadata.Meta, ranges []int64) []blocksGroup { if len(blocks) == 0 { return nil } @@ -648,14 +614,22 @@ func groupBlocksByCompactableRanges(blocks []*metadata.Meta, blocksPartitionInfo } } - firstBlockPartitionInfo, ok := blocksPartitionInfo[group.blocks[0].ULID] - if !ok { - continue nextGroup + firstBlockPartitionInfo := group.blocks[0].Thanos.PartitionInfo + if firstBlockPartitionInfo == nil { + firstBlockPartitionInfo = &metadata.PartitionInfo{ + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + } } for _, block := range group.blocks { - blockPartitionInfo, ok := blocksPartitionInfo[block.ULID] - if !ok { - continue nextGroup + blockPartitionInfo := block.Thanos.PartitionInfo + if blockPartitionInfo == nil { + blockPartitionInfo = &metadata.PartitionInfo{ + PartitionedGroupID: 0, + PartitionCount: 1, + PartitionID: 0, + } } if blockPartitionInfo.PartitionedGroupID <= 0 || blockPartitionInfo.PartitionedGroupID != firstBlockPartitionInfo.PartitionedGroupID { groups = append(groups, group) diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index b3716248f72..666beb44e1c 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -358,15 +358,6 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { visitMarkerFileContent, _ := json.Marshal(blockVisitMarker) bkt.MockGet(visitMarkerFile, string(visitMarkerFileContent), nil) } - for _, block := range testData.blocks { - partitionInfo := thanosblock.PartitionInfo{ - PartitionedGroupID: 0, - PartitionCount: 1, - PartitionID: 0, - } - partitionInfoContent, _ := json.Marshal(partitionInfo) - bkt.MockGet(getBlockPartitionInfoFile(block.ULID), string(partitionInfoContent), nil) - } bkt.MockUpload(mock.Anything, nil) bkt.MockGet(mock.Anything, "", nil) @@ -424,45 +415,65 @@ func TestGroupBlocksByCompactableRanges(t *testing.T) { block8Ulid := ulid.MustNew(8, nil) block9Ulid := ulid.MustNew(9, nil) - defaultPartitionInfo := &thanosblock.PartitionInfo{ + defaultPartitionInfo := &metadata.PartitionInfo{ PartitionedGroupID: 0, PartitionCount: 1, PartitionID: 0, } + partition3ID0 := &metadata.PartitionInfo{ + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 0, + } + + partition3ID1 := &metadata.PartitionInfo{ + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 1, + } + + partition3ID2 := &metadata.PartitionInfo{ + PartitionedGroupID: uint32(12345), + PartitionCount: 3, + PartitionID: 2, + } + + partition2ID0 := &metadata.PartitionInfo{ + PartitionedGroupID: uint32(54321), + PartitionCount: 2, + PartitionID: 0, + } + + partition2ID1 := &metadata.PartitionInfo{ + PartitionedGroupID: uint32(54321), + PartitionCount: 2, + PartitionID: 1, + } + tests := map[string]struct { - ranges []int64 - blocks []*metadata.Meta - blocksPartitionInfo map[ulid.ULID]*thanosblock.PartitionInfo - expected []blocksGroup + ranges []int64 + blocks []*metadata.Meta + expected []blocksGroup }{ "no input blocks": { - ranges: []int64{20}, - blocks: nil, - blocksPartitionInfo: nil, - expected: nil, + ranges: []int64{20}, + blocks: nil, + expected: nil, }, "only 1 block in input": { ranges: []int64{20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, - }, - 
blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: nil, }, "only 1 block for each range (single range)": { ranges: []int64{20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 60}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 60}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: nil, }, @@ -478,230 +489,154 @@ func TestGroupBlocksByCompactableRanges(t *testing.T) { "input blocks can be compacted on the 1st range only": { ranges: []int64{20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 25, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 40, MaxTime: 50}}, - {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 50, MaxTime: 60}}, - {BlockMeta: tsdb.BlockMeta{ULID: block7Ulid, MinTime: 60, MaxTime: 70}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, - block4Ulid: defaultPartitionInfo, - block5Ulid: defaultPartitionInfo, - block6Ulid: defaultPartitionInfo, - block7Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 25, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 40, MaxTime: 50}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 50, MaxTime: 60}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block7Ulid, MinTime: 60, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 25, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, 
MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 25, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, {rangeStart: 40, rangeEnd: 60, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 40, MaxTime: 50}}, - {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 50, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 40, MaxTime: 50}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 50, MaxTime: 60}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "input blocks can be compacted on the 2nd range only": { ranges: []int64{10, 20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 30, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 40, MaxTime: 60}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 60, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 70, MaxTime: 80}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, - block4Ulid: defaultPartitionInfo, - block5Ulid: defaultPartitionInfo, - block6Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 40, MaxTime: 60}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 60, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 70, MaxTime: 80}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, {rangeStart: 60, rangeEnd: 80, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 60, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 60, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 70, MaxTime: 80}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "input blocks can be compacted on a 
mix of 1st and 2nd ranges, guaranteeing no overlaps and giving preference to smaller ranges": { ranges: []int64{10, 20}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 7, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 30, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 40, MaxTime: 60}}, - {BlockMeta: tsdb.BlockMeta{ULID: block7Ulid, MinTime: 60, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{ULID: block8Ulid, MinTime: 70, MaxTime: 80}}, - {BlockMeta: tsdb.BlockMeta{ULID: block9Ulid, MinTime: 75, MaxTime: 80}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, - block4Ulid: defaultPartitionInfo, - block5Ulid: defaultPartitionInfo, - block6Ulid: defaultPartitionInfo, - block7Ulid: defaultPartitionInfo, - block8Ulid: defaultPartitionInfo, - block9Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 7, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block6Ulid, MinTime: 40, MaxTime: 60}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block7Ulid, MinTime: 60, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block8Ulid, MinTime: 70, MaxTime: 80}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block9Ulid, MinTime: 75, MaxTime: 80}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 0, rangeEnd: 10, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 7, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 7, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, {rangeStart: 70, rangeEnd: 80, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block8Ulid, MinTime: 70, MaxTime: 80}}, - {BlockMeta: tsdb.BlockMeta{ULID: block9Ulid, MinTime: 75, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{ULID: block8Ulid, MinTime: 70, MaxTime: 80}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block9Ulid, MinTime: 75, MaxTime: 80}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 20, 
MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "input blocks have already been compacted with the largest range": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 40, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 40, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: nil, }, "input blocks match the largest range but can be compacted because overlapping": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 40, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, - block4Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 40, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 80, rangeEnd: 120, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "a block with time range crossing two 1st level ranges should be NOT considered for 1st level compaction": { ranges: []int64{20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. 
- {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, - block4Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, // This block spans across two 1st level ranges. + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 30, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "a block with time range crossing two 1st level ranges should BE considered for 2nd level compaction": { ranges: []int64{20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 40}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, // This block spans across two 1st level ranges. 
+ {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 0, rangeEnd: 40, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 10, MaxTime: 30}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 20, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "a block with time range larger then the largest compaction range should NOT be considered for compaction": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 30, MaxTime: 150}}, // This block is larger then the largest compaction range. - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 70}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 80, MaxTime: 120}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: defaultPartitionInfo, - block2Ulid: defaultPartitionInfo, - block3Ulid: defaultPartitionInfo, - block4Ulid: defaultPartitionInfo, - block5Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 40}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 30, MaxTime: 150}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, // This block is larger then the largest compaction range. 
+ {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 40, MaxTime: 70}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 80, rangeEnd: 120, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 80, MaxTime: 120}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "a group with all blocks having same partitioned group id should be ignored": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID0}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID1}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID2}}, {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: { - PartitionedGroupID: uint32(12345), - PartitionCount: 3, - PartitionID: 0, - }, - block2Ulid: { - PartitionedGroupID: uint32(12345), - PartitionCount: 3, - PartitionID: 1, - }, - block3Ulid: { - PartitionedGroupID: uint32(12345), - PartitionCount: 3, - PartitionID: 2, - }, - block4Ulid: defaultPartitionInfo, - block5Ulid: defaultPartitionInfo, - }, expected: []blocksGroup{ {rangeStart: 10, rangeEnd: 20, blocks: []*metadata.Meta{ {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, @@ -712,86 +647,40 @@ func TestGroupBlocksByCompactableRanges(t *testing.T) { "a group with all blocks having partitioned group id is 0 should not be ignored": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: { - PartitionedGroupID: 0, - PartitionCount: 1, - PartitionID: 0, - }, - block2Ulid: { - PartitionedGroupID: 0, - PartitionCount: 1, - PartitionID: 0, - }, - block3Ulid: { - 
PartitionedGroupID: 0, - PartitionCount: 1, - PartitionID: 0, - }, - block4Ulid: defaultPartitionInfo, - block5Ulid: defaultPartitionInfo, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }, expected: []blocksGroup{ {rangeStart: 0, rangeEnd: 10, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, {rangeStart: 10, rangeEnd: 20, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20}, Thanos: metadata.Thanos{PartitionInfo: defaultPartitionInfo}}, }}, }, }, "a group with blocks from two different partitioned groups": { ranges: []int64{10, 20, 40}, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - }, - blocksPartitionInfo: map[ulid.ULID]*thanosblock.PartitionInfo{ - block1Ulid: { - PartitionedGroupID: uint32(12345), - PartitionCount: 3, - PartitionID: 0, - }, - block2Ulid: { - PartitionedGroupID: uint32(12345), - PartitionCount: 3, - PartitionID: 1, - }, - block3Ulid: { - PartitionedGroupID: uint32(12345), - PartitionCount: 3, - PartitionID: 2, - }, - block4Ulid: { - PartitionedGroupID: uint32(54321), - PartitionCount: 2, - PartitionID: 0, - }, - block5Ulid: { - PartitionedGroupID: uint32(54321), - PartitionCount: 2, - PartitionID: 1, - }, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID0}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, 
MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID1}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID2}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition2ID0}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition2ID1}}, }, expected: []blocksGroup{ {rangeStart: 0, rangeEnd: 20, blocks: []*metadata.Meta{ - {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, - {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}}, + {BlockMeta: tsdb.BlockMeta{ULID: block1Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID0}}, + {BlockMeta: tsdb.BlockMeta{ULID: block2Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID1}}, + {BlockMeta: tsdb.BlockMeta{ULID: block3Ulid, MinTime: 0, MaxTime: 10, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition3ID2}}, + {BlockMeta: tsdb.BlockMeta{ULID: block4Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition2ID0}}, + {BlockMeta: tsdb.BlockMeta{ULID: block5Ulid, MinTime: 10, MaxTime: 20, Compaction: tsdb.BlockMetaCompaction{Level: 2}}, Thanos: metadata.Thanos{PartitionInfo: partition2ID1}}, }}, }, }, @@ -799,7 +688,7 @@ func TestGroupBlocksByCompactableRanges(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - assert.Equal(t, testData.expected, groupBlocksByCompactableRanges(testData.blocks, testData.blocksPartitionInfo, testData.ranges)) + assert.Equal(t, testData.expected, groupBlocksByCompactableRanges(testData.blocks, testData.ranges)) }) } } @@ -1264,11 +1153,10 @@ func TestGroupPartitioning(t *testing.T) { } block.Stats.NumSeries = uint64(testData.seriesCount) testBlocks = append(testBlocks, block) - partitionInfo := thanosblock.PartitionInfo{ + partitionInfo := &metadata.PartitionInfo{ PartitionID: partitionID, } - partitionInfoContent, _ := json.Marshal(partitionInfo) - bkt.MockGet(getBlockPartitionInfoFile(block.ULID), string(partitionInfoContent), nil) + block.Thanos.PartitionInfo = partitionInfo } testGroup := blocksGroup{ rangeStart: testData.rangeStart, @@ -1383,12 +1271,10 @@ func TestPartitionStrategyChange_shouldUseOriginalPartitionedGroup(t *testing.T) } block.Stats.NumSeries = uint64(seriesCount) updatedTestBlocks = append(updatedTestBlocks, block) - partitionInfo := thanosblock.PartitionInfo{ + partitionInfo := &metadata.PartitionInfo{ PartitionID: partitionID, } - partitionInfoContent, _ := json.Marshal(partitionInfo) - err 
:= bkt.Upload(context.Background(), getBlockPartitionInfoFile(block.ULID), bytes.NewReader(partitionInfoContent)) - require.NoError(t, err) + block.Thanos.PartitionInfo = partitionInfo } testGroup := blocksGroup{ rangeStart: testRangeStart, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index ddcefc20d79..a93bd83105f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -394,10 +394,10 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { - return c.CompactWithPartition(dest, dirs, open, 1, 0) + return c.CompactWithAdditionalPostings(dest, dirs, open, nil) } -func (c *LeveledCompactor) CompactWithPartition(dest string, dirs []string, open []*Block, partitionCount int, partitionID int) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) CompactWithAdditionalPostings(dest string, dirs []string, open []*Block, additionalPostingsFunc AdditionalPostingsFunc) (uid ulid.ULID, err error) { var ( blocks []BlockReader bs []*Block @@ -441,7 +441,7 @@ func (c *LeveledCompactor) CompactWithPartition(dest string, dirs []string, open uid = ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, partitionID, partitionCount, blocks...) + err = c.write(dest, meta, additionalPostingsFunc, blocks...) if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { @@ -507,7 +507,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p } } - err := c.write(dest, meta, 0, 1, b) + err := c.write(dest, meta, nil, b) if err != nil { return uid, err } @@ -552,7 +552,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error { } // write creates a new block that is the union of the provided blocks into dir. -func (c *LeveledCompactor) write(dest string, meta *BlockMeta, partitionId int, partitionCount int, blocks ...BlockReader) (err error) { +func (c *LeveledCompactor) write(dest string, meta *BlockMeta, additionalPostingsFunc AdditionalPostingsFunc, blocks ...BlockReader) (err error) { dir := filepath.Join(dest, meta.ULID.String()) tmp := dir + tmpForCreationBlockDirSuffix var closers []io.Closer @@ -600,7 +600,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, partitionId int, } closers = append(closers, indexw) - if err := c.populateBlock(blocks, meta, indexw, chunkw, partitionId, partitionCount); err != nil { + if err := c.populateBlock(blocks, meta, indexw, chunkw, additionalPostingsFunc); err != nil { return errors.Wrap(err, "populate block") } @@ -668,7 +668,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, partitionId int, // populateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. 
-func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, partitionId int, partitionCount int) (err error) { +func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, additionalPostingsFunc AdditionalPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } @@ -736,12 +736,15 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } all = indexr.SortedPostings(all) g.Go(func() error { - shardStart := time.Now() - shardedPosting := index.NewShardedPosting(all, uint64(partitionCount), uint64(partitionId), indexr.Series) - fmt.Printf("finished sharding, duration: %v\n", time.Since(shardStart)) + if additionalPostingsFunc != nil { + all = additionalPostingsFunc.GetPostings(all, indexr) + } + //shardStart := time.Now() + //shardedPosting := index.NewShardedPosting(all, uint64(partitionCount), uint64(partitionId), indexr.Series) + //fmt.Printf("finished sharding, duration: %v\n", time.Since(shardStart)) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. setsMtx.Lock() - sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, shardedPosting, meta.MinTime, meta.MaxTime-1, false)) + sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) setsMtx.Unlock() return nil }) @@ -827,3 +830,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, return nil } + +type AdditionalPostingsFunc interface { + GetPostings(originalPostings index.Postings, indexReader IndexReader) index.Postings +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 6ccb7518e86..7886e5d8693 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -37,8 +37,6 @@ const ( IndexHeaderFilename = "index-header" // ChunksDirname is the known dir name for chunks with compressed samples. ChunksDirname = "chunks" - // PartitionInfoFilename is JSON filename for partition information. - PartitionInfoFilename = "partition-info.json" // DebugMetas is a directory for debug meta files that happen in the past. Useful for debugging. DebugMetas = "debug/metas" @@ -155,14 +153,6 @@ func upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return cleanUp(logger, bkt, id, errors.Wrap(err, "upload index")) } - // level 1 blocks should not have partition info file - if meta.Compaction.Level > 1 { - if err := objstore.UploadFile(ctx, logger, bkt, filepath.Join(bdir, PartitionInfoFilename), path.Join(id.String(), PartitionInfoFilename)); err != nil { - // Don't call cleanUp here. Partition info file acts in a similar way as meta file. - return errors.Wrap(err, "upload partition info") - } - } - // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file to be pending uploads. if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), strings.NewReader(metaEncoded.String())); err != nil { // Don't call cleanUp here. 
Despite getting error, meta.json may have been uploaded in certain cases, diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go index 787a03c241b..797ccd7860d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go @@ -90,6 +90,9 @@ type Thanos struct { // Rewrites is present when any rewrite (deletion, relabel etc) were applied to this block. Optional. Rewrites []Rewrite `json:"rewrites,omitempty"` + + // PartitionInfo is used for partitioning compaction to keep track of partition information of result block. Optional. + PartitionInfo *PartitionInfo `json:"partition_info,omitempty"` } type Rewrite struct { @@ -101,6 +104,12 @@ type Rewrite struct { RelabelsApplied []*relabel.Config `json:"relabels_applied,omitempty"` } +type PartitionInfo struct { + PartitionedGroupID uint32 `json:"partitionedGroupID"` + PartitionCount int `json:"partitionCount"` + PartitionID int `json:"partitionID"` +} + type Matchers []*labels.Matcher func (m *Matchers) UnmarshalYAML(value *yaml.Node) (err error) { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go b/vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go deleted file mode 100644 index 24602cdbd9c..00000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/block/partition_info.go +++ /dev/null @@ -1,79 +0,0 @@ -package block - -import ( - "encoding/json" - "io" - "os" - "path/filepath" - - "github.com/go-kit/log" - "github.com/prometheus/prometheus/tsdb/fileutil" - - "github.com/thanos-io/thanos/pkg/runutil" -) - -type PartitionInfo struct { - PartitionedGroupID uint32 `json:"partitionedGroupID"` - PartitionCount int `json:"partitionCount"` - PartitionID int `json:"partitionID"` -} - -// WriteToDir writes the encoded partition info into /partition-info.json. -func (p PartitionInfo) WriteToDir(logger log.Logger, dir string) error { - // Make any changes to the file appear atomic. - path := filepath.Join(dir, PartitionInfoFilename) - tmp := path + ".tmp" - - f, err := os.Create(tmp) - if err != nil { - return err - } - - if err := p.Write(f); err != nil { - runutil.CloseWithLogOnErr(logger, f, "close partition info") - return err - } - if err := f.Close(); err != nil { - return err - } - return renameFile(logger, tmp, path) -} - -// Write writes the given encoded partition info to writer. -func (p PartitionInfo) Write(w io.Writer) error { - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - return enc.Encode(&p) -} - -func renameFile(logger log.Logger, from, to string) error { - if err := os.RemoveAll(to); err != nil { - return err - } - if err := os.Rename(from, to); err != nil { - return err - } - - // Directory was renamed; sync parent dir to persist rename. - pdir, err := fileutil.OpenDir(filepath.Dir(to)) - if err != nil { - return err - } - - if err = fileutil.Fdatasync(pdir); err != nil { - runutil.CloseWithLogOnErr(logger, pdir, "close dir") - return err - } - return pdir.Close() -} - -// Read the block partition info from the given reader. 
-func ReadPartitionInfo(rc io.ReadCloser) (_ *PartitionInfo, err error) { - defer runutil.ExhaustCloseWithErrCapture(&err, rc, "close partition info JSON") - - var p PartitionInfo - if err = json.NewDecoder(rc).Decode(&p); err != nil { - return nil, err - } - return &p, nil -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 9f70700fecc..26a447834cf 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -764,6 +764,22 @@ func (c DefaultCompactionCompleteChecker) IsComplete(_ *Group, _ ulid.ULID) bool return true } +type CompactionLifecycleCallback interface { + PreCompactionCallback(group *Group, toCompactBlocks []*metadata.Meta) error + PostCompactionCallback(group *Group, blockID ulid.ULID) error +} + +type NoopCompactionLifecycleCallback struct { +} + +func (c NoopCompactionLifecycleCallback) PreCompactionCallback(_ *Group, _ []*metadata.Meta) error { + return nil +} + +func (c NoopCompactionLifecycleCallback) PostCompactionCallback(_ *Group, _ ulid.ULID) error { + return nil +} + // Compactor provides compaction against an underlying storage of time series data. // This is similar to tsdb.Compactor just without Plan method. // TODO(bwplotka): Split the Planner from Compactor on upstream as well, so we can import it. @@ -781,13 +797,12 @@ type Compactor interface { // * The source dirs are marked Deletable. // * Returns empty ulid.ULID{}. Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) - - CompactWithPartition(dest string, dirs []string, open []*tsdb.Block, partitionCount int, partitionID int) (ulid.ULID, error) + CompactWithAdditionalPostings(dest string, dirs []string, open []*tsdb.Block, additionalPostingsProvider tsdb.AdditionalPostingsProvider) (ulid.ULID, error) } // Compact plans and runs a single compaction against the group. The compacted result // is uploaded into the bucket the blocks were retrieved from. 
-func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker) (shouldRerun bool, compID ulid.ULID, rerr error) { +func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker, compactionLifecycleCallback CompactionLifecycleCallback) (shouldRerun bool, compID ulid.ULID, rerr error) { cg.compactionRunsStarted.Inc() subDir := filepath.Join(dir, cg.Key()) @@ -809,7 +824,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp errChan := make(chan error, 1) err := tracing.DoInSpanWithErr(ctx, "compaction_group", func(ctx context.Context) (err error) { - shouldRerun, compID, err = cg.compact(ctx, subDir, planner, comp, completeChecker, errChan) + shouldRerun, compID, err = cg.compact(ctx, subDir, planner, comp, completeChecker, compactionLifecycleCallback, errChan) return err }, opentracing.Tags{"group.key": cg.Key()}) errChan <- err @@ -1013,7 +1028,7 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, return nil } -func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker, errChan chan error) (shouldRerun bool, compID ulid.ULID, _ error) { +func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker, compactionLifecycleCallback CompactionLifecycleCallback, errChan chan error) (shouldRerun bool, compID ulid.ULID, _ error) { cg.mtx.Lock() defer cg.mtx.Unlock() @@ -1046,6 +1061,12 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp return false, ulid.ULID{}, nil } + level.Info(cg.logger).Log("msg", "running pre compaction callback", "plan", fmt.Sprintf("%v", toCompact)) + if err := compactionLifecycleCallback.PreCompactionCallback(cg, toCompact); err != nil { + return false, ulid.ULID{}, errors.Wrapf(err, "failed to run pre compaction callback for plan: %s", fmt.Sprintf("%v", toCompact)) + } + level.Info(cg.logger).Log("msg", "finished running pre compaction callback", "plan", fmt.Sprintf("%v", toCompact)) + level.Info(cg.logger).Log("msg", "compaction available and planned; downloading blocks", "plan", fmt.Sprintf("%v", toCompact)) // Due to #183 we verify that none of the blocks in the plan have overlapping sources. 
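The CompactionLifecycleCallback interface introduced earlier in this patch gives callers two hooks around each group compaction: PreCompactionCallback runs once a plan has been produced but before any blocks are downloaded, and PostCompactionCallback runs after the result block has been uploaded (its error is wrapped in retry, so the whole group is retried). The patch itself only wires in NoopCompactionLifecycleCallback; the sketch below is a hypothetical illustration of what a custom implementation could look like, and the loggingCallback type is not part of this change.

```go
package compactorcallback

import (
	"fmt"

	"github.com/oklog/ulid"
	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/compact"
)

// loggingCallback is a hypothetical compact.CompactionLifecycleCallback that only logs.
type loggingCallback struct{}

func (loggingCallback) PreCompactionCallback(g *compact.Group, toCompact []*metadata.Meta) error {
	// Called after planning, before the planned blocks are downloaded and compacted.
	fmt.Printf("group %s: about to compact %d blocks\n", g.Key(), len(toCompact))
	return nil
}

func (loggingCallback) PostCompactionCallback(g *compact.Group, blockID ulid.ULID) error {
	// Called after the result block has been uploaded; a non-nil error makes the group retry.
	fmt.Printf("group %s: uploaded result block %s\n", g.Key(), blockID.String())
	return nil
}
```

A callback like this would be passed to NewBucketCompactorWithCheckerAndCallback in the position where the Cortex compactor currently passes compact.NoopCompactionLifecycleCallback{}.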
@@ -1116,7 +1137,11 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp begin = time.Now() if err := tracing.DoInSpanWithErr(ctx, "compaction", func(ctx context.Context) (e error) { - compID, e = comp.CompactWithPartition(dir, toCompactDirs, nil, cg.partitionCount, cg.partitionID) + additionalPostingsFunc := &ShardedPostingsFunc{ + partitionID: uint64(cg.partitionID), + partitionCount: uint64(cg.partitionCount), + } + compID, e = comp.CompactWithAdditionalPostings(dir, toCompactDirs, nil, additionalPostingsFunc) return e }); err != nil { return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", toCompactDirs)) @@ -1149,20 +1174,16 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Downsample: metadata.ThanosDownsample{Resolution: cg.resolution}, Source: metadata.CompactorSource, SegmentFiles: block.GetSegmentFiles(bdir), + PartitionInfo: &metadata.PartitionInfo{ + PartitionedGroupID: cg.partitionedGroupID, + PartitionCount: cg.partitionCount, + PartitionID: cg.partitionID, + }, }, nil) if err != nil { return false, ulid.ULID{}, errors.Wrapf(err, "failed to finalize the block %s", bdir) } - partitionInfo := block.PartitionInfo{ - PartitionedGroupID: cg.partitionedGroupID, - PartitionCount: cg.partitionCount, - PartitionID: cg.partitionID, - } - if err := partitionInfo.WriteToDir(cg.logger, bdir); err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "failed to put partition info for the block %s", bdir) - } - if err = os.Remove(filepath.Join(bdir, "tombstones")); err != nil { return false, ulid.ULID{}, errors.Wrap(err, "remove tombstones") } @@ -1193,6 +1214,12 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp } level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds()) + level.Info(cg.logger).Log("msg", "running post compaction callback", "result_block", compID) + if err := compactionLifecycleCallback.PostCompactionCallback(cg, compID); err != nil { + return false, ulid.ULID{}, retry(errors.Wrapf(err, "failed to run post compaction callback for result block %s", compID)) + } + level.Info(cg.logger).Log("msg", "finished running post compaction callback", "result_block", compID) + // Mark for deletion the blocks we just compacted from the group and bucket so they do not get included // into the next planning cycle. // Eventually the block we just uploaded should get synced into the group again (including sync-delay). 
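With the standalone partition-info.json file removed by this patch, the partition details for a result block now travel inside its meta.json, in the Thanos section (see the metadata.PartitionInfo struct and its JSON tags added earlier in this patch). As a rough illustration, assuming a group with partitioned group ID 12345 that was split into 3 partitions, the relevant fragment of the uploaded meta.json would look like this (surrounding meta.json fields elided, values illustrative):

```json
{
  "thanos": {
    "partition_info": {
      "partitionedGroupID": 12345,
      "partitionCount": 3,
      "partitionID": 1
    }
  }
}
```

Blocks compacted before this change simply omit partition_info, which is why the grouper treats a nil PartitionInfo as a single-partition block (partition count 1, partition ID 0).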
@@ -1236,6 +1263,7 @@ type BucketCompactor struct { comp Compactor planner Planner completeChecker ExtraCompactionCompleteChecker + compactionLifecycleCallback CompactionLifecycleCallback compactDir string bkt objstore.Bucket concurrency int @@ -1257,13 +1285,14 @@ func NewBucketCompactor( if concurrency <= 0 { return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency) } - return NewBucketCompactorWithCompleteChecker( + return NewBucketCompactorWithCheckerAndCallback( logger, sy, grouper, planner, comp, DefaultCompactionCompleteChecker{}, + NoopCompactionLifecycleCallback{}, compactDir, bkt, concurrency, @@ -1271,13 +1300,14 @@ func NewBucketCompactor( ) } -func NewBucketCompactorWithCompleteChecker( +func NewBucketCompactorWithCheckerAndCallback( logger log.Logger, sy *Syncer, grouper Grouper, planner Planner, comp Compactor, completeChecker ExtraCompactionCompleteChecker, + compactionLifecycleCallback CompactionLifecycleCallback, compactDir string, bkt objstore.Bucket, concurrency int, @@ -1293,6 +1323,7 @@ func NewBucketCompactorWithCompleteChecker( planner: planner, comp: comp, completeChecker: completeChecker, + compactionLifecycleCallback: compactionLifecycleCallback, compactDir: compactDir, bkt: bkt, concurrency: concurrency, @@ -1333,7 +1364,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { go func() { defer wg.Done() for g := range groupChan { - shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp, c.completeChecker) + shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp, c.completeChecker, c.compactionLifecycleCallback) if err == nil { if shouldRerunGroup { mtx.Lock() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/sharded_postings.go b/vendor/github.com/thanos-io/thanos/pkg/compact/sharded_postings.go new file mode 100644 index 00000000000..5ae560e4d86 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/sharded_postings.go @@ -0,0 +1,69 @@ +package compact + +import ( + "fmt" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/index" +) + +type ShardedPostingsFunc struct { + partitionCount uint64 + partitionID uint64 +} + +func (p *ShardedPostingsFunc) GetPostings(originalPostings index.Postings, indexReader tsdb.IndexReader) index.Postings { + return NewShardedPosting(originalPostings, p.partitionCount, p.partitionID, indexReader.Series) +} + +type ShardedPostings struct { + //postings Postings + //labelsFn func(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error + series []storage.SeriesRef + cur storage.SeriesRef + initialized bool + + //bufChks []chunks.Meta + //bufLbls labels.Labels +} + +func NewShardedPosting(postings index.Postings, partitionCount uint64, partitionId uint64, labelsFn func(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error) *ShardedPostings { + bufChks := make([]chunks.Meta, 0) + bufLbls := make(labels.Labels, 0) + series := make([]storage.SeriesRef, 0) + for postings.Next() { + err := labelsFn(postings.At(), &bufLbls, &bufChks) + if err != nil { + fmt.Printf("err: %v", err) + } + if bufLbls.Hash()%partitionCount == partitionId { + posting := postings.At() + series = append(series, posting) + } + } + return &ShardedPostings{series: series, initialized: false} +} + +func (p *ShardedPostings) Next() 
bool { + if len(p.series) > 0 { + p.cur, p.series = p.series[0], p.series[1:] + return true + } + return false +} + +func (p *ShardedPostings) At() storage.SeriesRef { + return p.cur +} + +func (p *ShardedPostings) Seek(v storage.SeriesRef) bool { + fmt.Println("ran seek") + return false +} + +func (p *ShardedPostings) Err() error { + return nil +} From e83e0793bf4b8c9900a85621576ef74b24f93571 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 9 Dec 2022 12:15:57 -0800 Subject: [PATCH 004/133] clean up - https://github.com/cortexproject/cortex/pull/2599 (#5031) Signed-off-by: Alex Le --- pkg/storage/tsdb/config.go | 4 ---- pkg/storegateway/bucket_stores.go | 1 - pkg/storegateway/gateway_test.go | 1 - 3 files changed, 6 deletions(-) diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index c8009066364..11a1976b3de 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -24,10 +24,6 @@ const ( // set when shipping blocks to the storage. IngesterIDExternalLabel = "__ingester_id__" - // ShardIDExternalLabel is the external label containing the shard ID - // and can be used to shard blocks. - ShardIDExternalLabel = "__shard_id__" - // How often are open TSDBs checked for being idle and closed. DefaultCloseIdleTSDBInterval = 5 * time.Minute diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index 22b7c6bea5f..859499862f8 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -431,7 +431,6 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro NewReplicaLabelRemover(userLogger, []string{ tsdb.TenantIDExternalLabel, tsdb.IngesterIDExternalLabel, - tsdb.ShardIDExternalLabel, }), // Remove Cortex external labels so that they're not injected when querying blocks. }...) diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index f6c3cae103b..985e2ef2954 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -826,7 +826,6 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { Labels: map[string]string{ cortex_tsdb.TenantIDExternalLabel: userID, cortex_tsdb.IngesterIDExternalLabel: fmt.Sprintf("ingester-%d", idx), - cortex_tsdb.ShardIDExternalLabel: fmt.Sprintf("shard-%d", idx), }, Source: metadata.TestSource, } From 15a91a68de68fbc5093da197a6e0b273f9178957 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 9 Dec 2022 15:06:08 -0800 Subject: [PATCH 005/133] clean up - https://github.com/cortexproject/cortex/pull/2599 (#5031) Signed-off-by: Alex Le From 1a156759cabac6c8ff374d1aa829cbf4a85cba93 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 9 Dec 2022 16:52:53 -0800 Subject: [PATCH 006/133] increase queue duration histogram bucket (#5029) Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/frontend/v1/frontend.go | 2 +- pkg/scheduler/scheduler.go | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f254c731af..147fa5b38de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ * [ENHANCEMENT] Querier: limit series query to only ingesters if `start` param is not specified. #4976 * [ENHANCEMENT] Query-frontend/scheduler: add a new limit `frontend.max-outstanding-requests-per-tenant` for configuring queue size per tenant. 
Started deprecating two flags `-query-scheduler.max-outstanding-requests-per-tenant` and `-querier.max-outstanding-requests-per-tenant`, and change their value default to 0. Now if both the old flag and new flag are specified, the old flag's queue size will be picked. #5005 * [ENHANCEMENT] Query-tee: Add `/api/v1/query_exemplars` API endpoint support. #5010 +* [ENHANCEMENT] Query Frontend/Query Scheduler: Increase upper bound to 60s for queue duration histogram metric. #5029 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000 diff --git a/pkg/frontend/v1/frontend.go b/pkg/frontend/v1/frontend.go index 541e17dc4b4..5666cbac5eb 100644 --- a/pkg/frontend/v1/frontend.go +++ b/pkg/frontend/v1/frontend.go @@ -108,7 +108,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_query_frontend_queue_duration_seconds", Help: "Time spend by requests queued.", - Buckets: prometheus.DefBuckets, + Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 60}, }), } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 115de295c31..af597c8864f 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -116,7 +116,7 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer promethe s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_query_scheduler_queue_duration_seconds", Help: "Time spend by requests in queue before getting picked up by a querier.", - Buckets: prometheus.DefBuckets, + Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 60}, }) s.connectedQuerierClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ Name: "cortex_query_scheduler_connected_querier_clients", From ace5cd1bece80f5d20b762730fcf938e0c64659f Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 9 Dec 2022 16:54:50 -0800 Subject: [PATCH 007/133] Add counter metrics for total requests going to queue (#5030) * add counter metrics for total requests going to queue Signed-off-by: Ben Ye * update changelog Signed-off-by: Ben Ye * fix lint Signed-off-by: Ben Ye * lint Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/frontend/v1/frontend.go | 2 +- pkg/frontend/v1/frontend_test.go | 1 + pkg/scheduler/queue/queue.go | 11 +++++++++-- pkg/scheduler/queue/queue_test.go | 3 +++ pkg/scheduler/scheduler.go | 3 ++- 6 files changed, 17 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 147fa5b38de..0a437992eb4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000 * [FEATURE] Query Frontend: Log query params in query frontend even if error happens. #5005 * [FEATURE] Ingester: Enable snapshotting of In-memory TSDB on disk during shutdown via `-blocks-storage.tsdb.memory-snapshot-on-shutdown`. #5011 +* [FEATURE] Query Frontend/Scheduler: Add a new counter metric `cortex_request_queue_requests_total` for total requests going to queue. 
#5030 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 ## 1.14.0 2022-12-02 diff --git a/pkg/frontend/v1/frontend.go b/pkg/frontend/v1/frontend.go index 5666cbac5eb..babc5357bc5 100644 --- a/pkg/frontend/v1/frontend.go +++ b/pkg/frontend/v1/frontend.go @@ -112,7 +112,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist }), } - f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, f.queueLength, f.discardedRequests, f.limits) + f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, f.queueLength, f.discardedRequests, f.limits, registerer) f.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics) var err error diff --git a/pkg/frontend/v1/frontend_test.go b/pkg/frontend/v1/frontend_test.go index 3c512aef800..cb0457b0bd9 100644 --- a/pkg/frontend/v1/frontend_test.go +++ b/pkg/frontend/v1/frontend_test.go @@ -133,6 +133,7 @@ func TestFrontendCheckReady(t *testing.T) { prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}), limits, + nil, ), } for i := 0; i < tt.connectedClients; i++ { diff --git a/pkg/scheduler/queue/queue.go b/pkg/scheduler/queue/queue.go index bdaad09ee6d..21fe69b25ce 100644 --- a/pkg/scheduler/queue/queue.go +++ b/pkg/scheduler/queue/queue.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/util/services" @@ -58,15 +59,20 @@ type RequestQueue struct { stopped bool queueLength *prometheus.GaugeVec // Per user and reason. + totalRequests *prometheus.CounterVec // Per user. discardedRequests *prometheus.CounterVec // Per user. 
} -func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, queueLength *prometheus.GaugeVec, discardedRequests *prometheus.CounterVec, limits Limits) *RequestQueue { +func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, queueLength *prometheus.GaugeVec, discardedRequests *prometheus.CounterVec, limits Limits, registerer prometheus.Registerer) *RequestQueue { q := &RequestQueue{ queues: newUserQueues(maxOutstandingPerTenant, forgetDelay, limits), connectedQuerierWorkers: atomic.NewInt32(0), queueLength: queueLength, - discardedRequests: discardedRequests, + totalRequests: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_request_queue_requests_total", + Help: "Total number of query requests going to the request queue.", + }, []string{"user"}), + discardedRequests: discardedRequests, } q.cond = sync.NewCond(&q.mtx) @@ -94,6 +100,7 @@ func (q *RequestQueue) EnqueueRequest(userID string, req Request, maxQueriers in return errors.New("no queue found") } + q.totalRequests.WithLabelValues(userID).Inc() select { case queue <- req: q.queueLength.WithLabelValues(userID).Inc() diff --git a/pkg/scheduler/queue/queue_test.go b/pkg/scheduler/queue/queue_test.go index ef02a492f3b..8e8f0a94d08 100644 --- a/pkg/scheduler/queue/queue_test.go +++ b/pkg/scheduler/queue/queue_test.go @@ -27,6 +27,7 @@ func BenchmarkGetNextRequest(b *testing.B) { prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}), MockLimits{MaxOutstanding: 100}, + nil, ) queues = append(queues, queue) @@ -85,6 +86,7 @@ func BenchmarkQueueRequest(b *testing.B) { prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}), MockLimits{MaxOutstanding: 100}, + nil, ) for ix := 0; ix < queriers; ix++ { @@ -119,6 +121,7 @@ func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBe prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}), MockLimits{MaxOutstanding: 100}, + nil, ) // Start the queue service. diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index af597c8864f..d8350c69c6b 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -111,7 +111,8 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer promethe Name: "cortex_query_scheduler_discarded_requests_total", Help: "Total number of query requests discarded.", }, []string{"user"}) - s.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, s.queueLength, s.discardedRequests, s.limits) + + s.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, s.queueLength, s.discardedRequests, s.limits, registerer) s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_query_scheduler_queue_duration_seconds", From 3c33a7d2868af4d3b24f663dee2b8ebf8864fd30 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Dec 2022 23:24:22 -0800 Subject: [PATCH 008/133] Bump github.com/lib/pq from 1.3.0 to 1.10.7 (#5023) Bumps [github.com/lib/pq](https://github.com/lib/pq) from 1.3.0 to 1.10.7. 
- [Release notes](https://github.com/lib/pq/releases) - [Commits](https://github.com/lib/pq/compare/v1.3.0...v1.10.7) --- updated-dependencies: - dependency-name: github.com/lib/pq dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/lib/pq/.gitignore | 2 + vendor/github.com/lib/pq/.travis.sh | 73 ------ vendor/github.com/lib/pq/.travis.yml | 44 ---- vendor/github.com/lib/pq/CONTRIBUTING.md | 29 --- vendor/github.com/lib/pq/README.md | 81 +----- vendor/github.com/lib/pq/array.go | 141 +++++++++- vendor/github.com/lib/pq/conn.go | 275 +++++++++++++++----- vendor/github.com/lib/pq/conn_go18.go | 108 +++++++- vendor/github.com/lib/pq/connector.go | 12 +- vendor/github.com/lib/pq/copy.go | 129 ++++++--- vendor/github.com/lib/pq/doc.go | 23 ++ vendor/github.com/lib/pq/encode.go | 44 +++- vendor/github.com/lib/pq/error.go | 20 +- vendor/github.com/lib/pq/krb.go | 27 ++ vendor/github.com/lib/pq/notice.go | 72 +++++ vendor/github.com/lib/pq/notify.go | 63 ++++- vendor/github.com/lib/pq/ssl.go | 35 ++- vendor/github.com/lib/pq/ssl_permissions.go | 81 +++++- vendor/github.com/lib/pq/ssl_windows.go | 1 + vendor/github.com/lib/pq/url.go | 4 +- vendor/github.com/lib/pq/user_other.go | 10 + vendor/github.com/lib/pq/user_posix.go | 3 +- vendor/modules.txt | 4 +- 25 files changed, 933 insertions(+), 354 deletions(-) delete mode 100644 vendor/github.com/lib/pq/.travis.sh delete mode 100644 vendor/github.com/lib/pq/.travis.yml delete mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md create mode 100644 vendor/github.com/lib/pq/krb.go create mode 100644 vendor/github.com/lib/pq/notice.go create mode 100644 vendor/github.com/lib/pq/user_other.go diff --git a/go.mod b/go.mod index 8b420ea7fca..05b054a393a 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 - github.com/lib/pq v1.3.0 + github.com/lib/pq v1.10.7 github.com/minio/minio-go/v7 v7.0.37 github.com/mitchellh/go-wordwrap v1.0.0 github.com/oklog/ulid v1.3.1 diff --git a/go.sum b/go.sum index 1e5b0943e68..a7fcaa7fd5e 100644 --- a/go.sum +++ b/go.sum @@ -661,8 +661,8 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34= diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore index 0f1d00e1196..3243952a4d6 100644 --- 
a/vendor/github.com/lib/pq/.gitignore +++ b/vendor/github.com/lib/pq/.gitignore @@ -2,3 +2,5 @@ *.test *~ *.swp +.idea +.vscode \ No newline at end of file diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh deleted file mode 100644 index ebf447030be..00000000000 --- a/vendor/github.com/lib/pq/.travis.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash - -set -eu - -client_configure() { - sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key -} - -pgdg_repository() { - local sourcelist='sources.list.d/postgresql.list' - - curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - - echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" - sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update -} - -postgresql_configure() { - sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config - local all all trust - hostnossl all pqgossltest 127.0.0.1/32 reject - hostnossl all pqgosslcert 127.0.0.1/32 reject - hostssl all pqgossltest 127.0.0.1/32 trust - hostssl all pqgosslcert 127.0.0.1/32 cert - host all all 127.0.0.1/32 trust - hostnossl all pqgossltest ::1/128 reject - hostnossl all pqgosslcert ::1/128 reject - hostssl all pqgossltest ::1/128 trust - hostssl all pqgosslcert ::1/128 cert - host all all ::1/128 trust - config - - xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates - certs/root.crt - certs/server.crt - certs/server.key - certificates - - sort -VCu <<-versions || - $PGVERSION - 9.2 - versions - sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config - ssl_ca_file = 'root.crt' - ssl_cert_file = 'server.crt' - ssl_key_file = 'server.key' - config - - echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null - - sudo service postgresql restart -} - -postgresql_install() { - xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages - postgresql-$PGVERSION - postgresql-server-dev-$PGVERSION - postgresql-contrib-$PGVERSION - packages -} - -postgresql_uninstall() { - sudo service postgresql stop - xargs sudo apt-get -y --purge remove <<-packages - libpq-dev - libpq5 - postgresql - postgresql-client-common - postgresql-common - packages - sudo rm -rf /var/lib/postgresql -} - -$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml deleted file mode 100644 index 8396f5d9d47..00000000000 --- a/vendor/github.com/lib/pq/.travis.yml +++ /dev/null @@ -1,44 +0,0 @@ -language: go - -go: - - 1.11.x - - 1.12.x - - master - -sudo: true - -env: - global: - - PGUSER=postgres - - PQGOSSLTESTS=1 - - PQSSLCERTTEST_PATH=$PWD/certs - - PGHOST=127.0.0.1 - matrix: - - PGVERSION=10 - - PGVERSION=9.6 - - PGVERSION=9.5 - - PGVERSION=9.4 - -before_install: - - ./.travis.sh postgresql_uninstall - - ./.travis.sh pgdg_repository - - ./.travis.sh postgresql_install - - ./.travis.sh postgresql_configure - - ./.travis.sh client_configure - - go get golang.org/x/tools/cmd/goimports - - go get golang.org/x/lint/golint - - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.1 - -before_script: - - createdb pqgotest - - createuser -DRS pqgossltest - - createuser -DRS pqgosslcert - -script: - - > - goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' - - go vet ./... - - staticcheck -go 1.11 ./... - - golint ./... 
- - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... - - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md deleted file mode 100644 index 84c937f1561..00000000000 --- a/vendor/github.com/lib/pq/CONTRIBUTING.md +++ /dev/null @@ -1,29 +0,0 @@ -## Contributing to pq - -`pq` has a backlog of pull requests, but contributions are still very -much welcome. You can help with patch review, submitting bug reports, -or adding new functionality. There is no formal style guide, but -please conform to the style of existing code and general Go formatting -conventions when submitting patches. - -### Patch review - -Help review existing open pull requests by commenting on the code or -proposed functionality. - -### Bug reports - -We appreciate any bug reports, but especially ones with self-contained -(doesn't depend on code outside of pq), minimal (can't be simplified -further) test cases. It's especially helpful if you can submit a pull -request with just the failing test case (you'll probably want to -pattern it after the tests in -[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go). - -### New functionality - -There are a number of pending patches for new functionality, so -additional feature patches will take a while to merge. Still, patches -are generally reviewed based on usefulness and complexity in addition -to time-in-queue, so if you have a knockout idea, take a shot. Feel -free to open an issue discussion your proposed patch beforehand. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md index 385fe73508e..126ee5d35d1 100644 --- a/vendor/github.com/lib/pq/README.md +++ b/vendor/github.com/lib/pq/README.md @@ -1,21 +1,11 @@ # pq - A pure Go postgres driver for Go's database/sql package -[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq) -[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq) +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) ## Install go get github.com/lib/pq -## Docs - -For detailed documentation and basic usage examples, please see the package -documentation at . - -## Tests - -`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. - ## Features * SSL @@ -29,67 +19,18 @@ documentation at . * Unix socket support * Notifications: `LISTEN`/`NOTIFY` * pgpass support +* GSS (Kerberos) auth -## Future / Things you can help with +## Tests -* Better COPY FROM / COPY TO (see discussion in #181) +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. -## Thank you (alphabetical) +## Status -Some of these contributors are from the original library `bmizerany/pq.go` whose -code still exists in here. +This package is currently in maintenance mode, which means: +1. It generally does not accept new features. +2. It does accept bug fixes and version compatability changes provided by the community. +3. Maintainers usually do not resolve reported issues. +4. Community members are encouraged to help each other with reported issues. 
-* Andy Balholm (andybalholm) -* Ben Berkert (benburkert) -* Benjamin Heatwole (bheatwole) -* Bill Mill (llimllib) -* Bjørn Madsen (aeons) -* Blake Gentry (bgentry) -* Brad Fitzpatrick (bradfitz) -* Charlie Melbye (cmelbye) -* Chris Bandy (cbandy) -* Chris Gilling (cgilling) -* Chris Walsh (cwds) -* Dan Sosedoff (sosedoff) -* Daniel Farina (fdr) -* Eric Chlebek (echlebek) -* Eric Garrido (minusnine) -* Eric Urban (hydrogen18) -* Everyone at The Go Team -* Evan Shaw (edsrzf) -* Ewan Chou (coocood) -* Fazal Majid (fazalmajid) -* Federico Romero (federomero) -* Fumin (fumin) -* Gary Burd (garyburd) -* Heroku (heroku) -* James Pozdena (jpoz) -* Jason McVetta (jmcvetta) -* Jeremy Jay (pbnjay) -* Joakim Sernbrant (serbaut) -* John Gallagher (jgallagher) -* Jonathan Rudenberg (titanous) -* Joël Stemmer (jstemmer) -* Kamil Kisiel (kisielk) -* Kelly Dunn (kellydunn) -* Keith Rarick (kr) -* Kir Shatrov (kirs) -* Lann Martin (lann) -* Maciek Sakrejda (uhoh-itsmaciek) -* Marc Brinkmann (mbr) -* Marko Tiikkaja (johto) -* Matt Newberry (MattNewberry) -* Matt Robenolt (mattrobenolt) -* Martin Olsen (martinolsen) -* Mike Lewis (mikelikespie) -* Nicolas Patry (Narsil) -* Oliver Tonnhofer (olt) -* Patrick Hayes (phayes) -* Paul Hammond (paulhammond) -* Ryan Smith (ryandotsmith) -* Samuel Stauffer (samuel) -* Timothée Peignier (cyberdelia) -* Travis Cline (tmc) -* TruongSinh Tran-Nguyen (truongsinh) -* Yaismel Miranda (ympons) -* notedit (notedit) +For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development. diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go index e4933e22764..39c8f7e2e03 100644 --- a/vendor/github.com/lib/pq/array.go +++ b/vendor/github.com/lib/pq/array.go @@ -22,7 +22,7 @@ var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() // db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) // // var x []sql.NullInt64 -// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) +// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x)) // // Scanning multi-dimensional arrays is not supported. Arrays where the lower // bound is not one (such as `[0:0]={1}') are not supported. @@ -35,19 +35,31 @@ func Array(a interface{}) interface { return (*BoolArray)(&a) case []float64: return (*Float64Array)(&a) + case []float32: + return (*Float32Array)(&a) case []int64: return (*Int64Array)(&a) + case []int32: + return (*Int32Array)(&a) case []string: return (*StringArray)(&a) + case [][]byte: + return (*ByteaArray)(&a) case *[]bool: return (*BoolArray)(a) case *[]float64: return (*Float64Array)(a) + case *[]float32: + return (*Float32Array)(a) case *[]int64: return (*Int64Array)(a) + case *[]int32: + return (*Int32Array)(a) case *[]string: return (*StringArray)(a) + case *[][]byte: + return (*ByteaArray)(a) } return GenericArray{a} @@ -267,6 +279,70 @@ func (a Float64Array) Value() (driver.Value, error) { return "{}", nil } +// Float32Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float32Array []float32 + +// Scan implements the sql.Scanner interface. 
+func (a *Float32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float32Array", src) +} + +func (a *Float32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float32Array, len(elems)) + for i, v := range elems { + var x float64 + if x, err = strconv.ParseFloat(string(v), 32); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = float32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + // GenericArray implements the driver.Valuer and sql.Scanner interfaces for // an array or slice of any dimension. type GenericArray struct{ A interface{} } @@ -483,6 +559,69 @@ func (a Int64Array) Value() (driver.Value, error) { return "{}", nil } +// Int32Array represents a one-dimensional array of the PostgreSQL integer types. +type Int32Array []int32 + +// Scan implements the sql.Scanner interface. +func (a *Int32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int32Array", src) +} + +func (a *Int32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int32Array, len(elems)) + for i, v := range elems { + x, err := strconv.ParseInt(string(v), 10, 32) + if err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = int32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, int64(a[0]), 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, int64(a[i]), 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + // StringArray represents a one-dimensional array of the PostgreSQL character types. 
type StringArray []string diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index 55152b1242c..e70b386ff09 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -18,6 +18,7 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" "unicode" @@ -30,21 +31,28 @@ var ( ErrNotSupported = errors.New("pq: Unsupported command") ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") - ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") - ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + ErrSSLKeyUnknownOwnership = errors.New("pq: Could not get owner information for private key, may not be properly protected") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key has world access. Permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less") + + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") errUnexpectedReady = errors.New("unexpected ReadyForQuery") errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") ) +// Compile time validation that our types implement the expected interfaces +var ( + _ driver.Driver = Driver{} +) + // Driver is the Postgres database driver. type Driver struct{} // Open opens a new connection to the database. name is a connection string. // Most users should only use it through database/sql package from the standard // library. -func (d *Driver) Open(name string) (driver.Conn, error) { +func (d Driver) Open(name string) (driver.Conn, error) { return Open(name) } @@ -134,9 +142,10 @@ type conn struct { saveMessageType byte saveMessageBuffer []byte - // If true, this connection is bad and all public-facing functions should - // return ErrBadConn. - bad bool + // If an error is set, this connection is bad and all public-facing + // functions should return the appropriate error by calling get() + // (ErrBadConn) or getForNext(). + err syncErr // If set, this connection should never use the binary format when // receiving query results from prepared statements. Only provided for @@ -149,6 +158,49 @@ type conn struct { // If true this connection is in the middle of a COPY inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS +} + +type syncErr struct { + err error + sync.Mutex +} + +// Return ErrBadConn if connection is bad. +func (e *syncErr) get() error { + e.Lock() + defer e.Unlock() + if e.err != nil { + return driver.ErrBadConn + } + return nil +} + +// Return the error set on the connection. Currently only used by rows.Next. +func (e *syncErr) getForNext() error { + e.Lock() + defer e.Unlock() + return e.err +} + +// Set error, only if it isn't set yet. +func (e *syncErr) set(err error) { + if err == nil { + panic("attempt to set nil err") + } + e.Lock() + defer e.Unlock() + if e.err == nil { + e.err = err + } } // Handle driver-side settings in parsed connection string. 
@@ -272,7 +324,7 @@ func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { if err != nil { return nil, err } - c.dialer = d + c.Dialer(d) return c.open(context.Background()) } @@ -283,7 +335,13 @@ func (c *Connector) open(ctx context.Context) (cn *conn, err error) { // the user. defer errRecoverNoErrBadConn(&err) - o := c.opts + // Create a new values map (copy). This makes it so maps in different + // connections do not reference the same underlying data structure, so it + // is safe for multiple connections to concurrently write to their opts. + o := make(values) + for k, v := range c.opts { + o[k] = v + } cn = &conn{ opts: o, @@ -329,10 +387,6 @@ func (c *Connector) open(ctx context.Context) (cn *conn, err error) { func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { network, address := network(o) - // SSL is not necessary or supported over UNIX domain sockets - if network == "unix" { - o["sslmode"] = "disable" - } // Zero or not specified means wait indefinitely. if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { @@ -498,7 +552,7 @@ func (cn *conn) isInTransaction() bool { func (cn *conn) checkIsInTransaction(intxn bool) { if cn.isInTransaction() != intxn { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected transaction status %v", cn.txnStatus) } } @@ -508,8 +562,8 @@ func (cn *conn) Begin() (_ driver.Tx, err error) { } func (cn *conn) begin(mode string) (_ driver.Tx, err error) { - if cn.bad { - return nil, driver.ErrBadConn + if err := cn.err.get(); err != nil { + return nil, err } defer cn.errRecover(&err) @@ -519,11 +573,11 @@ func (cn *conn) begin(mode string) (_ driver.Tx, err error) { return nil, err } if commandTag != "BEGIN" { - cn.bad = true + cn.err.set(driver.ErrBadConn) return nil, fmt.Errorf("unexpected command tag %s", commandTag) } if cn.txnStatus != txnStatusIdleInTransaction { - cn.bad = true + cn.err.set(driver.ErrBadConn) return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) } return cn, nil @@ -537,8 +591,8 @@ func (cn *conn) closeTxn() { func (cn *conn) Commit() (err error) { defer cn.closeTxn() - if cn.bad { - return driver.ErrBadConn + if err := cn.err.get(); err != nil { + return err } defer cn.errRecover(&err) @@ -559,12 +613,12 @@ func (cn *conn) Commit() (err error) { _, commandTag, err := cn.simpleExec("COMMIT") if err != nil { if cn.isInTransaction() { - cn.bad = true + cn.err.set(driver.ErrBadConn) } return err } if commandTag != "COMMIT" { - cn.bad = true + cn.err.set(driver.ErrBadConn) return fmt.Errorf("unexpected command tag %s", commandTag) } cn.checkIsInTransaction(false) @@ -573,8 +627,8 @@ func (cn *conn) Commit() (err error) { func (cn *conn) Rollback() (err error) { defer cn.closeTxn() - if cn.bad { - return driver.ErrBadConn + if err := cn.err.get(); err != nil { + return err } defer cn.errRecover(&err) return cn.rollback() @@ -585,7 +639,7 @@ func (cn *conn) rollback() (err error) { _, commandTag, err := cn.simpleExec("ROLLBACK") if err != nil { if cn.isInTransaction() { - cn.bad = true + cn.err.set(driver.ErrBadConn) } return err } @@ -625,7 +679,7 @@ func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err case 'T', 'D': // ignore any results default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unknown response for simple query: %q", t) } } @@ -647,7 +701,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { // the user can close, though, to avoid connections from being // leaked. 
A "rows" with done=true works fine for that purpose. if err != nil { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected message %q in simple query execution", t) } if res == nil { @@ -658,8 +712,11 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { // Set the result and tag to the last command complete if there wasn't a // query already run. Although queries usually return from here and cede // control to Next, a query with zero results does not. - if t == 'C' && res.colNames == nil { + if t == 'C' { res.result, res.tag = cn.parseComplete(r.string()) + if res.colNames != nil { + return + } } res.done = true case 'Z': @@ -671,7 +728,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { err = parseError(r) case 'D': if res == nil { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected DataRow in simple query execution") } // the query didn't fail; kick off to Next @@ -686,7 +743,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { // To work around a bug in QueryRow in Go 1.2 and earlier, wait // until the first DataRow has been received. default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unknown response for simple query: %q", t) } } @@ -779,8 +836,8 @@ func (cn *conn) prepareTo(q, stmtName string) *stmt { } func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { - if cn.bad { - return nil, driver.ErrBadConn + if err := cn.err.get(); err != nil { + return nil, err } defer cn.errRecover(&err) @@ -818,8 +875,8 @@ func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { } func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { - if cn.bad { - return nil, driver.ErrBadConn + if err := cn.err.get(); err != nil { + return nil, err } if cn.inCopy { return nil, errCopyInProgress @@ -852,8 +909,8 @@ func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { // Implement the optional "Execer" interface for one-shot queries func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { - if cn.bad { - return nil, driver.ErrBadConn + if err := cn.err.get(); err != nil { + return nil, err } defer cn.errRecover(&err) @@ -886,9 +943,20 @@ func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err return r, err } +type safeRetryError struct { + Err error +} + +func (se *safeRetryError) Error() string { + return se.Err.Error() +} + func (cn *conn) send(m *writeBuf) { - _, err := cn.c.Write(m.wrap()) + n, err := cn.c.Write(m.wrap()) if err != nil { + if n == 0 { + err = &safeRetryError{Err: err} + } panic(err) } } @@ -913,7 +981,7 @@ func (cn *conn) sendSimpleMessage(typ byte) (err error) { // the message yourself. 
func (cn *conn) saveMessage(typ byte, buf *readBuf) { if cn.saveMessageType != 0 { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected saveMessageType %d", cn.saveMessageType) } cn.saveMessageType = typ @@ -971,7 +1039,13 @@ func (cn *conn) recv() (t byte, r *readBuf) { case 'E': panic(parseError(r)) case 'N': - // ignore + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } default: return } @@ -988,8 +1062,14 @@ func (cn *conn) recv1Buf(r *readBuf) byte { } switch t { - case 'A', 'N': - // ignore + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } case 'S': cn.processParameterStatus(r) default: @@ -1047,7 +1127,7 @@ func isDriverSetting(key string) bool { return true case "password": return true - case "sslmode", "sslcert", "sslkey", "sslrootcert": + case "sslmode", "sslcert", "sslkey", "sslrootcert", "sslinline", "sslsni": return true case "fallback_application_name": return true @@ -1057,7 +1137,10 @@ func isDriverSetting(key string) bool { return true case "binary_parameters": return true - + case "krbsrvname": + return true + case "krbspn": + return true default: return false } @@ -1137,6 +1220,59 @@ func (cn *conn) auth(r *readBuf, o values) { if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. 
+ case 10: sc := scram.NewClient(sha256.New, o["user"], o["password"]) sc.Step(nil) @@ -1215,8 +1351,8 @@ func (st *stmt) Close() (err error) { if st.closed { return nil } - if st.cn.bad { - return driver.ErrBadConn + if err := st.cn.err.get(); err != nil { + return err } defer st.cn.errRecover(&err) @@ -1229,14 +1365,14 @@ func (st *stmt) Close() (err error) { t, _ := st.cn.recv1() if t != '3' { - st.cn.bad = true + st.cn.err.set(driver.ErrBadConn) errorf("unexpected close response: %q", t) } st.closed = true t, r := st.cn.recv1() if t != 'Z' { - st.cn.bad = true + st.cn.err.set(driver.ErrBadConn) errorf("expected ready for query, but got: %q", t) } st.cn.processReadyForQuery(r) @@ -1245,8 +1381,12 @@ func (st *stmt) Close() (err error) { } func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { - if st.cn.bad { - return nil, driver.ErrBadConn + return st.query(v) +} + +func (st *stmt) query(v []driver.Value) (r *rows, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err } defer st.cn.errRecover(&err) @@ -1258,8 +1398,8 @@ func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { } func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { - if st.cn.bad { - return nil, driver.ErrBadConn + if err := st.cn.err.get(); err != nil { + return nil, err } defer st.cn.errRecover(&err) @@ -1345,7 +1485,7 @@ func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { parts := strings.Split(commandTag, " ") if len(parts) != 3 { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected INSERT command tag %s", commandTag) } affectedRows = &parts[len(parts)-1] @@ -1357,7 +1497,7 @@ func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { } n, err := strconv.ParseInt(*affectedRows, 10, 64) if err != nil { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("could not parse commandTag: %s", err) } return driver.RowsAffected(n), commandTag @@ -1424,8 +1564,8 @@ func (rs *rows) Next(dest []driver.Value) (err error) { } conn := rs.cn - if conn.bad { - return driver.ErrBadConn + if err := conn.err.getForNext(); err != nil { + return err } defer conn.errRecover(&err) @@ -1449,7 +1589,7 @@ func (rs *rows) Next(dest []driver.Value) (err error) { case 'D': n := rs.rb.int16() if err != nil { - conn.bad = true + conn.err.set(driver.ErrBadConn) errorf("unexpected DataRow after error %s", err) } if n < len(dest) { @@ -1616,10 +1756,9 @@ func (cn *conn) processParameterStatus(r *readBuf) { case "server_version": var major1 int var major2 int - var minor int - _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + _, err = fmt.Sscanf(r.string(), "%d.%d", &major1, &major2) if err == nil { - cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + cn.parameterStatus.serverVersion = major1*10000 + major2*100 } case "TimeZone": @@ -1644,7 +1783,7 @@ func (cn *conn) readReadyForQuery() { cn.processReadyForQuery(r) return default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected message %q; expected ReadyForQuery", t) } } @@ -1664,7 +1803,7 @@ func (cn *conn) readParseResponse() { cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected Parse response %q", t) } } @@ -1689,7 +1828,7 @@ func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames [ cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.err.set(driver.ErrBadConn) 
errorf("unexpected Describe statement response %q", t) } } @@ -1707,7 +1846,7 @@ func (cn *conn) readPortalDescribeResponse() rowsHeader { cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected Describe response %q", t) } panic("not reached") @@ -1723,7 +1862,7 @@ func (cn *conn) readBindResponse() { cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected Bind response %q", t) } } @@ -1750,7 +1889,7 @@ func (cn *conn) postExecuteWorkaround() { cn.saveMessage(t, r) return default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected message during extended query execution: %q", t) } } @@ -1763,7 +1902,7 @@ func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, co switch t { case 'C': if err != nil { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected CommandComplete after error %s", err) } res, commandTag = cn.parseComplete(r.string()) @@ -1777,7 +1916,7 @@ func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, co err = parseError(r) case 'T', 'D', 'I': if err != nil { - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unexpected %q after error %s", t, err) } if t == 'I' { @@ -1785,7 +1924,7 @@ func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, co } // ignore any results default: - cn.bad = true + cn.err.set(driver.ErrBadConn) errorf("unknown %s response: %q", protocolState, t) } } @@ -1881,6 +2020,8 @@ func parseEnviron(env []string) (out map[string]string) { accrue("sslkey") case "PGSSLROOTCERT": accrue("sslrootcert") + case "PGSSLSNI": + accrue("sslsni") case "PGREQUIRESSL", "PGSSLCRL": unsupported() case "PGREQUIREPEER": diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go index 09e2ea4648e..63d4ca6aaa0 100644 --- a/vendor/github.com/lib/pq/conn_go18.go +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -10,6 +10,10 @@ import ( "time" ) +const ( + watchCancelDialContextTimeout = time.Second * 10 +) + // Implement the "QueryerContext" interface func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { list := make([]driver.Value, len(args)) @@ -42,6 +46,14 @@ func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.Nam return cn.Exec(query, list) } +// Implement the "ConnPrepareContext" interface +func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + return cn.Prepare(query) +} + // Implement the "ConnBeginTx" interface func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { var mode string @@ -89,25 +101,37 @@ func (cn *conn) Ping(ctx context.Context) error { func (cn *conn) watchCancel(ctx context.Context) func() { if done := ctx.Done(); done != nil { - finished := make(chan struct{}) + finished := make(chan struct{}, 1) go func() { select { case <-done: + select { + case finished <- struct{}{}: + default: + // We raced with the finish func, let the next query handle this with the + // context. + return + } + + // Set the connection state to bad so it does not get reused. + cn.err.set(ctx.Err()) + // At this point the function level context is canceled, // so it must not be used for the additional network // request to cancel the query. // Create a new context to pass into the dial. 
- ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) defer cancel() _ = cn.cancel(ctxCancel) - finished <- struct{}{} case <-finished: } }() return func() { select { case <-finished: + cn.err.set(ctx.Err()) + cn.Close() case finished <- struct{}{}: } } @@ -116,7 +140,16 @@ func (cn *conn) watchCancel(ctx context.Context) func() { } func (cn *conn) cancel(ctx context.Context) error { - c, err := dial(ctx, cn.dialer, cn.opts) + // Create a new values map (copy). This makes sure the connection created + // in this method cannot write to the same underlying data, which could + // cause a concurrent map write panic. This is necessary because cancel + // is called from a goroutine in watchCancel. + o := make(values) + for k, v := range cn.opts { + o[k] = v + } + + c, err := dial(ctx, cn.dialer, o) if err != nil { return err } @@ -126,7 +159,7 @@ func (cn *conn) cancel(ctx context.Context) error { can := conn{ c: c, } - err = can.ssl(cn.opts) + err = can.ssl(o) if err != nil { return err } @@ -147,3 +180,68 @@ func (cn *conn) cancel(ctx context.Context) error { return err } } + +// Implement the "StmtQueryContext" interface +func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := st.watchCancel(ctx) + r, err := st.query(list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "StmtExecContext" interface +func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := st.watchCancel(ctx); finish != nil { + defer finish() + } + + return st.Exec(list) +} + +// watchCancel is implemented on stmt in order to not mark the parent conn as bad +func (st *stmt) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = st.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (st *stmt) cancel(ctx context.Context) error { + return st.cn.cancel(ctx) +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go index 2f8ced6737d..1145e12257a 100644 --- a/vendor/github.com/lib/pq/connector.go +++ b/vendor/github.com/lib/pq/connector.go @@ -27,7 +27,12 @@ func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { return c.open(ctx) } -// Driver returnst the underlying driver of this Connector. +// Dialer allows change the dialer used to open connections. +func (c *Connector) Dialer(dialer Dialer) { + c.dialer = dialer +} + +// Driver returns the underlying driver of this Connector. 
func (c *Connector) Driver() driver.Driver { return &Driver{} } @@ -106,5 +111,10 @@ func NewConnector(dsn string) (*Connector, error) { o["user"] = u } + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + return &Connector{opts: o, dialer: defaultDialer{}}, nil } diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go index 55378d5b119..2f5c1ec8a6c 100644 --- a/vendor/github.com/lib/pq/copy.go +++ b/vendor/github.com/lib/pq/copy.go @@ -1,6 +1,7 @@ package pq import ( + "context" "database/sql/driver" "encoding/binary" "errors" @@ -52,8 +53,11 @@ type copyin struct { closed bool - sync.Mutex // guards err - err error + mu struct { + sync.Mutex + err error + driver.Result + } } const ciBufferSize = 64 * 1024 @@ -97,13 +101,13 @@ awaitCopyInResponse: err = parseError(r) case 'Z': if err == nil { - ci.setBad() + ci.setBad(driver.ErrBadConn) errorf("unexpected ReadyForQuery in response to COPY") } cn.processReadyForQuery(r) return nil, err default: - ci.setBad() + ci.setBad(driver.ErrBadConn) errorf("unknown response for copy query: %q", t) } } @@ -122,7 +126,7 @@ awaitCopyInResponse: cn.processReadyForQuery(r) return nil, err default: - ci.setBad() + ci.setBad(driver.ErrBadConn) errorf("unknown response for CopyFail: %q", t) } } @@ -143,7 +147,7 @@ func (ci *copyin) resploop() { var r readBuf t, err := ci.cn.recvMessage(&r) if err != nil { - ci.setBad() + ci.setBad(driver.ErrBadConn) ci.setError(err) ci.done <- true return @@ -151,8 +155,12 @@ func (ci *copyin) resploop() { switch t { case 'C': // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) case 'N': - // NoticeResponse + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } case 'Z': ci.cn.processReadyForQuery(&r) ci.done <- true @@ -161,7 +169,7 @@ func (ci *copyin) resploop() { err := parseError(&r) ci.setError(err) default: - ci.setBad() + ci.setBad(driver.ErrBadConn) ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) ci.done <- true return @@ -169,34 +177,45 @@ func (ci *copyin) resploop() { } } -func (ci *copyin) setBad() { - ci.Lock() - ci.cn.bad = true - ci.Unlock() +func (ci *copyin) setBad(err error) { + ci.cn.err.set(err) } -func (ci *copyin) isBad() bool { - ci.Lock() - b := ci.cn.bad - ci.Unlock() - return b +func (ci *copyin) getBad() error { + return ci.cn.err.get() } -func (ci *copyin) isErrorSet() bool { - ci.Lock() - isSet := (ci.err != nil) - ci.Unlock() - return isSet +func (ci *copyin) err() error { + ci.mu.Lock() + err := ci.mu.err + ci.mu.Unlock() + return err } // setError() sets ci.err if one has not been set already. Caller must not be // holding ci.Mutex. 
func (ci *copyin) setError(err error) { - ci.Lock() - if ci.err == nil { - ci.err = err + ci.mu.Lock() + if ci.mu.err == nil { + ci.mu.err = err } - ci.Unlock() + ci.mu.Unlock() +} + +func (ci *copyin) setResult(result driver.Result) { + ci.mu.Lock() + ci.mu.Result = result + ci.mu.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.mu.Lock() + result := ci.mu.Result + ci.mu.Unlock() + if result == nil { + return driver.RowsAffected(0) + } + return result } func (ci *copyin) NumInput() int { @@ -219,17 +238,21 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { return nil, errCopyInClosed } - if ci.isBad() { - return nil, driver.ErrBadConn + if err := ci.getBad(); err != nil { + return nil, err } defer ci.cn.errRecover(&err) - if ci.isErrorSet() { - return nil, ci.err + if err := ci.err(); err != nil { + return nil, err } if len(v) == 0 { - return driver.RowsAffected(0), ci.Close() + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil } numValues := len(v) @@ -251,14 +274,51 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { return driver.RowsAffected(0), nil } +// CopyData inserts a raw string into the COPY stream. The insert is +// asynchronous and CopyData can return errors from previous CopyData calls to +// the same COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if finish := ci.cn.watchCancel(ctx); finish != nil { + defer finish() + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + ci.buffer = append(ci.buffer, []byte(line)...) + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + func (ci *copyin) Close() (err error) { if ci.closed { // Don't do anything, we're already closed return nil } ci.closed = true - if ci.isBad() { - return driver.ErrBadConn + if err := ci.getBad(); err != nil { + return err } defer ci.cn.errRecover(&err) @@ -274,8 +334,7 @@ func (ci *copyin) Close() (err error) { <-ci.done ci.cn.inCopy = false - if ci.isErrorSet() { - err = ci.err + if err := ci.err(); err != nil { return err } return nil diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go index 2a60054e2e0..b57184801ba 100644 --- a/vendor/github.com/lib/pq/doc.go +++ b/vendor/github.com/lib/pq/doc.go @@ -241,5 +241,28 @@ bytes by the PostgreSQL server. You can find a complete, working example of Listener usage at https://godoc.org/github.com/lib/pq/example/listen. + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. 
+ +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. */ package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index 73cafb89443..bffe6096aff 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math" + "regexp" "strconv" "strings" "sync" @@ -16,6 +17,8 @@ import ( "github.com/lib/pq/oid" ) +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { switch v := x.(type) { case []byte: @@ -197,15 +200,38 @@ func appendEscapedText(buf []byte, text string) []byte { func mustParse(f string, typ oid.Oid, s []byte) time.Time { str := string(s) - // check for a 30-minute-offset timezone - if (typ == oid.T_timestamptz || typ == oid.T_timetz) && - str[len(str)-3] == ':' { - f += ":00" + // Check for a minute and second offset in the timezone. + if typ == oid.T_timestamptz || typ == oid.T_timetz { + for i := 3; i <= 6; i += 3 { + if str[len(str)-i] == ':' { + f += ":00" + continue + } + break + } + } + + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. + str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } } t, err := time.Parse(f, str) if err != nil { errorf("decode: %s", err) } + if is2400Time { + t = t.Add(24 * time.Hour) + } return t } @@ -396,7 +422,7 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ fracStart := remainderIdx + 1 - fracOff := strings.IndexAny(str[fracStart:], "-+ ") + fracOff := strings.IndexAny(str[fracStart:], "-+Z ") if fracOff < 0 { fracOff = len(str) - fracStart } @@ -406,7 +432,7 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro remainderIdx += fracOff + 1 } if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { - // time zone separator is always '-' or '+' (UTC is +00) + // time zone separator is always '-' or '+' or 'Z' (UTC is +00) var tzSign int switch c := str[tzStart]; c { case '-': @@ -428,7 +454,11 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro remainderIdx += 3 } tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } else if tzStart < len(str) && str[tzStart] == 'Z' { + // time zone Z separator indicates UTC is +00 + remainderIdx += 1 } + var isoYear int if isBC { @@ -533,7 +563,7 @@ func parseBytea(s []byte) (result []byte, err error) { if len(s) < 4 { return nil, fmt.Errorf("invalid bytea sequence %v", s) } - r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + r, err := strconv.ParseUint(string(s[1:4]), 8, 8) if err != nil { return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) } diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go index 3d66ba7c52e..f67c5a5fa64 100644 --- a/vendor/github.com/lib/pq/error.go +++ b/vendor/github.com/lib/pq/error.go @@ -402,6 +402,11 @@ func (err *Error) Fatal() bool { return err.Severity == Efatal } +// SQLState returns the SQLState of the error. +func (err *Error) SQLState() string { + return string(err.Code) +} + // Get implements the legacy PGError interface. New code should use the fields // of the Error struct directly. func (err *Error) Get(k byte) (v string) { @@ -444,7 +449,7 @@ func (err *Error) Get(k byte) (v string) { return "" } -func (err Error) Error() string { +func (err *Error) Error() string { return "pq: " + err.Message } @@ -484,7 +489,7 @@ func (cn *conn) errRecover(err *error) { case nil: // Do nothing case runtime.Error: - cn.bad = true + cn.err.set(driver.ErrBadConn) panic(v) case *Error: if v.Fatal() { @@ -493,23 +498,26 @@ func (cn *conn) errRecover(err *error) { *err = v } case *net.OpError: - cn.bad = true + cn.err.set(driver.ErrBadConn) *err = v + case *safeRetryError: + cn.err.set(driver.ErrBadConn) + *err = driver.ErrBadConn case error: - if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + if v == io.EOF || v.Error() == "remote error: handshake failure" { *err = driver.ErrBadConn } else { *err = v } default: - cn.bad = true + cn.err.set(driver.ErrBadConn) panic(fmt.Sprintf("unknown error: %#v", e)) } // Any time we return ErrBadConn, we need to remember it since *Tx doesn't // mark the connection bad in database/sql. if *err == driver.ErrBadConn { - cn.bad = true + cn.err.set(driver.ErrBadConn) } } diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 00000000000..408ec01f977 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. 
For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). +type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 00000000000..70ad122a7d6 --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,72 @@ +//go:build go1.10 +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. +type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. +func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. 
+func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 850bb9040c3..5c421fdb8b5 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -4,6 +4,8 @@ package pq // This module contains support for Postgres LISTEN/NOTIFY. import ( + "context" + "database/sql/driver" "errors" "fmt" "sync" @@ -29,6 +31,61 @@ func recvNotification(r *readBuf) *Notification { return &Notification{bePid, channel, extra} } +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. +func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. 
+func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + const ( connStateIdle int32 = iota connStateExpectResponse @@ -174,8 +231,12 @@ func (l *ListenerConn) listenerConnLoop() (err error) { } l.replyChan <- message{t, nil} - case 'N', 'S': + case 'S': // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } default: return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) } diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go index d9020845585..36b61ba45be 100644 --- a/vendor/github.com/lib/pq/ssl.go +++ b/vendor/github.com/lib/pq/ssl.go @@ -8,6 +8,7 @@ import ( "os" "os/user" "path/filepath" + "strings" ) // ssl generates a function to upgrade a net.Conn based on the "sslmode" and @@ -50,6 +51,16 @@ func ssl(o values) (func(net.Conn) (net.Conn, error), error) { return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) } + // Set Server Name Indication (SNI), if enabled by connection parameters. + // By default SNI is on, any value which is not starting with "1" disables + // SNI -- that is the same check vanilla libpq uses. + if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") { + // RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4 + // or IPv6). This check is coded already crypto.tls.hostnameInSNI, so + // just always set ServerName here and let crypto/tls do the filtering. + tlsConf.ServerName = o["host"] + } + err := sslClientCertificates(&tlsConf, o) if err != nil { return nil, err @@ -83,6 +94,16 @@ func ssl(o values) (func(net.Conn) (net.Conn, error), error) { // in the user's home directory. The configured files must exist and have // the correct permissions. func sslClientCertificates(tlsConf *tls.Config, o values) error { + sslinline := o["sslinline"] + if sslinline == "true" { + cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) + if err != nil { + return err + } + tlsConf.Certificates = []tls.Certificate{cert} + return nil + } + // user.Current() might fail when cross-compiling. We have to ignore the // error and continue without home directory defaults, since we wouldn't // know from where to load them. @@ -137,9 +158,17 @@ func sslCertificateAuthority(tlsConf *tls.Config, o values) error { if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { tlsConf.RootCAs = x509.NewCertPool() - cert, err := ioutil.ReadFile(sslrootcert) - if err != nil { - return err + sslinline := o["sslinline"] + + var cert []byte + if sslinline == "true" { + cert = []byte(sslrootcert) + } else { + var err error + cert, err = ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } } if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go index 3b7c3a2a319..d587f102edb 100644 --- a/vendor/github.com/lib/pq/ssl_permissions.go +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -1,8 +1,30 @@ +//go:build !windows // +build !windows package pq -import "os" +import ( + "errors" + "os" + "syscall" +) + +const ( + rootUserID = uint32(0) + + // The maximum permissions that a private key file owned by a regular user + // is allowed to have. 
This translates to u=rw. + maxUserOwnedKeyPermissions os.FileMode = 0600 + + // The maximum permissions that a private key file owned by root is allowed + // to have. This translates to u=rw,g=r. + maxRootOwnedKeyPermissions os.FileMode = 0640 +) + +var ( + errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") + errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") +) // sslKeyPermissions checks the permissions on user-supplied ssl key files. // The key file should have very little access. @@ -13,8 +35,59 @@ func sslKeyPermissions(sslkey string) error { if err != nil { return err } - if info.Mode().Perm()&0077 != 0 { - return ErrSSLKeyHasWorldPermissions + + err = hasCorrectPermissions(info) + + // return ErrSSLKeyHasWorldPermissions for backwards compatability with + // existing code. + if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { + err = ErrSSLKeyHasWorldPermissions } - return nil + return err +} + +// hasCorrectPermissions checks the file info (and the unix-specific stat_t +// output) to verify that the permissions on the file are correct. +// +// If the file is owned by the same user the process is running as, +// the file should only have 0600 (u=rw). If the file is owned by root, +// and the group matches the group that the process is running in, the +// permissions cannot be more than 0640 (u=rw,g=r). The file should +// never have world permissions. +// +// Returns an error when the permission check fails. +func hasCorrectPermissions(info os.FileInfo) error { + // if file's permission matches 0600, allow access. + userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) + + // regardless of if we're running as root or not, 0600 is acceptable, + // so we return if we match the regular user permission mask. + if info.Mode().Perm()&userPermissionMask == 0 { + return nil + } + + // We need to pull the Unix file information to get the file's owner. + // If we can't access it, there's some sort of operating system level error + // and we should fail rather than attempting to use faulty information. + sysInfo := info.Sys() + if sysInfo == nil { + return ErrSSLKeyUnknownOwnership + } + + unixStat, ok := sysInfo.(*syscall.Stat_t) + if !ok { + return ErrSSLKeyUnknownOwnership + } + + // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what + // Postgres does. 
+ if unixStat.Uid == rootUserID { + rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) + if info.Mode().Perm()&rootPermissionMask != 0 { + return errSSLKeyHasUnacceptableRootPermissions + } + return nil + } + + return errSSLKeyHasUnacceptableUserPermissions } diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go index 5d2c763cebc..73663c8f157 100644 --- a/vendor/github.com/lib/pq/ssl_windows.go +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package pq diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go index f4d8a7c2062..aec6e95be8b 100644 --- a/vendor/github.com/lib/pq/url.go +++ b/vendor/github.com/lib/pq/url.go @@ -40,10 +40,10 @@ func ParseURL(url string) (string, error) { } var kvs []string - escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) accrue := func(k, v string) { if v != "" { - kvs = append(kvs, k+"="+escaper.Replace(v)) + kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") } } diff --git a/vendor/github.com/lib/pq/user_other.go b/vendor/github.com/lib/pq/user_other.go new file mode 100644 index 00000000000..3dae8f5572b --- /dev/null +++ b/vendor/github.com/lib/pq/user_other.go @@ -0,0 +1,10 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +//go:build js || android || hurd || zos +// +build js android hurd zos + +package pq + +func userCurrent() (string, error) { + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go index bf982524f93..5f2d439bc42 100644 --- a/vendor/github.com/lib/pq/user_posix.go +++ b/vendor/github.com/lib/pq/user_posix.go @@ -1,6 +1,7 @@ // Package pq is a pure Go Postgres driver for the database/sql package. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun +//go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos +// +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos package pq diff --git a/vendor/modules.txt b/vendor/modules.txt index a50b02ae51f..8a285e4e75e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -510,8 +510,8 @@ github.com/lann/builder # github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 ## explicit github.com/lann/ps -# github.com/lib/pq v1.3.0 -## explicit +# github.com/lib/pq v1.10.7 +## explicit; go 1.13 github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram From ba8b3e889a6943a797923ca0b0f672d6051b68f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Dec 2022 23:24:59 -0800 Subject: [PATCH 009/133] Bump github.com/spf13/afero from 1.6.0 to 1.9.3 (#5022) Bumps [github.com/spf13/afero](https://github.com/spf13/afero) from 1.6.0 to 1.9.3. - [Release notes](https://github.com/spf13/afero/releases) - [Commits](https://github.com/spf13/afero/compare/v1.6.0...v1.9.3) --- updated-dependencies: - dependency-name: github.com/spf13/afero dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 20 +- vendor/github.com/spf13/afero/.travis.yml | 26 -- vendor/github.com/spf13/afero/README.md | 18 +- vendor/github.com/spf13/afero/afero.go | 4 +- vendor/github.com/spf13/afero/appveyor.yml | 9 +- vendor/github.com/spf13/afero/basepath.go | 14 +- .../github.com/spf13/afero/cacheOnReadFs.go | 6 +- vendor/github.com/spf13/afero/const_bsds.go | 1 + .../github.com/spf13/afero/const_win_unix.go | 8 +- vendor/github.com/spf13/afero/httpFs.go | 2 +- .../spf13/afero/internal/common/adapters.go | 27 ++ vendor/github.com/spf13/afero/iofs.go | 38 +- vendor/github.com/spf13/afero/ioutil.go | 10 +- vendor/github.com/spf13/afero/mem/file.go | 32 +- vendor/github.com/spf13/afero/memmap.go | 7 +- vendor/github.com/spf13/afero/unionFile.go | 30 +- vendor/github.com/spf13/afero/util.go | 10 +- vendor/golang.org/x/text/runes/cond.go | 187 +++++++++ vendor/golang.org/x/text/runes/runes.go | 355 ++++++++++++++++++ vendor/modules.txt | 6 +- 21 files changed, 718 insertions(+), 94 deletions(-) delete mode 100644 vendor/github.com/spf13/afero/.travis.yml create mode 100644 vendor/github.com/spf13/afero/internal/common/adapters.go create mode 100644 vendor/golang.org/x/text/runes/cond.go create mode 100644 vendor/golang.org/x/text/runes/runes.go diff --git a/go.mod b/go.mod index 05b054a393a..d0e0c1b3026 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/prometheus/prometheus v0.39.1 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 - github.com/spf13/afero v1.6.0 + github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.0 github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d diff --git a/go.sum b/go.sum index a7fcaa7fd5e..81ec26e9e10 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7h cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,6 +16,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -56,6 +58,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= @@ -472,6 +475,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -499,6 +503,7 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -801,7 +806,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -901,8 +906,8 @@ github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1060,13 +1065,14 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1153,6 +1159,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -1272,6 +1279,7 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1279,6 +1287,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1392,6 +1401,7 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1505,7 +1515,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index e944f594745..00000000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -sudo: 
false -language: go -arch: - - amd64 - - ppc64e - -go: - - "1.14" - - "1.15" - - "1.16" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build -v ./... - - go test -count=1 -cover -race -v ./... - - go vet ./... - - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index fb8eaaf896f..3bafbfdfcaf 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,7 @@ A FileSystem Abstraction System for Go -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # Overview @@ -79,11 +79,11 @@ would. So if my application before had: ```go -os.Open('/tmp/foo') +os.Open("/tmp/foo") ``` We would replace it with: ```go -AppFs.Open('/tmp/foo') +AppFs.Open("/tmp/foo") ``` `AppFs` being the variable we defined above. @@ -259,6 +259,18 @@ system using InMemoryFile. Afero has experimental support for secure file transfer protocol (sftp). Which can be used to perform file operations over a encrypted channel. +### GCSFs + +Afero has experimental support for Google Cloud Storage (GCS). You can either set the +`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in +`NewGcsFS` to configure access to your GCS bucket. + +Some known limitations of the existing implementation: +* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. +* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's is left for another version. +* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. 
+ + ## Filtering Backends ### BasePathFs diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go index 469ff7d2dcc..199480cd05e 100644 --- a/vendor/github.com/spf13/afero/afero.go +++ b/vendor/github.com/spf13/afero/afero.go @@ -103,8 +103,8 @@ type Fs interface { var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml index 5d2f34bf167..65e20e8ca3f 100644 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -1,3 +1,5 @@ +# This currently does nothing. We have moved to GitHub action, but this is kept +# until spf13 has disabled this project in AppVeyor. version: '{build}' clone_folder: C:\gopath\src\github.com\spf13\afero environment: @@ -6,10 +8,3 @@ build_script: - cmd: >- go version - go env - - go get -v github.com/spf13/afero/... - - go build -v github.com/spf13/afero/... -test_script: -- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 4f9832829d6..70a1d91680b 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -1,6 +1,7 @@ package afero import ( + "io/fs" "os" "path/filepath" "runtime" @@ -8,7 +9,10 @@ import ( "time" ) -var _ Lstater = (*BasePathFs)(nil) +var ( + _ Lstater = (*BasePathFs)(nil) + _ fs.ReadDirFile = (*BasePathFile)(nil) +) // The BasePathFs restricts all operations to a given path within an Fs. 
// The given file name to the operations on this Fs will be prepended with @@ -33,6 +37,14 @@ func (f *BasePathFile) Name() string { return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) } +func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { + if rdf, ok := f.File.(fs.ReadDirFile); ok { + return rdf.ReadDir(n) + + } + return readDirFile{f.File}.ReadDir(n) +} + func NewBasePathFs(source Fs, path string) Fs { return &BasePathFs{source: source, path: path} } diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go index 71471aa25c5..017d344fd53 100644 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -75,6 +75,10 @@ func (u *CacheOnReadFs) copyToLayer(name string) error { return copyToLayer(u.base, u.layer, name) } +func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { + return copyFileToLayer(u.base, u.layer, name, flag, perm) +} + func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { st, _, err := u.cacheStatus(name) if err != nil { @@ -212,7 +216,7 @@ func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, switch st { case cacheLocal, cacheHit: default: - if err := u.copyToLayer(name); err != nil { + if err := u.copyFileToLayer(name, flag, perm); err != nil { return nil, err } } diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index 18b45824be9..eed0f225fd8 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly // +build aix darwin openbsd freebsd netbsd dragonfly package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 2b850e4ddba..004d57e2ff8 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -10,12 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd -// +build !aix +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix package afero diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go index 2b86e30d1ef..ac0de6d51f7 100644 --- a/vendor/github.com/spf13/afero/httpFs.go +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -29,7 +29,7 @@ type httpDir struct { } func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || strings.Contains(name, "\x00") { return nil, errors.New("http: invalid character in file path") } diff --git a/vendor/github.com/spf13/afero/internal/common/adapters.go b/vendor/github.com/spf13/afero/internal/common/adapters.go new file mode 100644 index 00000000000..60685caa54d --- /dev/null +++ b/vendor/github.com/spf13/afero/internal/common/adapters.go @@ -0,0 +1,27 @@ +// Copyright © 2022 Steve Francia . 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import "io/fs" + +// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry +type FileInfoDirEntry struct { + fs.FileInfo +} + +var _ fs.DirEntry = FileInfoDirEntry{} + +func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index c80345536dd..938b9316e6b 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -1,3 +1,4 @@ +//go:build go1.16 // +build go1.16 package afero @@ -7,7 +8,10 @@ import ( "io/fs" "os" "path" + "sort" "time" + + "github.com/spf13/afero/internal/common" ) // IOFS adopts afero.Fs to stdlib io/fs.FS @@ -66,14 +70,31 @@ func (iofs IOFS) Glob(pattern string) ([]string, error) { } func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - items, err := ReadDir(iofs.Fs, name) + f, err := iofs.Fs.Open(name) if err != nil { return nil, iofs.wrapError("readdir", name, err) } + defer f.Close() + + if rdf, ok := f.(fs.ReadDirFile); ok { + items, err := rdf.ReadDir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) + return items, nil + } + + items, err := f.Readdir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Sort(byName(items)) + ret := make([]fs.DirEntry, len(items)) for i := range items { - ret[i] = dirEntry{items[i]} + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} } return ret, nil @@ -108,17 +129,6 @@ func (IOFS) wrapError(op, path string, err error) error { } } -// dirEntry provides adapter from os.FileInfo to fs.DirEntry -type dirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = dirEntry{} - -func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } - // readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open type readDirFile struct { File @@ -134,7 +144,7 @@ func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { ret := make([]fs.DirEntry, len(items)) for i := range items { - ret[i] = dirEntry{items[i]} + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} } return ret, nil diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index a403133e274..386c9cdc227 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -141,7 +141,7 @@ func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in // TempFile to a minimum. 
-var rand uint32 +var randNum uint32 var randmu sync.Mutex func reseed() uint32 { @@ -150,12 +150,12 @@ func reseed() uint32 { func nextRandom() string { randmu.Lock() - r := rand + r := randNum if r == 0 { r = reseed() } r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r + randNum = r randmu.Unlock() return strconv.Itoa(int(1e9 + r%1e9))[1:] } @@ -194,7 +194,7 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue @@ -226,7 +226,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 5a20730c2f6..3cf4693b5a2 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -18,15 +18,20 @@ import ( "bytes" "errors" "io" + "io/fs" "os" "path/filepath" "sync" "sync/atomic" "time" + + "github.com/spf13/afero/internal/common" ) const FilePathSeparator = string(filepath.Separator) +var _ fs.ReadDirFile = &File{} + type File struct { // atomic requires 64-bit alignment for struct field access at int64 @@ -71,7 +76,7 @@ func CreateFile(name string) *FileData { } func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} + return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} } func ChangeFileName(f *FileData, newname string) { @@ -183,10 +188,23 @@ func (f *File) Readdirnames(n int) (names []string, err error) { return names, err } +// Implements fs.ReadDirFile +func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { + fi, err := f.Readdir(n) + if err != nil { + return nil, err + } + di := make([]fs.DirEntry, len(fi)) + for i, f := range fi { + di[i] = common.FileInfoDirEntry{FileInfo: f} + } + return di, nil +} + func (f *File) Read(b []byte) (n int, err error) { f.fileData.Lock() defer f.fileData.Unlock() - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if len(b) > 0 && int(f.at) == len(f.fileData.data) { @@ -214,7 +232,7 @@ func (f *File) ReadAt(b []byte, off int64) (n int, err error) { } func (f *File) Truncate(size int64) error { - if f.closed == true { + if f.closed { return ErrFileClosed } if f.readOnly { @@ -236,7 +254,7 @@ func (f *File) Truncate(size int64) error { } func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } switch whence { @@ -251,7 +269,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { } func (f *File) Write(b []byte) (n int, err error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if f.readOnly { @@ -330,8 +348,8 @@ func (s *FileInfo) Size() int64 { var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index 5c265f92b23..d06975e7127 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -142,6 +142,11 @@ func (m *MemMapFs) 
Mkdir(name string, perm os.FileMode) error { } m.mu.Lock() + // Dobule check that it doesn't exist. + if _, ok := m.getData()[name]; ok { + m.mu.Unlock() + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } item := mem.CreateDir(name) mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item @@ -279,7 +284,7 @@ func (m *MemMapFs) RemoveAll(path string) error { defer m.mu.RUnlock() for p := range m.getData() { - if strings.HasPrefix(p, path) { + if p == path || strings.HasPrefix(p, path+FilePathSeparator) { m.mu.RUnlock() m.mu.Lock() delete(m.getData(), p) diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 985363eea7e..333d367f48b 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -65,7 +65,7 @@ func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { if f.Layer != nil { n, err := f.Layer.ReadAt(s, o) if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + _, err = f.Base.Seek(o+int64(n), io.SeekStart) } return n, err } @@ -268,13 +268,7 @@ func (f *UnionFile) WriteString(s string) (n int, err error) { return 0, BADFD } -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - +func copyFile(base Fs, layer Fs, name string, bfh File) error { // First make sure the directory exists exists, err := Exists(layer, filepath.Dir(name)) if err != nil { @@ -315,3 +309,23 @@ func copyToLayer(base Fs, layer Fs, name string) error { } return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) } + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} + +func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { + bfh, err := base.OpenFile(name, flag, perm) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 4f253f481ed..cb7de23f269 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -25,6 +25,7 @@ import ( "strings" "unicode" + "golang.org/x/text/runes" "golang.org/x/text/transform" "golang.org/x/text/unicode/norm" ) @@ -158,16 +159,12 @@ func UnicodeSanitize(s string) string { // Transform characters with accents into plain forms. func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) result, _, _ := transform.String(t, string(s)) return result } -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { return FileContainsBytes(a.Fs, filename, subslice) } @@ -299,6 +296,9 @@ func IsEmpty(fs Fs, path string) (bool, error) { } defer f.Close() list, err := f.Readdir(-1) + if err != nil { + return false, err + } return len(list) == 0, nil } return fi.Size() == 0, nil diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 00000000000..df7aa02db6d --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. +// - It would be very expensive to pass a converted RuneError to a transformer: +// a transformer might need more source bytes after RuneError, meaning that +// the only way to pass it safely is to create a new buffer and manage the +// intermingling of RuneErrors and normal input. +// - Many transformers leave ill-formed UTF-8 as is, so this is not +// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a +// logical consequence of the operation (as for Map) or if it otherwise would +// pose security concerns (as for Remove). +// - An alternative would be to return an error on ill-formed UTF-8, but this +// would be inconsistent with other operations. + +// If returns a transformer that applies tIn to consecutive runes for which +// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset +// is called on tIn and tNotIn at the start of each run. A Nop transformer will +// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated +// to RuneError to determine which transformer to apply, but is passed as is to +// the respective transformer. +func If(s Set, tIn, tNotIn transform.Transformer) Transformer { + if tIn == nil && tNotIn == nil { + return Transformer{transform.Nop} + } + if tIn == nil { + tIn = transform.Nop + } + if tNotIn == nil { + tNotIn = transform.Nop + } + sIn, ok := tIn.(transform.SpanningTransformer) + if !ok { + sIn = dummySpan{tIn} + } + sNotIn, ok := tNotIn.(transform.SpanningTransformer) + if !ok { + sNotIn = dummySpan{tNotIn} + } + + a := &cond{ + tIn: sIn, + tNotIn: sNotIn, + f: s.Contains, + } + a.Reset() + return Transformer{a} +} + +type dummySpan struct{ transform.Transformer } + +func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { + return 0, transform.ErrEndOfSpan +} + +type cond struct { + tIn, tNotIn transform.SpanningTransformer + f func(rune) bool + check func(rune) bool // current check to perform + t transform.SpanningTransformer // current transformer to use +} + +// Reset implements transform.Transformer. +func (t *cond) Reset() { + t.check = t.is + t.t = t.tIn + t.t.Reset() // notIn will be reset on first usage. +} + +func (t *cond) is(r rune) bool { + if t.f(r) { + return true + } + t.check = t.isNot + t.t = t.tNotIn + t.tNotIn.Reset() + return false +} + +func (t *cond) isNot(r rune) bool { + if !t.f(r) { + return true + } + t.check = t.is + t.t = t.tIn + t.tIn.Reset() + return false +} + +// This implementation of Span doesn't help all too much, but it needs to be +// there to satisfy this package's Transformer interface. +// TODO: there are certainly room for improvements, though. For example, if +// t.t == transform.Nop (which will a common occurrence) it will save a bundle +// to special-case that loop. +func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { + p := 0 + for n < len(src) && err == nil { + // Don't process too much at a time as the Spanner that will be + // called on this block may terminate early. 
+ const maxChunk = 4096 + max := len(src) + if v := n + maxChunk; v < max { + max = v + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) + n += n2 + if err2 != nil { + return n, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = n + size + } + return n, err +} + +func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + p := 0 + for nSrc < len(src) && err == nil { + // Don't process too much at a time, as the work might be wasted if the + // destination buffer isn't large enough to hold the result or a + // transform returns an error early. + const maxChunk = 4096 + max := len(src) + if n := nSrc + maxChunk; n < len(src) { + max = n + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) + nDst += nDst2 + nSrc += nSrc2 + if err2 != nil { + return nDst, nSrc, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = nSrc + size + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go new file mode 100644 index 00000000000..930e87fedb0 --- /dev/null +++ b/vendor/golang.org/x/text/runes/runes.go @@ -0,0 +1,355 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package runes provide transforms for UTF-8 encoded text. +package runes // import "golang.org/x/text/runes" + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Set is a collection of runes. +type Set interface { + // Contains returns true if r is contained in the set. + Contains(r rune) bool +} + +type setFunc func(rune) bool + +func (s setFunc) Contains(r rune) bool { + return s(r) +} + +// Note: using funcs here instead of wrapping types result in cleaner +// documentation and a smaller API. + +// In creates a Set with a Contains method that returns true for all runes in +// the given RangeTable. +func In(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) +} + +// NotIn creates a Set with a Contains method that returns true for all runes not +// in the given RangeTable. +func NotIn(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) +} + +// Predicate creates a Set with a Contains method that returns f(r). +func Predicate(f func(rune) bool) Set { + return setFunc(f) +} + +// Transformer implements the transform.Transformer interface. 
+type Transformer struct { + t transform.SpanningTransformer +} + +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { + return t.t.Span(b, atEOF) +} + +func (t Transformer) Reset() { t.t.Reset() } + +// Bytes returns a new byte slice with the result of converting b using t. It +// calls Reset on t. It returns nil if any error was found. This can only happen +// if an error-producing Transformer is passed to If. +func (t Transformer) Bytes(b []byte) []byte { + b, _, err := transform.Bytes(t, b) + if err != nil { + return nil + } + return b +} + +// String returns a string with the result of converting s using t. It calls +// Reset on t. It returns the empty string if any error was found. This can only +// happen if an error-producing Transformer is passed to If. +func (t Transformer) String(s string) string { + s, _, err := transform.String(t, s) + if err != nil { + return "" + } + return s +} + +// TODO: +// - Copy: copying strings and bytes in whole-rune units. +// - Validation (maybe) +// - Well-formed-ness (maybe) + +const runeErrorString = string(utf8.RuneError) + +// Remove returns a Transformer that removes runes r for which s.Contains(r). +// Illegal input bytes are replaced by RuneError before being passed to f. +func Remove(s Set) Transformer { + if f, ok := s.(setFunc); ok { + // This little trick cuts the running time of BenchmarkRemove for sets + // created by Predicate roughly in half. + // TODO: special-case RangeTables as well. + return Transformer{remove(f)} + } + return Transformer{remove(s.Contains)} +} + +// TODO: remove transform.RemoveFunc. + +type remove func(r rune) bool + +func (remove) Reset() {} + +// Span implements transform.Spanner. +func (t remove) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) { + err = transform.ErrEndOfSpan + break + } + n += size + } + return +} + +// Transform implements transform.Transformer. +func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(utf8.RuneError) { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + } + nSrc++ + continue + } + if t(r) { + nSrc += size + continue + } + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + } + return +} + +// Map returns a Transformer that maps the runes in the input using the given +// mapping. 
Illegal bytes in the input are converted to utf8.RuneError before +// being passed to the mapping func. +func Map(mapping func(rune) rune) Transformer { + return Transformer{mapper(mapping)} +} + +type mapper func(rune) rune + +func (mapper) Reset() {} + +// Span implements transform.Spanner. +func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); n += size { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + +// Transform implements transform.Transformer. +func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = r + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. 
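// Editor's note: illustrative sketch, not part of the patch. It exercises the
// same transform chain that the afero NeuterAccents change earlier in this
// patch now builds with runes.Remove(runes.In(unicode.Mn)) instead of the
// deprecated transform.RemoveFunc helper.
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// Decompose, drop nonspacing marks (accents), then recompose.
	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
	out, _, err := transform.String(t, "café naïve")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "cafe naive"
}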
+ if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8a285e4e75e..65d66ef495b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -753,9 +753,10 @@ github.com/sirupsen/logrus # github.com/sony/gobreaker v0.5.0 ## explicit; go 1.12 github.com/sony/gobreaker -# github.com/spf13/afero v1.6.0 -## explicit; go 1.13 +# github.com/spf13/afero v1.9.3 +## explicit; go 1.16 github.com/spf13/afero +github.com/spf13/afero/internal/common github.com/spf13/afero/mem # github.com/stretchr/objx v0.4.0 ## explicit; go 1.12 @@ -1068,6 +1069,7 @@ golang.org/x/sys/windows golang.org/x/sys/windows/registry # golang.org/x/text v0.4.0 ## explicit; go 1.17 +golang.org/x/text/runes golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi From 30bee471f1d69c93360b84fed0a32f3df3a60f10 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 11 Dec 2022 07:54:47 +0000 Subject: [PATCH 010/133] Bump github.com/stretchr/testify from 1.8.0 to 1.8.1 (#5020) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.0 to 1.8.1. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.0...v1.8.1) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 4 ++-- go.sum | 6 ++++-- vendor/github.com/stretchr/objx/Taskfile.yml | 2 +- vendor/modules.txt | 4 ++-- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index d0e0c1b3026..153d7492e3d 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.1 github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d github.com/uber/jaeger-client-go v2.30.0+incompatible @@ -180,7 +180,7 @@ require ( github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/sirupsen/logrus v1.9.0 // indirect - github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a // indirect github.com/weaveworks/promrus v1.2.0 // indirect diff --git a/go.sum b/go.sum index 81ec26e9e10..391d36cd172 100644 --- a/go.sum +++ b/go.sum @@ -918,16 +918,18 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index a749ac5492e..7746f516da2 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -25,6 +25,6 @@ tasks: - go test -race ./... test-coverage: - desc: Runs go tests and calucates test coverage + desc: Runs go tests and calculates test coverage cmds: - go test -race -coverprofile=c.out ./... diff --git a/vendor/modules.txt b/vendor/modules.txt index 65d66ef495b..2e50d08d4dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -758,10 +758,10 @@ github.com/sony/gobreaker github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/stretchr/objx v0.4.0 +# github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.8.0 +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock From 35078ff76268e15f0196f51bc9d6b7dadd72d332 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 11 Dec 2022 02:42:08 -0800 Subject: [PATCH 011/133] Bump github.com/mitchellh/go-wordwrap from 1.0.0 to 1.0.1 (#5019) Bumps [github.com/mitchellh/go-wordwrap](https://github.com/mitchellh/go-wordwrap) from 1.0.0 to 1.0.1. - [Release notes](https://github.com/mitchellh/go-wordwrap/releases) - [Commits](https://github.com/mitchellh/go-wordwrap/compare/v1.0.0...v1.0.1) --- updated-dependencies: - dependency-name: github.com/mitchellh/go-wordwrap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 3 ++- .../mitchellh/go-wordwrap/wordwrap.go | 26 +++++++++++++------ vendor/modules.txt | 4 +-- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 153d7492e3d..80ded6973a3 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/lib/pq v1.10.7 github.com/minio/minio-go/v7 v7.0.37 - github.com/mitchellh/go-wordwrap v1.0.0 + github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing-contrib/go-stdlib v1.0.0 diff --git a/go.sum b/go.sum index 391d36cd172..88ab24d6fd1 100644 --- a/go.sum +++ b/go.sum @@ -716,8 +716,9 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go index ac67205bc2e..f7bedda3882 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -5,6 +5,8 @@ import ( "unicode" ) +const nbsp = 0xA0 + // WrapString wraps the given string within lim width in characters. // // Wrapping is currently naive and only happens at white-space. 
A future @@ -18,50 +20,58 @@ func WrapString(s string, lim uint) string { var current uint var wordBuf, spaceBuf bytes.Buffer + var wordBufLen, spaceBufLen uint for _, char := range s { if char == '\n' { if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) > lim { + if current+spaceBufLen > lim { current = 0 } else { - current += uint(spaceBuf.Len()) + current += spaceBufLen spaceBuf.WriteTo(buf) } spaceBuf.Reset() + spaceBufLen = 0 } else { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } buf.WriteRune(char) current = 0 - } else if unicode.IsSpace(char) { + } else if unicode.IsSpace(char) && char != nbsp { if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } spaceBuf.WriteRune(char) + spaceBufLen++ } else { - wordBuf.WriteRune(char) + wordBufLen++ - if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { buf.WriteRune('\n') current = 0 spaceBuf.Reset() + spaceBufLen = 0 } } } if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) <= lim { + if current+spaceBufLen <= lim { spaceBuf.WriteTo(buf) } } else { diff --git a/vendor/modules.txt b/vendor/modules.txt index 2e50d08d4dc..0cddf14b592 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -554,8 +554,8 @@ github.com/minio/sha256-simd # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/go-wordwrap v1.0.0 -## explicit +# github.com/mitchellh/go-wordwrap v1.0.1 +## explicit; go 1.14 github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 From 9c5445cd8f4aa07d96d2e3932e2dc87b56fd9569 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 10:09:20 -0800 Subject: [PATCH 012/133] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc (#5036) Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) from 1.11.0 to 1.11.2. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.0...v1.11.2) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 16 ++-- go.sum | 28 +++---- .../cenkalti/backoff/v4/.travis.yml | 10 --- .../github.com/cenkalti/backoff/v4/retry.go | 50 ++++++++++-- vendor/go.opentelemetry.io/otel/.golangci.yml | 6 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 63 ++++++++++++++- vendor/go.opentelemetry.io/otel/Makefile | 2 +- .../otel/attribute/value.go | 44 ++++++++-- .../go.opentelemetry.io/otel/codes/codes.go | 10 +++ .../otlp/internal/envconfig/envconfig.go | 81 +++++++++++++++---- .../exporters/otlp/internal/partialsuccess.go | 34 ++++---- .../otel/exporters/otlp/otlptrace/README.md | 4 +- .../internal/otlpconfig/envconfig.go | 27 ++++++- .../otlp/otlptrace/otlptracegrpc/client.go | 11 +-- vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 4 +- vendor/modules.txt | 18 ++--- 18 files changed, 302 insertions(+), 110 deletions(-) delete mode 100644 vendor/github.com/cenkalti/backoff/v4/.travis.yml diff --git a/go.mod b/go.mod index 80ded6973a3..0eca110113b 100644 --- a/go.mod +++ b/go.mod @@ -55,17 +55,17 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.6 go.etcd.io/etcd/client/v3 v3.5.4 go.opentelemetry.io/contrib/propagators/aws v1.11.1 - go.opentelemetry.io/otel v1.11.1 + go.opentelemetry.io/otel v1.11.2 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 - go.opentelemetry.io/otel/sdk v1.11.1 - go.opentelemetry.io/otel/trace v1.11.1 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 + go.opentelemetry.io/otel/sdk v1.11.2 + go.opentelemetry.io/otel/trace v1.11.2 go.uber.org/atomic v1.10.0 golang.org/x/net v0.2.0 golang.org/x/sync v0.0.0-20220907140024-f12130a52804 golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 - google.golang.org/grpc v1.50.1 + google.golang.org/grpc v1.51.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 sigs.k8s.io/yaml v1.3.0 @@ -99,7 +99,7 @@ require ( github.com/aws/smithy-go v1.11.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.2.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect @@ -192,7 +192,7 @@ require ( go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect go.opentelemetry.io/otel/metric v0.32.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect diff --git a/go.sum b/go.sum index 88ab24d6fd1..3ff20add840 100644 --- a/go.sum +++ b/go.sum @@ -176,8 +176,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff 
v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= +github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -1017,22 +1017,22 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ go.opentelemetry.io/contrib/propagators/jaeger v1.9.0/go.mod h1:Q/AXutvrBTfEDSeRLwOmKhyviX5adJvTesg6JFTybYg= go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4ZfG42PAGvj3eLUukHE= go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= -go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4= -go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE= +go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= +go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 h1:jrqVQNRKglMRBUrSf40J5mBYnUyleAKL+g0iuyEyaGA= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 h1:X2GndnMCsUPh6CiY2a+frAbNsXaPLbB0soHRYhAZ5Ig= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1/go.mod h1:i8vjiSzbiUC7wOQplijSXMYUpNM93DtlS5CbUT+C6oQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 h1:MEQNafcNCB0uQIti/oHgU7CZpUMYQ7qigBwMVKycHvc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1/go.mod h1:19O5I2U5iys38SsmT2uDJja/300woyzE1KPIQxEUBUc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 h1:j2RFV0Qdt38XQ2Jvi4WIsQ56w8T7eSirYbMw19VXRDg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0/go.mod h1:pILgiTEtrqvZpoiuGdblDgS5dbIaTgDrkIuKfEFkt+A= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k= go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY= -go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs= -go.opentelemetry.io/otel/sdk v1.11.1/go.mod 
h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys= -go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ= -go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk= +go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= +go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= +go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= +go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml deleted file mode 100644 index c79105c2fbe..00000000000 --- a/vendor/github.com/cenkalti/backoff/v4/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.13 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go index 1ce2507ebc8..b9c0c51cd75 100644 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -5,10 +5,20 @@ import ( "time" ) +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + // An Operation is executing by Retry() or RetryNotify(). // The operation will be retried using a backoff policy if it returns an error. type Operation func() error +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + // Notify is a notify-on-error function. It receives an operation error and // backoff delay if the operation failed (with an error). // @@ -28,18 +38,41 @@ func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + // RetryNotify calls notify function with the error and wait duration // for each failed attempt before sleep. func RetryNotify(operation Operation, b BackOff, notify Notify) error { return RetryNotifyWithTimer(operation, b, notify, nil) } +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + // RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer // for each failed attempt before sleep. // A default timer that uses system timer is used when nil is passed. 
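// Editor's note: illustrative sketch, not part of the patch. It shows the
// generic RetryWithData helper introduced in backoff/v4 v4.2.0 above, which
// returns the operation's result alongside the error instead of forcing the
// caller to capture it through a closure. fetchValue is a hypothetical flaky
// operation used only for illustration.
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func fetchValue() (string, error) {
	// A real caller would do I/O here.
	return "", errors.New("transient failure")
}

func main() {
	result, err := backoff.RetryWithData(fetchValue,
		backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3))
	fmt.Println(result, err)
}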
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - var err error - var next time.Duration + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) if t == nil { t = &defaultTimer{} } @@ -52,21 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer b.Reset() for { - if err = operation(); err == nil { - return nil + res, err = operation() + if err == nil { + return res, nil } var permanent *PermanentError if errors.As(err, &permanent) { - return permanent.Err + return res, permanent.Err } if next = b.NextBackOff(); next == Stop { if cerr := ctx.Err(); cerr != nil { - return cerr + return res, cerr } - return err + return res, err } if notify != nil { @@ -77,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer select { case <-ctx.Done(): - return ctx.Err() + return res, ctx.Err() case <-t.C(): } } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 253e3b35b52..0f099f57595 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,7 +9,6 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: - - deadcode - depguard - errcheck - godot @@ -21,10 +20,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: # Maximum issues count per one linter. @@ -114,8 +111,9 @@ linters-settings: - name: constant-logical-expr disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 - name: context-as-argument - disabled: false + disabled: true arguments: allowTypesBefore: "*testing.T" # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index faae85f292b..9f130b8be10 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,66 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. 
+ This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Reenabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. 
(#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) + +## Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + ## [1.11.1/0.33.0] 2022-10-19 ### Added @@ -2027,7 +2087,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.1...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...HEAD +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 [1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 [1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 [0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 07e31965c30..68cdfef7d96 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) GO = go TIMEOUT = 60 diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 80a37bd6ff3..34a4e548dd1 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -142,6 +142,13 @@ func (v Value) AsBool() bool { // AsBoolSlice returns the []bool value. Make sure that the Value's type is // BOOLSLICE. func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { return attribute.AsSlice[bool](v.slice) } @@ -154,6 +161,13 @@ func (v Value) AsInt64() int64 { // AsInt64Slice returns the []int64 value. Make sure that the Value's type is // INT64SLICE. func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { return attribute.AsSlice[int64](v.slice) } @@ -166,6 +180,13 @@ func (v Value) AsFloat64() float64 { // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // FLOAT64SLICE. 
func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { return attribute.AsSlice[float64](v.slice) } @@ -178,6 +199,13 @@ func (v Value) AsString() string { // AsStringSlice returns the []string value. Make sure that the Value's type is // STRINGSLICE. func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { return attribute.AsSlice[string](v.slice) } @@ -189,19 +217,19 @@ func (v Value) AsInterface() interface{} { case BOOL: return v.AsBool() case BOOLSLICE: - return v.AsBoolSlice() + return v.asBoolSlice() case INT64: return v.AsInt64() case INT64SLICE: - return v.AsInt64Slice() + return v.asInt64Slice() case FLOAT64: return v.AsFloat64() case FLOAT64SLICE: - return v.AsFloat64Slice() + return v.asFloat64Slice() case STRING: return v.stringly case STRINGSLICE: - return v.AsStringSlice() + return v.asStringSlice() } return unknownValueType{} } @@ -210,19 +238,19 @@ func (v Value) AsInterface() interface{} { func (v Value) Emit() string { switch v.Type() { case BOOLSLICE: - return fmt.Sprint(v.AsBoolSlice()) + return fmt.Sprint(v.asBoolSlice()) case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.AsInt64Slice()) + return fmt.Sprint(v.asInt64Slice()) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.AsFloat64Slice()) + return fmt.Sprint(v.asFloat64Slice()) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.AsStringSlice()) + return fmt.Sprint(v.asStringSlice()) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 064a9279fd1..587ebae4e30 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -23,10 +23,20 @@ import ( const ( // Unset is the default status code. Unset Code = 0 + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. Error Code = 1 + // Ok indicates operation has been validated by an Application developers // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. Ok Code = 2 maxCode = 3 diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go index 67003c4a2fa..53ff3126b6e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go @@ -23,6 +23,8 @@ import ( "strconv" "strings" "time" + + "go.opentelemetry.io/otel/internal/global" ) // ConfigFn is the generic function used to set a config. @@ -59,13 +61,26 @@ func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { } } +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. 
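// Editor's note: illustrative sketch, not part of the patch. It demonstrates the
// guarded As[Type]Slice accessors from the attribute/value.go hunk above: since
// otel v1.11.2 a mismatched slice accessor returns nil rather than panicking.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kv := attribute.IntSlice("ports", []int{80, 443})

	fmt.Println(kv.Value.AsInt64Slice()) // [80 443]
	fmt.Println(kv.Value.AsBoolSlice())  // nil: value is INT64SLICE, not BOOLSLICE
}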
+func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { - if d, err := strconv.Atoi(v); err == nil { - fn(time.Duration(d) * time.Millisecond) + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return } + fn(time.Duration(d) * time.Millisecond) } } } @@ -83,23 +98,59 @@ func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { - if u, err := url.Parse(v); err == nil { - fn(u) + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return } + fn(u) } } } -// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config. -func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) { +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. +func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { - if b, err := e.ReadFile(v); err == nil { - if c, err := createTLSConfig(b); err == nil { - fn(c) - } + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) } } @@ -117,15 +168,18 @@ func stringToHeader(value string) map[string]string { for _, header := range headersPairs { nameValue := strings.SplitN(header, "=", 2) if len(nameValue) < 2 { + global.Error(errors.New("missing '="), "parse headers", "input", nameValue) continue } name, err := url.QueryUnescape(nameValue[0]) if err != nil { + global.Error(err, "escape header key", "key", nameValue[0]) continue } trimmedName := strings.TrimSpace(name) value, err := url.QueryUnescape(nameValue[1]) if err != nil { + global.Error(err, "escape header value", "value", nameValue[1]) continue } trimmedValue := strings.TrimSpace(value) @@ -136,13 +190,10 @@ func stringToHeader(value string) map[string]string { return headers } -func createTLSConfig(certBytes []byte) (*tls.Config, error) { +func createCertPool(certBytes []byte) (*x509.CertPool, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } - - return &tls.Config{ - RootCAs: cp, - }, nil + return cp, nil } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go index 7994706ab51..9ab89b37574 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go @@ -16,19 +16,6 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" import "fmt" -// PartialSuccessDropKind indicates the kind of partial success error -// received by an OTLP exporter, which corresponds with the signal -// being exported. -type PartialSuccessDropKind string - -const ( - // TracingPartialSuccess indicates that some spans were rejected. - TracingPartialSuccess PartialSuccessDropKind = "spans" - - // MetricsPartialSuccess indicates that some metric data points were rejected. - MetricsPartialSuccess PartialSuccessDropKind = "metric data points" -) - // PartialSuccess represents the underlying error for all handling // OTLP partial success messages. Use `errors.Is(err, // PartialSuccess{})` to test whether an error passed to the OTel @@ -36,7 +23,7 @@ const ( type PartialSuccess struct { ErrorMessage string RejectedItems int64 - RejectedKind PartialSuccessDropKind + RejectedKind string } var _ error = PartialSuccess{} @@ -56,13 +43,22 @@ func (ps PartialSuccess) Is(err error) bool { return ok } -// PartialSuccessToError produces an error suitable for passing to -// `otel.Handle()` out of the fields in a partial success response, -// independent of which signal produced the outcome. -func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error { +// TracePartialSuccessError returns an error describing a partial success +// response for the trace signal. 
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "spans", + } +} + +// MetricPartialSuccessError returns an error describing a partial success +// response for the metric signal. +func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, - RejectedKind: kind, + RejectedKind: "metric data points", } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md index ca91fd4f489..6e9cc0366ba 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -12,8 +12,8 @@ go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace ## Examples -- [Exporter setup and examples](./otlptracehttp/example_test.go) -- [Full example sending telemetry to a local collector](../../../example/otel-collector) +- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go) +- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector) ## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go index b29f618e3de..62c5029db2a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go @@ -16,6 +16,7 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ import ( "crypto/tls" + "crypto/x509" "net/url" "os" "path" @@ -53,6 +54,7 @@ func ApplyHTTPEnvConfigs(cfg Config) Config { func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} + tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) @@ -81,8 +83,13 @@ func getOptionsFromEnv() []GenericOption { return cfg }, withEndpointForGRPC(u))) }), - envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), - envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), 
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), @@ -125,3 +132,19 @@ func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOpt } } } + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 9d6e1898b14..fe23f8e3766 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -202,11 +202,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc ResourceSpans: protoSpans, }) if resp != nil && resp.PartialSuccess != nil { - otel.Handle(internal.PartialSuccessToError( - internal.TracingPartialSuccess, - resp.PartialSuccess.RejectedSpans, - resp.PartialSuccess.ErrorMessage, - )) + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } } // nil is converted to OK. if status.Code(err) == codes.OK { diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 391417718f5..ab0346f9664 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the OpenTelemetry API. To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. It its simplest form: +operation being performed as part of a traced workflow. In its simplest form: var tracer trace.Tracer diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 942e484f848..00b79bcc25c 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.11.1" + return "1.11.2" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index a2905787a55..611879def4f 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.11.1 + version: v1.11.2 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -34,7 +34,7 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.33.0 + version: v0.34.0 modules: - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus diff --git a/vendor/modules.txt b/vendor/modules.txt index 0cddf14b592..fce90f0fb24 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -226,8 +226,8 @@ github.com/blang/semver/v4 # github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab ## explicit github.com/bradfitz/gomemcache/memcache -# github.com/cenkalti/backoff/v4 v4.1.3 -## explicit; go 1.13 +# github.com/cenkalti/backoff/v4 v4.2.0 +## explicit; go 1.18 github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash v1.1.0 ## explicit @@ -941,7 +941,7 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.9.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.11.1 +# go.opentelemetry.io/otel v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -960,15 +960,15 @@ go.opentelemetry.io/otel/semconv/v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 +# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc # go.opentelemetry.io/otel/metric v0.32.0 @@ -982,7 +982,7 @@ go.opentelemetry.io/otel/metric/instrument/syncfloat64 go.opentelemetry.io/otel/metric/instrument/syncint64 go.opentelemetry.io/otel/metric/internal/global go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/sdk v1.11.1 +# go.opentelemetry.io/otel/sdk v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal @@ -990,7 +990,7 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.11.1 +# go.opentelemetry.io/otel/trace v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/trace # go.opentelemetry.io/proto/otlp v0.19.0 @@ -1140,7 +1140,7 @@ google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/date 
google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.50.1 => google.golang.org/grpc v1.45.0 +# google.golang.org/grpc v1.51.0 => google.golang.org/grpc v1.45.0 ## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes From 38a0b43b480b8e785b840eeb42851c6811dfb135 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 11:47:23 -0800 Subject: [PATCH 013/133] Bump github.com/alicebob/miniredis/v2 from 2.22.0 to 2.23.1 (#5032) Bumps [github.com/alicebob/miniredis/v2](https://github.com/alicebob/miniredis) from 2.22.0 to 2.23.1. - [Release notes](https://github.com/alicebob/miniredis/releases) - [Changelog](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md) - [Commits](https://github.com/alicebob/miniredis/compare/v2.22.0...v2.23.1) --- updated-dependencies: - dependency-name: github.com/alicebob/miniredis/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 4 +- go.sum | 8 +- .../alicebob/miniredis/v2/.gitignore | 2 + .../alicebob/miniredis/v2/CHANGELOG.md | 16 + .../alicebob/miniredis/v2/README.md | 12 +- .../alicebob/miniredis/v2/cmd_cluster.go | 9 +- .../alicebob/miniredis/v2/cmd_command.go | 2039 ++++++++++++++++- .../alicebob/miniredis/v2/cmd_connection.go | 81 +- .../alicebob/miniredis/v2/cmd_generic.go | 108 +- .../alicebob/miniredis/v2/cmd_geo.go | 178 +- .../alicebob/miniredis/v2/cmd_hash.go | 124 +- .../alicebob/miniredis/v2/cmd_hll.go | 2 +- .../alicebob/miniredis/v2/cmd_info.go | 40 + .../alicebob/miniredis/v2/cmd_list.go | 363 ++- .../alicebob/miniredis/v2/cmd_server.go | 6 +- .../alicebob/miniredis/v2/cmd_set.go | 68 +- .../alicebob/miniredis/v2/cmd_sorted_set.go | 210 +- .../alicebob/miniredis/v2/cmd_stream.go | 642 +++++- .../alicebob/miniredis/v2/cmd_string.go | 213 +- .../github.com/alicebob/miniredis/v2/geo.go | 8 +- .../github.com/alicebob/miniredis/v2/lua.go | 2 + .../alicebob/miniredis/v2/miniredis.go | 26 +- .../github.com/alicebob/miniredis/v2/opts.go | 25 +- .../github.com/alicebob/miniredis/v2/redis.go | 12 +- .../alicebob/miniredis/v2/server/server.go | 33 +- .../alicebob/miniredis/v2/stream.go | 94 +- vendor/github.com/yuin/gopher-lua/README.rst | 1 + vendor/github.com/yuin/gopher-lua/_vm.go | 2 +- vendor/github.com/yuin/gopher-lua/ast/expr.go | 1 + vendor/github.com/yuin/gopher-lua/baselib.go | 13 +- vendor/github.com/yuin/gopher-lua/compile.go | 10 +- vendor/github.com/yuin/gopher-lua/config.go | 7 + vendor/github.com/yuin/gopher-lua/loadlib.go | 3 + vendor/github.com/yuin/gopher-lua/oslib.go | 37 +- .../github.com/yuin/gopher-lua/parse/Makefile | 3 + .../github.com/yuin/gopher-lua/parse/lexer.go | 14 +- .../yuin/gopher-lua/parse/parser.go | 897 +++++--- .../yuin/gopher-lua/parse/parser.go.y | 3 + vendor/github.com/yuin/gopher-lua/pm/pm.go | 2 +- vendor/github.com/yuin/gopher-lua/table.go | 2 +- vendor/github.com/yuin/gopher-lua/vm.go | 2 +- vendor/modules.txt | 6 +- 42 files changed, 4281 insertions(+), 1047 deletions(-) create mode 100644 vendor/github.com/alicebob/miniredis/v2/cmd_info.go diff --git a/go.mod b/go.mod index 0eca110113b..e1e90df5c20 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 
github.com/NYTimes/gziphandler v1.1.1 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/alicebob/miniredis/v2 v2.22.0 + github.com/alicebob/miniredis/v2 v2.23.1 github.com/armon/go-metrics v0.4.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 @@ -184,7 +184,7 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a // indirect github.com/weaveworks/promrus v1.2.0 // indirect - github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect + github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect go.mongodb.org/mongo-driver v1.10.2 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 // indirect diff --git a/go.sum b/go.sum index 3ff20add840..35580801521 100644 --- a/go.sum +++ b/go.sum @@ -114,8 +114,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.22.0 h1:lIHHiSkEyS1MkKHCHzN+0mWrA4YdbGdimE5iZ2sHSzo= -github.com/alicebob/miniredis/v2 v2.22.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= +github.com/alicebob/miniredis/v2 v2.23.1 h1:jR6wZggBxwWygeXcdNyguCOCIjPsZyNUNlAkTx2fu0U= +github.com/alicebob/miniredis/v2 v2.23.1/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -975,8 +975,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= -github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ= +github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= diff --git a/vendor/github.com/alicebob/miniredis/v2/.gitignore b/vendor/github.com/alicebob/miniredis/v2/.gitignore index 7ba06b06ce6..8016b4be34e 100644 --- a/vendor/github.com/alicebob/miniredis/v2/.gitignore +++ b/vendor/github.com/alicebob/miniredis/v2/.gitignore @@ -2,3 +2,5 @@ /integration/dump.rdb *.swp /integration/nodes.conf +.idea/ +miniredis.iml diff --git 
a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md index 0c1fb87ebf2..b70c3df6220 100644 --- a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md +++ b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md @@ -1,6 +1,22 @@ ## Changelog +### v2.23.1 +- resolve $ to latest ID in XREAD (thanks @josh-hook) +- handle disconnect in blocking functions (thanks @jgirtakovskis) +- fix type conversion bug in redisToLua (thanks Sandy Harvie) +- BRPOP{LPUSH} timeout can be float since 6.0 + + +### v2.23.0 + +- basic INFO support (thanks @kirill-a-belov) +- support COUNT in SSCAN (thanks @Abdi-dd) +- test and support Go 1.19 +- support LPOS (thanks @ianstarz) +- support XPENDING, XGROUP {CREATECONSUMER,DESTROY,DELCONSUMER}, XINFO {CONSUMERS,GROUPS}, XCLAIM (thanks @sandyharvie) + + ### v2.22.0 - set miniredis.DumpMaxLineLen to get more Dump() info (thanks @afjoseph) diff --git a/vendor/github.com/alicebob/miniredis/v2/README.md b/vendor/github.com/alicebob/miniredis/v2/README.md index 60072f4e0d8..cf9bde444bf 100644 --- a/vendor/github.com/alicebob/miniredis/v2/README.md +++ b/vendor/github.com/alicebob/miniredis/v2/README.md @@ -64,6 +64,8 @@ Implemented commands: - FLUSHALL - FLUSHDB - TIME -- returns time.Now() or value set by SetTime() + - COMMAND -- partly + - INFO -- partly, returns only "clients" section with one field "connected_clients" - String keys (complete) - APPEND - BITCOUNT @@ -178,9 +180,15 @@ Implemented commands: - XACK - XADD - XAUTOCLAIM + - XCLAIM - XDEL - XGROUP CREATE + - XGROUP CREATECONSUMER + - XGROUP DESTROY + - XGROUP DELCONSUMER - XINFO STREAM -- partly + - XINFO GROUPS + - XINFO CONSUMERS -- partly - XLEN - XRANGE - XREAD @@ -203,8 +211,6 @@ Implemented commands: - GEORADIUS_RO - GEORADIUSBYMEMBER - GEORADIUSBYMEMBER_RO - - Server - - COMMAND -- partly - Cluster - CLUSTER SLOTS - CLUSTER KEYSLOT @@ -302,7 +308,6 @@ Commands which will probably not be implemented: - ~~CLIENT *~~ - ~~CONFIG *~~ - ~~DEBUG *~~ - - ~~INFO~~ - ~~LASTSAVE~~ - ~~MONITOR~~ - ~~ROLE~~ @@ -325,5 +330,4 @@ If you want to test Redis Sentinel have a look at [minisentinel](https://github. A changelog is kept at [CHANGELOG.md](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md). -[![Build Status](https://travis-ci.com/alicebob/miniredis.svg?branch=master)](https://travis-ci.com/alicebob/miniredis) [![Go Reference](https://pkg.go.dev/badge/github.com/alicebob/miniredis/v2.svg)](https://pkg.go.dev/github.com/alicebob/miniredis/v2) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go b/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go index 083c4ecf7d7..9951f3dd3b7 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go @@ -4,13 +4,14 @@ package miniredis import ( "fmt" - "github.com/alicebob/miniredis/v2/server" "strings" + + "github.com/alicebob/miniredis/v2/server" ) // commandsCluster handles some cluster operations. 
func commandsCluster(m *Miniredis) { - _ = m.srv.Register("CLUSTER", m.cmdCluster) + m.srv.Register("CLUSTER", m.cmdCluster) } func (m *Miniredis) cmdCluster(c *server.Peer, cmd string, args []string) { @@ -51,14 +52,14 @@ func (m *Miniredis) cmdClusterSlots(c *server.Peer, cmd string, args []string) { }) } -//CLUSTER KEYSLOT +// CLUSTER KEYSLOT func (m *Miniredis) cmdClusterKeySlot(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { c.WriteInt(163) }) } -//CLUSTER NODES +// CLUSTER NODES func (m *Miniredis) cmdClusterNodes(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { c.WriteBulk("e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:7000@7000 myself,master - 0 0 1 connected 0-16383") diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_command.go b/vendor/github.com/alicebob/miniredis/v2/cmd_command.go index 59abefd382a..d82174f2f2e 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_command.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_command.go @@ -4,15 +4,2042 @@ package miniredis import "github.com/alicebob/miniredis/v2/server" -func commandsCommand(m *Miniredis) { - _ = m.srv.Register("COMMAND", m.cmdCommand) -} - func (m *Miniredis) cmdCommand(c *server.Peer, cmd string, args []string) { // Got from redis 5.0.7 with // echo 'COMMAND' | nc redis_addr redis_port // - res := "*200\r\n*6\r\n$12\r\nhincrbyfloat\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\nxreadgroup\r\n:-7\r\n*3\r\n+write\r\n+noscript\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\nsdiffstore\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$8\r\nlastsave\r\n:1\r\n*2\r\n+random\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsetnx\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nbzpopmax\r\n:-3\r\n*3\r\n+write\r\n+noscript\r\n+fast\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$12\r\npunsubscribe\r\n:-1\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nxack\r\n:-4\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\npfselftest\r\n:1\r\n*1\r\n+admin\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nsubstr\r\n:4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nsmembers\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nunsubscribe\r\n:-1\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$11\r\nzinterstore\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nstrlen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\npfmerge\r\n:-2\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$9\r\nrandomkey\r\n:1\r\n*2\r\n+readonly\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nlolwut\r\n:-1\r\n*1\r\n+readonly\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nrpop\r\n:2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nhkeys\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nclient\r\n:-2\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nmodule\r\n:-2\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nslowlog\r\n:-2\r\n*2\r\n+admin\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ngeohash\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nlrange\r\n:4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nping\r\n:-1\r\n*2\r\n+stale\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nbitcount\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\npubsub\r\n:-2\r\n*4\r\n+pubsub\r\n+random\r\n+loading\r\n+stale\r\n:0\r\
n:0\r\n:0\r\n*6\r\n$4\r\nrole\r\n:1\r\n*3\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nhget\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nobject\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:2\r\n:2\r\n:1\r\n*6\r\n$9\r\nzrevrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nhincrby\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nzlexcount\r\n:4\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nscard\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nappend\r\n:3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nhstrlen\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nconfig\r\n:-2\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nhset\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$16\r\nzrevrangebyscore\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nincr\r\n:2\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nsetbit\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nrpoplpush\r\n:3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:2\r\n:1\r\n*6\r\n$6\r\nxclaim\r\n:-6\r\n*3\r\n+write\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nsinterstore\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$7\r\npublish\r\n:3\r\n*4\r\n+pubsub\r\n+loading\r\n+stale\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhscan\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nmulti\r\n:1\r\n*2\r\n+noscript\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$3\r\nset\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nlpushx\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$16\r\nzremrangebyscore\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\npexpireat\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nhdel\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$12\r\nbgrewriteaof\r\n:1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nmigrate\r\n:-6\r\n*3\r\n+write\r\n+random\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$9\r\nreplicaof\r\n:3\r\n*3\r\n+admin\r\n+noscript\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\ntouch\r\n:-2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nxsetid\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nbitop\r\n:-4\r\n*2\r\n+write\r\n+denyoom\r\n:2\r\n:-1\r\n:1\r\n*6\r\n$6\r\nswapdb\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsdiff\r\n:-2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$6\r\nlindex\r\n:3\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nwait\r\n:3\r\n*1\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nlrem\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nhsetnx\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\ngetrange\r\n:4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nhlen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\npost\r\n:-1\r\n*2\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$9\r\nsismember\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nunwatch\r\n:1\r\n*2\r\n+noscript\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nlpush\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nscan\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsmove\r\n:4\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:2\r\n:1\r\n*6\r\n$7\r\ncluster\r\n:-2\r\n*1\r\n+admin\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6
\r\nbgsave\r\n:-1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\ndump\r\n:2\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nlatency\r\n:-2\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nbzpopmin\r\n:-3\r\n*3\r\n+write\r\n+noscript\r\n+fast\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$6\r\ngetbit\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nhgetall\r\n:2\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nrename\r\n:3\r\n*1\r\n+write\r\n:1\r\n:2\r\n:1\r\n*6\r\n$9\r\nsubscribe\r\n:-2\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nxdel\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$15\r\nzremrangebyrank\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\ntype\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nscript\r\n:-2\r\n*1\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhmset\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nsunion\r\n:-2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$4\r\nmget\r\n:-2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$10\r\nbrpoplpush\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+noscript\r\n:1\r\n:2\r\n:1\r\n*6\r\n$6\r\ngeoadd\r\n:-5\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\ndecrby\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\necho\r\n:2\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\ndbsize\r\n:1\r\n*2\r\n+readonly\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nzcard\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nselect\r\n:2\r\n*2\r\n+loading\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nsadd\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nhost:\r\n:-1\r\n*2\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsscan\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$12\r\ngeoradius_ro\r\n:-6\r\n*2\r\n+readonly\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nmonitor\r\n:1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$14\r\nzremrangebylex\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nsunionstore\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$5\r\nzscan\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nreadwrite\r\n:1\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nxgroup\r\n:-2\r\n*2\r\n+write\r\n+denyoom\r\n:2\r\n:2\r\n:1\r\n*6\r\n$5\r\nsetex\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nsave\r\n:1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhvals\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nwatch\r\n:-2\r\n*2\r\n+noscript\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$7\r\nhexists\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\ninfo\r\n:-1\r\n*3\r\n+random\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\npsync\r\n:3\r\n*3\r\n+readonly\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$11\r\nzrangebylex\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nzadd\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nxlen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nauth\r\n:2\r\n*4\r\n+noscript\r\n+loading\r\n+stale\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nsrem\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\ngeoradius\r\n:-6\r\n*2\r\n+write\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nexec\r\n:1\r\n*2\r\n+noscript\r\n+skip_monitor\r\n:0\r\n:0
\r\n:0\r\n*6\r\n$7\r\npfcount\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$7\r\nzpopmin\r\n:-2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nmove\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nxtrim\r\n:-2\r\n*3\r\n+write\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nasking\r\n:1\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\npttl\r\n:2\r\n*3\r\n+readonly\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nsrandmember\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nflushall\r\n:-1\r\n*1\r\n+write\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nsort\r\n:-2\r\n*3\r\n+write\r\n+denyoom\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$3\r\ndel\r\n:-2\r\n*1\r\n+write\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$14\r\nrestore-asking\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+asking\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\npsubscribe\r\n:-2\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\ndecr\r\n:2\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nincrby\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$14\r\nzrevrangebylex\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nbitfield\r\n:-2\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nexists\r\n:-2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$8\r\nreplconf\r\n:-1\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nzincrby\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nblpop\r\n:-3\r\n*2\r\n+write\r\n+noscript\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$4\r\nlpop\r\n:2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$3\r\nttl\r\n:2\r\n*3\r\n+readonly\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nxread\r\n:-4\r\n*3\r\n+readonly\r\n+noscript\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nrpush\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nzrevrank\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nincrbyfloat\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nbrpop\r\n:-3\r\n*2\r\n+write\r\n+noscript\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$4\r\nxadd\r\n:-5\r\n*4\r\n+write\r\n+denyoom\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nsetrange\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$17\r\ngeoradiusbymember\r\n:-5\r\n*2\r\n+write\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nunlink\r\n:-2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$8\r\nexpireat\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\ndebug\r\n:-2\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$20\r\ngeoradiusbymember_ro\r\n:-5\r\n*2\r\n+readonly\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nlset\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nzscore\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nllen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\ntime\r\n:1\r\n*2\r\n+random\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nshutdown\r\n:-1\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nevalsha\r\n:-3\r\n*2\r\n+noscript\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nzcount\r\n:4\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nmemory\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nxinfo\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:2\r\n:2\r\n:1\r\n*6\r\n$8\r\nxpending\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\neval\r\n:-3\r\n*2\r\n+no
script\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nxrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nrestore\r\n:-4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nzpopmax\r\n:-2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nmset\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:2\r\n*6\r\n$4\r\nspop\r\n:-2\r\n*3\r\n+write\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nltrim\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nzrank\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nxrevrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$3\r\nget\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nflushdb\r\n:-1\r\n*1\r\n+write\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhmget\r\n:-3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nmsetnx\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:2\r\n*6\r\n$7\r\npersist\r\n:2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nzunionstore\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ncommand\r\n:0\r\n*3\r\n+random\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nrenamenx\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:2\r\n:1\r\n*6\r\n$6\r\nzrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\npexpire\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nkeys\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nzrem\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\npfadd\r\n:-2\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\npsetex\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$13\r\nzrangebyscore\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nsync\r\n:1\r\n*3\r\n+readonly\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\npfdebug\r\n:-3\r\n*1\r\n+write\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ndiscard\r\n:1\r\n*2\r\n+noscript\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nreadonly\r\n:1\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ngeodist\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\ngeopos\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nbitpos\r\n:-3\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nsinter\r\n:-2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$6\r\ngetset\r\n:3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nslaveof\r\n:3\r\n*3\r\n+admin\r\n+noscript\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nrpushx\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nlinsert\r\n:5\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nexpire\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n" + res := ` +*200 +*6 +$12 +hincrbyfloat +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$10 +xreadgroup +:-7 +*3 ++write ++noscript ++movablekeys +:1 +:1 +:1 +*6 +$10 +sdiffstore +:-3 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$8 +lastsave +:1 +*2 ++random ++fast +:0 +:0 +:0 +*6 +$5 +setnx +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$8 +bzpopmax +:-3 +*3 ++write ++noscript ++fast +:1 +:-2 +:1 +*6 +$12 +punsubscribe +:-1 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +xack +:-4 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$10 +pfselftest +:1 +*1 ++admin +:0 +:0 +:0 +*6 +$6 +substr +:4 +*1 ++readonly +:1 +:1 +:1 +*6 +$8 +smembers +:2 +*2 ++readonly ++sort_for_script +:1 +:1 +:1 +*6 +$11 +unsubscribe +:-1 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$11 +zinterstore +:-4 +*3 ++write ++denyoom 
++movablekeys +:0 +:0 +:0 +*6 +$6 +strlen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +pfmerge +:-2 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$9 +randomkey +:1 +*2 ++readonly ++random +:0 +:0 +:0 +*6 +$6 +lolwut +:-1 +*1 ++readonly +:0 +:0 +:0 +*6 +$4 +rpop +:2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +hkeys +:2 +*2 ++readonly ++sort_for_script +:1 +:1 +:1 +*6 +$6 +client +:-2 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$6 +module +:-2 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$7 +slowlog +:-2 +*2 ++admin ++random +:0 +:0 +:0 +*6 +$7 +geohash +:-2 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +lrange +:4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +ping +:-1 +*2 ++stale ++fast +:0 +:0 +:0 +*6 +$8 +bitcount +:-2 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +pubsub +:-2 +*4 ++pubsub ++random ++loading ++stale +:0 +:0 +:0 +*6 +$4 +role +:1 +*3 ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +hget +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +object +:-2 +*2 ++readonly ++random +:2 +:2 +:1 +*6 +$9 +zrevrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$7 +hincrby +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$9 +zlexcount +:4 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$5 +scard +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +append +:3 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$7 +hstrlen +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +config +:-2 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +hset +:-4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$16 +zrevrangebyscore +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +incr +:2 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +setbit +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$9 +rpoplpush +:3 +*2 ++write ++denyoom +:1 +:2 +:1 +*6 +$6 +xclaim +:-6 +*3 ++write ++random ++fast +:1 +:1 +:1 +*6 +$11 +sinterstore +:-3 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$7 +publish +:3 +*4 ++pubsub ++loading ++stale ++fast +:0 +:0 +:0 +*6 +$5 +hscan +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$5 +multi +:1 +*2 ++noscript ++fast +:0 +:0 +:0 +*6 +$3 +set +:-3 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +lpushx +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$16 +zremrangebyscore +:4 +*1 ++write +:1 +:1 +:1 +*6 +$9 +pexpireat +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +hdel +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$12 +bgrewriteaof +:1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$7 +migrate +:-6 +*3 ++write ++random ++movablekeys +:0 +:0 +:0 +*6 +$9 +replicaof +:3 +*3 ++admin ++noscript ++stale +:0 +:0 +:0 +*6 +$5 +touch +:-2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +xsetid +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +bitop +:-4 +*2 ++write ++denyoom +:2 +:-1 +:1 +*6 +$6 +swapdb +:3 +*2 ++write ++fast +:0 +:0 +:0 +*6 +$5 +sdiff +:-2 +*2 ++readonly ++sort_for_script +:1 +:-1 +:1 +*6 +$6 +lindex +:3 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +wait +:3 +*1 ++noscript +:0 +:0 +:0 +*6 +$4 +lrem +:4 +*1 ++write +:1 +:1 +:1 +*6 +$6 +hsetnx +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$8 +getrange +:4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +hlen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +post +:-1 +*2 ++loading ++stale +:0 +:0 +:0 +*6 +$9 +sismember +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +unwatch +:1 +*2 ++noscript ++fast +:0 +:0 +:0 +*6 +$5 +lpush +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$4 +scan +:-2 +*2 ++readonly ++random +:0 +:0 +:0 +*6 +$5 +smove +:4 +*2 ++write ++fast +:1 +:2 +:1 +*6 +$7 +cluster +:-2 +*1 ++admin +:0 +:0 +:0 +*6 +$6 +bgsave +:-1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$4 +dump +:2 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$7 +latency +:-2 +*4 ++admin ++noscript 
++loading ++stale +:0 +:0 +:0 +*6 +$8 +bzpopmin +:-3 +*3 ++write ++noscript ++fast +:1 +:-2 +:1 +*6 +$6 +getbit +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +hgetall +:2 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$6 +rename +:3 +*1 ++write +:1 +:2 +:1 +*6 +$9 +subscribe +:-2 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +xdel +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$15 +zremrangebyrank +:4 +*1 ++write +:1 +:1 +:1 +*6 +$4 +type +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +script +:-2 +*1 ++noscript +:0 +:0 +:0 +*6 +$5 +hmset +:-4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +sunion +:-2 +*2 ++readonly ++sort_for_script +:1 +:-1 +:1 +*6 +$4 +mget +:-2 +*2 ++readonly ++fast +:1 +:-1 +:1 +*6 +$10 +brpoplpush +:4 +*3 ++write ++denyoom ++noscript +:1 +:2 +:1 +*6 +$6 +geoadd +:-5 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +decrby +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$4 +echo +:2 +*1 ++fast +:0 +:0 +:0 +*6 +$6 +dbsize +:1 +*2 ++readonly ++fast +:0 +:0 +:0 +*6 +$5 +zcard +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +select +:2 +*2 ++loading ++fast +:0 +:0 +:0 +*6 +$4 +sadd +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +host: +:-1 +*2 ++loading ++stale +:0 +:0 +:0 +*6 +$5 +sscan +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$12 +georadius_ro +:-6 +*2 ++readonly ++movablekeys +:1 +:1 +:1 +*6 +$7 +monitor +:1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$14 +zremrangebylex +:4 +*1 ++write +:1 +:1 +:1 +*6 +$11 +sunionstore +:-3 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$5 +zscan +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$9 +readwrite +:1 +*1 ++fast +:0 +:0 +:0 +*6 +$6 +xgroup +:-2 +*2 ++write ++denyoom +:2 +:2 +:1 +*6 +$5 +setex +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$4 +save +:1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$5 +hvals +:2 +*2 ++readonly ++sort_for_script +:1 +:1 +:1 +*6 +$5 +watch +:-2 +*2 ++noscript ++fast +:1 +:-1 +:1 +*6 +$7 +hexists +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +info +:-1 +*3 ++random ++loading ++stale +:0 +:0 +:0 +*6 +$5 +psync +:3 +*3 ++readonly ++admin ++noscript +:0 +:0 +:0 +*6 +$11 +zrangebylex +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +zadd +:-4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$4 +xlen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +auth +:2 +*4 ++noscript ++loading ++stale ++fast +:0 +:0 +:0 +*6 +$4 +srem +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$9 +georadius +:-6 +*2 ++write ++movablekeys +:1 +:1 +:1 +*6 +$4 +exec +:1 +*2 ++noscript ++skip_monitor +:0 +:0 +:0 +*6 +$7 +pfcount +:-2 +*1 ++readonly +:1 +:-1 +:1 +*6 +$7 +zpopmin +:-2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +move +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +xtrim +:-2 +*3 ++write ++random ++fast +:1 +:1 +:1 +*6 +$6 +asking +:1 +*1 ++fast +:0 +:0 +:0 +*6 +$4 +pttl +:2 +*3 ++readonly ++random ++fast +:1 +:1 +:1 +*6 +$11 +srandmember +:-2 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$8 +flushall +:-1 +*1 ++write +:0 +:0 +:0 +*6 +$4 +sort +:-2 +*3 ++write ++denyoom ++movablekeys +:1 +:1 +:1 +*6 +$3 +del +:-2 +*1 ++write +:1 +:-1 +:1 +*6 +$14 +restore-asking +:-4 +*3 ++write ++denyoom ++asking +:1 +:1 +:1 +*6 +$10 +psubscribe +:-2 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +decr +:2 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +incrby +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$14 +zrevrangebylex +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$8 +bitfield +:-2 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +exists +:-2 +*2 ++readonly ++fast +:1 +:-1 +:1 +*6 +$8 +replconf +:-1 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$7 
+zincrby +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +blpop +:-3 +*2 ++write ++noscript +:1 +:-2 +:1 +*6 +$4 +lpop +:2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$3 +ttl +:2 +*3 ++readonly ++random ++fast +:1 +:1 +:1 +*6 +$5 +xread +:-4 +*3 ++readonly ++noscript ++movablekeys +:1 +:1 +:1 +*6 +$5 +rpush +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$8 +zrevrank +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$11 +incrbyfloat +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +brpop +:-3 +*2 ++write ++noscript +:1 +:-2 +:1 +*6 +$4 +xadd +:-5 +*4 ++write ++denyoom ++random ++fast +:1 +:1 +:1 +*6 +$8 +setrange +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$17 +georadiusbymember +:-5 +*2 ++write ++movablekeys +:1 +:1 +:1 +*6 +$6 +unlink +:-2 +*2 ++write ++fast +:1 +:-1 +:1 +*6 +$8 +expireat +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +debug +:-2 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$20 +georadiusbymember_ro +:-5 +*2 ++readonly ++movablekeys +:1 +:1 +:1 +*6 +$4 +lset +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +zscore +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +llen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +time +:1 +*2 ++random ++fast +:0 +:0 +:0 +*6 +$8 +shutdown +:-1 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$7 +evalsha +:-3 +*2 ++noscript ++movablekeys +:0 +:0 +:0 +*6 +$6 +zcount +:4 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +memory +:-2 +*2 ++readonly ++random +:0 +:0 +:0 +*6 +$5 +xinfo +:-2 +*2 ++readonly ++random +:2 +:2 +:1 +*6 +$8 +xpending +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$4 +eval +:-3 +*2 ++noscript ++movablekeys +:0 +:0 +:0 +*6 +$6 +xrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$7 +restore +:-4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$7 +zpopmax +:-2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +mset +:-3 +*2 ++write ++denyoom +:1 +:-1 +:2 +*6 +$4 +spop +:-2 +*3 ++write ++random ++fast +:1 +:1 +:1 +*6 +$5 +ltrim +:4 +*1 ++write +:1 +:1 +:1 +*6 +$5 +zrank +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$9 +xrevrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$3 +get +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +flushdb +:-1 +*1 ++write +:0 +:0 +:0 +*6 +$5 +hmget +:-3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +msetnx +:-3 +*2 ++write ++denyoom +:1 +:-1 +:2 +*6 +$7 +persist +:2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$11 +zunionstore +:-4 +*3 ++write ++denyoom ++movablekeys +:0 +:0 +:0 +*6 +$7 +command +:0 +*3 ++random ++loading ++stale +:0 +:0 +:0 +*6 +$8 +renamenx +:3 +*2 ++write ++fast +:1 +:2 +:1 +*6 +$6 +zrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$7 +pexpire +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +keys +:2 +*2 ++readonly ++sort_for_script +:0 +:0 +:0 +*6 +$4 +zrem +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +pfadd +:-2 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +psetex +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$13 +zrangebyscore +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +sync +:1 +*3 ++readonly ++admin ++noscript +:0 +:0 +:0 +*6 +$7 +pfdebug +:-3 +*1 ++write +:0 +:0 +:0 +*6 +$7 +discard +:1 +*2 ++noscript ++fast +:0 +:0 +:0 +*6 +$8 +readonly +:1 +*1 ++fast +:0 +:0 +:0 +*6 +$7 +geodist +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +geopos +:-2 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +bitpos +:-3 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +sinter +:-2 +*2 ++readonly ++sort_for_script +:1 +:-1 +:1 +*6 +$6 +getset +:3 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$7 +slaveof +:3 +*3 ++admin ++noscript ++stale +:0 +:0 +:0 +*6 +$6 +rpushx +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$7 +linsert +:5 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +expire +:3 +*2 ++write ++fast +:1 +:1 
+:1 + ` - c.WriteRaw(res) + c.WriteBulk(res) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go index defbbccab9f..cca060a7218 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go @@ -4,7 +4,6 @@ package miniredis import ( "fmt" - "strconv" "strings" "github.com/alicebob/miniredis/v2/server" @@ -75,23 +74,29 @@ func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) { c.WriteError(msgNotFromScripts) return } - username := "default" - pw := args[0] + + var opts = struct { + username string + password string + }{ + username: "default", + password: args[0], + } if len(args) == 2 { - username, pw = args[0], args[1] + opts.username, opts.password = args[0], args[1] } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if len(m.passwords) == 0 && username == "default" { + if len(m.passwords) == 0 && opts.username == "default" { c.WriteError("ERR AUTH called without any password configured for the default user. Are you sure your configuration is correct?") return } - setPW, ok := m.passwords[username] + setPW, ok := m.passwords[opts.username] if !ok { c.WriteError("WRONGPASS invalid username-password pair") return } - if setPW != pw { + if setPW != opts.password { c.WriteError("WRONGPASS invalid username-password pair") return } @@ -109,17 +114,16 @@ func (m *Miniredis) cmdHello(c *server.Peer, cmd string, args []string) { } var opts struct { - version int - username, password string + version int + username string + password string } - versionArg, args := args[0], args[1:] - var err error - opts.version, err = strconv.Atoi(versionArg) - if err != nil { - c.WriteError("ERR Protocol version is not an integer or out of range") + if ok := optIntErr(c, args[0], &opts.version, "ERR Protocol version is not an integer or out of range"); !ok { return } + args = args[1:] + switch opts.version { case 2, 3: default: @@ -199,8 +203,9 @@ func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) { return } + msg := args[0] + withTx(m, c, func(c *server.Peer, ctx *connCtx) { - msg := args[0] c.WriteBulk(msg) }) } @@ -212,27 +217,25 @@ func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } - if !m.handleAuth(c) { + if !m.isValidCMD(c, cmd) { return } - if m.checkPubsub(c, cmd) { + + var opts struct { + id int + } + if ok := optInt(c, args[0], &opts.id); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - id, err := strconv.Atoi(args[0]) - if err != nil { - c.WriteError(msgInvalidInt) - setDirty(c) - return - } - if id < 0 { + if opts.id < 0 { c.WriteError(msgDBIndexOutOfRange) setDirty(c) return } - ctx.selectedDB = id + ctx.selectedDB = opts.id c.WriteOK() }) } @@ -248,26 +251,26 @@ func (m *Miniredis) cmdSwapdb(c *server.Peer, cmd string, args []string) { return } + var opts struct { + id1 int + id2 int + } + + if ok := optIntErr(c, args[0], &opts.id1, "ERR invalid first DB index"); !ok { + return + } + if ok := optIntErr(c, args[1], &opts.id2, "ERR invalid second DB index"); !ok { + return + } + withTx(m, c, func(c *server.Peer, ctx *connCtx) { - id1, err := strconv.Atoi(args[0]) - if err != nil { - c.WriteError("ERR invalid first DB index") - setDirty(c) - return - } - id2, err := strconv.Atoi(args[1]) - if err != nil { - c.WriteError("ERR invalid second DB index") - setDirty(c) - return - } - if id1 < 0 || id2 < 0 { + if opts.id1 < 0 || 
opts.id2 < 0 { c.WriteError(msgDBIndexOutOfRange) setDirty(c) return } - m.swapDB(id1, id2) + m.swapDB(opts.id1, opts.id2) c.WriteOK() }) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go index f9f06bfb96e..76d6b0a8ca1 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go @@ -13,8 +13,8 @@ import ( // commandsGeneric handles EXPIRE, TTL, PERSIST, &c. func commandsGeneric(m *Miniredis) { + m.srv.Register("COPY", m.cmdCopy) m.srv.Register("DEL", m.cmdDel) - m.srv.Register("UNLINK", m.cmdDel) // DUMP m.srv.Register("EXISTS", m.cmdExists) m.srv.Register("EXPIRE", makeCmdExpire(m, false, time.Second)) @@ -31,12 +31,12 @@ func commandsGeneric(m *Miniredis) { m.srv.Register("RENAME", m.cmdRename) m.srv.Register("RENAMENX", m.cmdRenamenx) // RESTORE - // SORT m.srv.Register("TOUCH", m.cmdTouch) m.srv.Register("TTL", m.cmdTTL) m.srv.Register("TYPE", m.cmdType) m.srv.Register("SCAN", m.cmdScan) - m.srv.Register("COPY", m.cmdCopy) + // SORT + m.srv.Register("UNLINK", m.cmdDel) } // generic expire command for EXPIRE, PEXPIRE, EXPIREAT, PEXPIREAT @@ -56,12 +56,12 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, return } - key := args[0] - value := args[1] - i, err := strconv.Atoi(value) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + value int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.value); !ok { return } @@ -69,17 +69,17 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, db := m.db(ctx.selectedDB) // Key must be present. - if _, ok := db.keys[key]; !ok { + if _, ok := db.keys[opts.key]; !ok { c.WriteInt(0) return } if unix { - db.ttl[key] = m.at(i, d) + db.ttl[opts.key] = m.at(opts.value, d) } else { - db.ttl[key] = time.Duration(i) * d + db.ttl[opts.key] = time.Duration(opts.value) * d } - db.keyVersion[key]++ - db.checkTTL(key) + db.keyVersion[opts.key]++ + db.checkTTL(opts.key) c.WriteInt(1) }) } @@ -318,21 +318,23 @@ func (m *Miniredis) cmdMove(c *server.Peer, cmd string, args []string) { return } - key := args[0] - targetDB, err := strconv.Atoi(args[1]) - if err != nil { - targetDB = 0 + var opts struct { + key string + targetDB int } + opts.key = args[0] + opts.targetDB, _ = strconv.Atoi(args[1]) + withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if ctx.selectedDB == targetDB { + if ctx.selectedDB == opts.targetDB { c.WriteError("ERR source and destination objects are the same") return } db := m.db(ctx.selectedDB) - targetDB := m.db(targetDB) + targetDB := m.db(opts.targetDB) - if !db.move(key, targetDB) { + if !db.move(opts.key, targetDB) { c.WriteInt(0) return } @@ -413,17 +415,23 @@ func (m *Miniredis) cmdRename(c *server.Peer, cmd string, args []string) { return } - from, to := args[0], args[1] + opts := struct { + from string + to string + }{ + from: args[0], + to: args[1], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(from) { + if !db.exists(opts.from) { c.WriteError(msgKeyNotFound) return } - db.rename(from, to) + db.rename(opts.from, opts.to) c.WriteOK() }) } @@ -442,22 +450,28 @@ func (m *Miniredis) cmdRenamenx(c *server.Peer, cmd string, args []string) { return } - from, to := args[0], args[1] + opts := struct { + from string + to string + }{ + from: args[0], + to: args[1], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := 
m.db(ctx.selectedDB) - if !db.exists(from) { + if !db.exists(opts.from) { c.WriteError(msgKeyNotFound) return } - if db.exists(to) { + if db.exists(opts.to) { c.WriteInt(0) return } - db.rename(from, to) + db.rename(opts.from, opts.to) c.WriteInt(1) }) } @@ -476,22 +490,20 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { return } - cursor, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) - return - } - args = args[1:] - - // MATCH, COUNT and TYPE options - var ( + var opts struct { + cursor int withMatch bool match string withType bool _type string - ) + } + + if ok := optIntErr(c, args[0], &opts.cursor, msgInvalidCursor); !ok { + return + } + args = args[1:] + // MATCH, COUNT and TYPE options for len(args) > 0 { if strings.ToLower(args[0]) == "count" { // we do nothing with count @@ -514,8 +526,8 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match, args = args[1], args[2:] + opts.withMatch = true + opts.match, args = args[1], args[2:] continue } if strings.ToLower(args[0]) == "type" { @@ -524,8 +536,8 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withType = true - _type, args = strings.ToLower(args[1]), args[2:] + opts.withType = true + opts._type, args = strings.ToLower(args[1]), args[2:] continue } setDirty(c) @@ -537,7 +549,7 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { db := m.db(ctx.selectedDB) // We return _all_ (matched) keys every time. - if cursor != 0 { + if opts.cursor != 0 { // Invalid cursor. c.WriteLen(2) c.WriteBulk("0") // no next cursor @@ -547,11 +559,11 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { var keys []string - if withType { + if opts.withType { keys = make([]string, 0) for k, t := range db.keys { // type must be given exactly; no pattern matching is performed - if t == _type { + if t == opts._type { keys = append(keys, k) } } @@ -560,8 +572,8 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { keys = db.allKeys() } - if withMatch { - keys, _ = matchKeys(keys, match) + if opts.withMatch { + keys, _ = matchKeys(keys, opts.match) } c.WriteLen(2) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go b/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go index a6c1901d684..29a92d550f8 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go @@ -226,28 +226,28 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { } args = args[5:] - var ( - withDist = false - withCoord = false - direction = unsorted - count = 0 - withStore = false - storeKey = "" - withStoredist = false - storedistKey = "" - ) + var opts struct { + withDist bool + withCoord bool + direction direction // unsorted + count int + withStore bool + storeKey string + withStoredist bool + storedistKey string + } for len(args) > 0 { arg := args[0] args = args[1:] switch strings.ToUpper(arg) { case "WITHCOORD": - withCoord = true + opts.withCoord = true case "WITHDIST": - withDist = true + opts.withDist = true case "ASC": - direction = asc + opts.direction = asc case "DESC": - direction = desc + opts.direction = desc case "COUNT": if len(args) == 0 { setDirty(c) @@ -266,15 +266,15 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { return } args = args[1:] - count = n + 
opts.count = n case "STORE": if len(args) == 0 { setDirty(c) c.WriteError("ERR syntax error") return } - withStore = true - storeKey = args[0] + opts.withStore = true + opts.storeKey = args[0] args = args[1:] case "STOREDIST": if len(args) == 0 { @@ -282,8 +282,8 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { c.WriteError("ERR syntax error") return } - withStoredist = true - storedistKey = args[0] + opts.withStoredist = true + opts.storedistKey = args[0] args = args[1:] default: setDirty(c) @@ -292,14 +292,14 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { } } - if strings.ToUpper(cmd) == "GEORADIUS_RO" && (withStore || withStoredist) { + if strings.ToUpper(cmd) == "GEORADIUS_RO" && (opts.withStore || opts.withStoredist) { setDirty(c) c.WriteError("ERR syntax error") return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if (withStore || withStoredist) && (withDist || withCoord) { + if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) { c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options") return } @@ -310,9 +310,9 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { matches := withinRadius(members, longitude, latitude, radius*toMeter) // deal with ASC/DESC - if direction != unsorted { + if opts.direction != unsorted { sort.Slice(matches, func(i, j int) bool { - if direction == desc { + if opts.direction == desc { return matches[i].Distance > matches[j].Distance } return matches[i].Distance < matches[j].Distance @@ -320,25 +320,25 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { } // deal with COUNT - if count > 0 && len(matches) > count { - matches = matches[:count] + if opts.count > 0 && len(matches) > opts.count { + matches = matches[:opts.count] } // deal with "STORE x" - if withStore { - db.del(storeKey, true) + if opts.withStore { + db.del(opts.storeKey, true) for _, member := range matches { - db.ssetAdd(storeKey, member.Score, member.Name) + db.ssetAdd(opts.storeKey, member.Score, member.Name) } c.WriteInt(len(matches)) return } // deal with "STOREDIST x" - if withStoredist { - db.del(storedistKey, true) + if opts.withStoredist { + db.del(opts.storedistKey, true) for _, member := range matches { - db.ssetAdd(storedistKey, member.Distance/toMeter, member.Name) + db.ssetAdd(opts.storedistKey, member.Distance/toMeter, member.Name) } c.WriteInt(len(matches)) return @@ -346,24 +346,24 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { c.WriteLen(len(matches)) for _, member := range matches { - if !withDist && !withCoord { + if !opts.withDist && !opts.withCoord { c.WriteBulk(member.Name) continue } len := 1 - if withDist { + if opts.withDist { len++ } - if withCoord { + if opts.withCoord { len++ } c.WriteLen(len) c.WriteBulk(member.Name) - if withDist { + if opts.withDist { c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter)) } - if withCoord { + if opts.withCoord { c.WriteLen(2) c.WriteBulk(fmt.Sprintf("%f", member.Longitude)) c.WriteBulk(fmt.Sprintf("%f", member.Latitude)) @@ -386,45 +386,53 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri return } - key := args[0] - member := args[1] + opts := struct { + key string + member string + radius float64 + toMeter float64 + + withDist bool + withCoord bool + direction direction // unsorted + count int + withStore bool + storeKey string + withStoredist bool + 
storedistKey string + }{ + key: args[0], + member: args[1], + } - radius, err := strconv.ParseFloat(args[2], 64) - if err != nil || radius < 0 { + r, err := strconv.ParseFloat(args[2], 64) + if err != nil || r < 0 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return } - toMeter := parseUnit(args[3]) - if toMeter == 0 { + opts.radius = r + + opts.toMeter = parseUnit(args[3]) + if opts.toMeter == 0 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return } args = args[4:] - var ( - withDist = false - withCoord = false - direction = unsorted - count = 0 - withStore = false - storeKey = "" - withStoredist = false - storedistKey = "" - ) for len(args) > 0 { arg := args[0] args = args[1:] switch strings.ToUpper(arg) { case "WITHCOORD": - withCoord = true + opts.withCoord = true case "WITHDIST": - withDist = true + opts.withDist = true case "ASC": - direction = asc + opts.direction = asc case "DESC": - direction = desc + opts.direction = desc case "COUNT": if len(args) == 0 { setDirty(c) @@ -443,15 +451,15 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri return } args = args[1:] - count = n + opts.count = n case "STORE": if len(args) == 0 { setDirty(c) c.WriteError("ERR syntax error") return } - withStore = true - storeKey = args[0] + opts.withStore = true + opts.storeKey = args[0] args = args[1:] case "STOREDIST": if len(args) == 0 { @@ -459,8 +467,8 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri c.WriteError("ERR syntax error") return } - withStoredist = true - storedistKey = args[0] + opts.withStoredist = true + opts.storedistKey = args[0] args = args[1:] default: setDirty(c) @@ -469,44 +477,44 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri } } - if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (withStore || withStoredist) { + if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (opts.withStore || opts.withStoredist) { setDirty(c) c.WriteError("ERR syntax error") return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if (withStore || withStoredist) && (withDist || withCoord) { + if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) { c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options") return } db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteNull() return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } // get position of member - if !db.ssetExists(key, member) { + if !db.ssetExists(opts.key, opts.member) { c.WriteError("ERR could not decode requested zset member") return } - score := db.ssetScore(key, member) + score := db.ssetScore(opts.key, opts.member) longitude, latitude := fromGeohash(uint64(score)) - members := db.ssetElements(key) - matches := withinRadius(members, longitude, latitude, radius*toMeter) + members := db.ssetElements(opts.key) + matches := withinRadius(members, longitude, latitude, opts.radius*opts.toMeter) // deal with ASC/DESC - if direction != unsorted { + if opts.direction != unsorted { sort.Slice(matches, func(i, j int) bool { - if direction == desc { + if opts.direction == desc { return matches[i].Distance > matches[j].Distance } return matches[i].Distance < matches[j].Distance @@ -514,25 +522,25 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri } // deal with COUNT - if count > 0 && len(matches) > count { - matches = matches[:count] + if 
opts.count > 0 && len(matches) > opts.count { + matches = matches[:opts.count] } // deal with "STORE x" - if withStore { - db.del(storeKey, true) + if opts.withStore { + db.del(opts.storeKey, true) for _, member := range matches { - db.ssetAdd(storeKey, member.Score, member.Name) + db.ssetAdd(opts.storeKey, member.Score, member.Name) } c.WriteInt(len(matches)) return } // deal with "STOREDIST x" - if withStoredist { - db.del(storedistKey, true) + if opts.withStoredist { + db.del(opts.storedistKey, true) for _, member := range matches { - db.ssetAdd(storedistKey, member.Distance/toMeter, member.Name) + db.ssetAdd(opts.storedistKey, member.Distance/opts.toMeter, member.Name) } c.WriteInt(len(matches)) return @@ -540,24 +548,24 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri c.WriteLen(len(matches)) for _, member := range matches { - if !withDist && !withCoord { + if !opts.withDist && !opts.withCoord { c.WriteBulk(member.Name) continue } len := 1 - if withDist { + if opts.withDist { len++ } - if withCoord { + if opts.withCoord { len++ } c.WriteLen(len) c.WriteBulk(member.Name) - if withDist { - c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter)) + if opts.withDist { + c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/opts.toMeter)) } - if withCoord { + if opts.withCoord { c.WriteLen(2) c.WriteBulk(fmt.Sprintf("%f", member.Longitude)) c.WriteBulk(fmt.Sprintf("%f", member.Latitude)) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go b/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go index 142ba63e161..09fa4522f37 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go @@ -77,27 +77,35 @@ func (m *Miniredis) cmdHsetnx(c *server.Peer, cmd string, args []string) { return } - key, field, value := args[0], args[1], args[2] + opts := struct { + key string + field string + value string + }{ + key: args[0], + field: args[1], + value: args[2], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "hash" { + if t, ok := db.keys[opts.key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - if _, ok := db.hashKeys[key]; !ok { - db.hashKeys[key] = map[string]string{} - db.keys[key] = "hash" + if _, ok := db.hashKeys[opts.key]; !ok { + db.hashKeys[opts.key] = map[string]string{} + db.keys[opts.key] = "hash" } - _, ok := db.hashKeys[key][field] + _, ok := db.hashKeys[opts.key][opts.field] if ok { c.WriteInt(0) return } - db.hashKeys[key][field] = value - db.keyVersion[key]++ + db.hashKeys[opts.key][opts.field] = opts.value + db.keyVersion[opts.key]++ c.WriteInt(1) }) } @@ -191,12 +199,18 @@ func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) { return } - key, fields := args[0], args[1:] + opts := struct { + key string + fields []string + }{ + key: args[0], + fields: args[1:], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - t, ok := db.keys[key] + t, ok := db.keys[opts.key] if !ok { // No key is zero deleted c.WriteInt(0) @@ -208,19 +222,19 @@ func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) { } deleted := 0 - for _, f := range fields { - _, ok := db.hashKeys[key][f] + for _, f := range opts.fields { + _, ok := db.hashKeys[opts.key][f] if !ok { continue } - delete(db.hashKeys[key], f) + delete(db.hashKeys[opts.key], f) deleted++ } c.WriteInt(deleted) // Nothing left. Remove the whole key. 
- if len(db.hashKeys[key]) == 0 { - db.del(key, true) + if len(db.hashKeys[opts.key]) == 0 { + db.del(opts.key, true) } }) } @@ -239,12 +253,18 @@ func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) { return } - key, field := args[0], args[1] + opts := struct { + key string + field string + }{ + key: args[0], + field: args[1], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - t, ok := db.keys[key] + t, ok := db.keys[opts.key] if !ok { c.WriteInt(0) return @@ -254,7 +274,7 @@ func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) { return } - if _, ok := db.hashKeys[key][field]; !ok { + if _, ok := db.hashKeys[opts.key][opts.field]; !ok { c.WriteInt(0) return } @@ -494,24 +514,27 @@ func (m *Miniredis) cmdHincrby(c *server.Peer, cmd string, args []string) { return } - key, field, deltas := args[0], args[1], args[2] - - delta, err := strconv.Atoi(deltas) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + opts := struct { + key string + field string + delta int + }{ + key: args[0], + field: args[1], + } + if ok := optInt(c, args[2], &opts.delta); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "hash" { + if t, ok := db.keys[opts.key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - v, err := db.hashIncr(key, field, delta) + v, err := db.hashIncr(opts.key, opts.field, opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -534,24 +557,31 @@ func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) { return } - key, field, deltas := args[0], args[1], args[2] - - delta, _, err := big.ParseFloat(deltas, 10, 128, 0) + opts := struct { + key string + field string + delta *big.Float + }{ + key: args[0], + field: args[1], + } + delta, _, err := big.ParseFloat(args[2], 10, 128, 0) if err != nil { setDirty(c) c.WriteError(msgInvalidFloat) return } + opts.delta = delta withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "hash" { + if t, ok := db.keys[opts.key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - v, err := db.hashIncrfloat(key, field, delta) + v, err := db.hashIncrfloat(opts.key, opts.field, opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -574,18 +604,20 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { return } - key := args[0] - cursor, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) + opts := struct { + key string + cursor int + withMatch bool + match string + }{ + key: args[0], + } + if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok { return } args = args[2:] // MATCH and COUNT options - var withMatch bool - var match string for len(args) > 0 { if strings.ToLower(args[0]) == "count" { // we do nothing with count @@ -609,8 +641,8 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match, args = args[1], args[2:] + opts.withMatch = true + opts.match, args = args[1], args[2:] continue } setDirty(c) @@ -622,21 +654,21 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { db := m.db(ctx.selectedDB) // return _all_ (matched) keys every time - if cursor != 0 { + if opts.cursor != 0 { // Invalid cursor. 
c.WriteLen(2) c.WriteBulk("0") // no next cursor c.WriteLen(0) // no elements return } - if db.exists(key) && db.t(key) != "hash" { + if db.exists(opts.key) && db.t(opts.key) != "hash" { c.WriteError(ErrWrongType.Error()) return } - members := db.hashFields(key) - if withMatch { - members, _ = matchKeys(members, match) + members := db.hashFields(opts.key) + if opts.withMatch { + members, _ = matchKeys(members, opts.match) } c.WriteLen(2) @@ -645,7 +677,7 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { c.WriteLen(len(members) * 2) for _, k := range members { c.WriteBulk(k) - c.WriteBulk(db.hashGet(key, k)) + c.WriteBulk(db.hashGet(opts.key, k)) } }) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go b/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go index bd2f90c8382..a7b483673e3 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go @@ -52,7 +52,7 @@ func (m *Miniredis) cmdPfcount(c *server.Peer, cmd string, args []string) { return } - keys := args[:] + keys := args withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_info.go b/vendor/github.com/alicebob/miniredis/v2/cmd_info.go new file mode 100644 index 00000000000..e5984a9b2c0 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_info.go @@ -0,0 +1,40 @@ +package miniredis + +import ( + "fmt" + + "github.com/alicebob/miniredis/v2/server" +) + +// Command 'INFO' from https://redis.io/commands/info/ +func (m *Miniredis) cmdInfo(c *server.Peer, cmd string, args []string) { + if !m.isValidCMD(c, cmd) { + return + } + + if len(args) > 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + const ( + clientsSectionName = "clients" + clientsSectionContent = "# Clients\nconnected_clients:%d\r\n" + ) + + var result string + + for _, key := range args { + if key != clientsSectionName { + setDirty(c) + c.WriteError(fmt.Sprintf("section (%s) is not supported", key)) + return + } + } + result = fmt.Sprintf(clientsSectionContent, m.Server().ClientsLen()) + + c.WriteBulk(result) + }) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_list.go b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go index 62f9691887b..efd0362b4be 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_list.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go @@ -23,6 +23,7 @@ func commandsList(m *Miniredis) { m.srv.Register("BRPOP", m.cmdBrpop) m.srv.Register("BRPOPLPUSH", m.cmdBrpoplpush) m.srv.Register("LINDEX", m.cmdLindex) + m.srv.Register("LPOS", m.cmdLpos) m.srv.Register("LINSERT", m.cmdLinsert) m.srv.Register("LLEN", m.cmdLlen) m.srv.Register("LPOP", m.cmdLpop) @@ -62,28 +63,23 @@ func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftr return } - timeoutS := args[len(args)-1] - keys := args[:len(args)-1] - - timeout, err := strconv.Atoi(timeoutS) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidTimeout) - return + var opts struct { + keys []string + timeout time.Duration } - if timeout < 0 { - setDirty(c) - c.WriteError(msgNegTimeout) + + if ok := optDuration(c, args[len(args)-1], &opts.timeout); !ok { return } + opts.keys = args[:len(args)-1] blocking( m, c, - time.Duration(timeout)*time.Second, + opts.timeout, func(c *server.Peer, ctx *connCtx) bool { db := m.db(ctx.selectedDB) - for _, key := range keys { + for _, key := range opts.keys { if 
!db.exists(key) { continue } @@ -165,6 +161,153 @@ func (m *Miniredis) cmdLindex(c *server.Peer, cmd string, args []string) { }) } +// LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len] +func (m *Miniredis) cmdLpos(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + if len(args) == 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + // Extract options from arguments if present. + // + // Redis allows duplicate options and uses the last specified. + // `LPOS key term RANK 1 RANK 2` is effectively the same as + // `LPOS key term RANK 2` + if len(args)%2 == 1 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + rank, count := 1, 1 // Default values + var maxlen int // Default value is the list length (see below) + var countSpecified, maxlenSpecified bool + if len(args) > 2 { + for i := 2; i < len(args); i++ { + if i%2 == 0 { + val := args[i+1] + var err error + switch strings.ToLower(args[i]) { + case "rank": + if rank, err = strconv.Atoi(val); err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + if rank == 0 { + setDirty(c) + c.WriteError(msgRankIsZero) + return + } + case "count": + countSpecified = true + if count, err = strconv.Atoi(val); err != nil || count < 0 { + setDirty(c) + c.WriteError(msgCountIsNegative) + return + } + case "maxlen": + maxlenSpecified = true + if maxlen, err = strconv.Atoi(val); err != nil || maxlen < 0 { + setDirty(c) + c.WriteError(msgMaxLengthIsNegative) + return + } + default: + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + } + } + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + key, element := args[0], args[1] + t, ok := db.keys[key] + if !ok { + // No such key + c.WriteNull() + return + } + if t != "list" { + c.WriteError(msgWrongType) + return + } + l := db.listKeys[key] + + // RANK cannot be zero (see above). + // If RANK is positive search forward (left to right). + // If RANK is negative search backward (right to left). + // Iterator returns true to continue iterating. + iterate := func(iterator func(i int, e string) bool) { + comparisons := len(l) + // Only use max length if specified, not zero, and less than total length. + // When max length is specified, but is zero, this means "unlimited". + if maxlenSpecified && maxlen != 0 && maxlen < len(l) { + comparisons = maxlen + } + if rank > 0 { + for i := 0; i < comparisons; i++ { + if resume := iterator(i, l[i]); !resume { + return + } + } + } else if rank < 0 { + start := len(l) - 1 + end := len(l) - comparisons + for i := start; i >= end; i-- { + if resume := iterator(i, l[i]); !resume { + return + } + } + } + } + + var currentRank, currentCount int + vals := make([]int, 0, count) + iterate(func(i int, e string) bool { + if e == element { + currentRank++ + // Only collect values only after surpassing the absolute value of rank. 
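+			// Illustrative example (standard LPOS semantics), given the list [a b c a b c]
+			// with matches at indexes 0 and 3:
+			//   LPOS key a          -> 0
+			//   LPOS key a RANK -1  -> 3
+			//   LPOS key a COUNT 0  -> [0 3]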
+ if rank > 0 && currentRank < rank { + return true + } + if rank < 0 && currentRank < -rank { + return true + } + vals = append(vals, i) + currentCount++ + if currentCount == count { + return false + } + } + return true + }) + + if !countSpecified && len(vals) == 0 { + c.WriteNull() + return + } + if !countSpecified && len(vals) == 1 { + c.WriteInt(vals[0]) + return + } + c.WriteLen(len(vals)) + for _, val := range vals { + c.WriteInt(val) + } + }) +} + // LINSERT func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) { if len(args) != 4 { @@ -297,18 +440,14 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri opts.key, args = args[0], args[1:] if len(args) > 0 { - v, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[0], &opts.count); !ok { return } - if v < 0 { + if opts.count < 0 { setDirty(c) c.WriteError(msgOutOfRange) return } - opts.count = v opts.withCount = true args = args[1:] } @@ -471,35 +610,35 @@ func (m *Miniredis) cmdLrange(c *server.Peer, cmd string, args []string) { return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + opts := struct { + key string + start int + end int + }{ + key: args[0], + } + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "list" { + if t, ok := db.keys[opts.key]; ok && t != "list" { c.WriteError(msgWrongType) return } - l := db.listKeys[key] + l := db.listKeys[opts.key] if len(l) == 0 { c.WriteLen(0) return } - rs, re := redisRange(len(l), start, end, false) + rs, re := redisRange(len(l), opts.start, opts.end, false) c.WriteLen(re - rs) for _, el := range l[rs:re] { c.WriteBulk(el) @@ -521,42 +660,44 @@ func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) { return } - key := args[0] - count, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + count int + value string + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.count); !ok { return } - value := args[2] + opts.value = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "list" { + if db.t(opts.key) != "list" { c.WriteError(msgWrongType) return } - l := db.listKeys[key] - if count < 0 { + l := db.listKeys[opts.key] + if opts.count < 0 { reverseSlice(l) } deleted := 0 newL := []string{} toDelete := len(l) - if count < 0 { - toDelete = -count + if opts.count < 0 { + toDelete = -opts.count } - if count > 0 { - toDelete = count + if opts.count > 0 { + toDelete = opts.count } for _, el := range l { - if el == value { + if el == opts.value { if toDelete > 0 { deleted++ toDelete-- @@ -565,14 +706,14 @@ func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) { } newL = append(newL, el) } - if count < 0 { + if opts.count < 0 { reverseSlice(newL) } if len(newL) == 0 { - db.del(key, true) + db.del(opts.key, true) } else { - db.listKeys[key] = newL - db.keyVersion[key]++ + db.listKeys[opts.key] = newL + db.keyVersion[opts.key]++ } c.WriteInt(deleted) @@ -593,28 +734,31 @@ func (m *Miniredis) cmdLset(c 
*server.Peer, cmd string, args []string) { return } - key := args[0] - index, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + index int + value string + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.index); !ok { return } - value := args[2] + opts.value = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteError(msgKeyNotFound) return } - if db.t(key) != "list" { + if db.t(opts.key) != "list" { c.WriteError(msgWrongType) return } - l := db.listKeys[key] + l := db.listKeys[opts.key] + index := opts.index if index < 0 { index = len(l) + index } @@ -622,8 +766,8 @@ func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) { c.WriteError(msgOutOfRange) return } - l[index] = value - db.keyVersion[key]++ + l[index] = opts.value + db.keyVersion[opts.key]++ c.WriteOK() }) @@ -643,24 +787,24 @@ func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) { return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + start int + end int + } + + opts.key = args[0] + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - t, ok := db.keys[key] + t, ok := db.keys[opts.key] if !ok { c.WriteOK() return @@ -670,14 +814,14 @@ func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) { return } - l := db.listKeys[key] - rs, re := redisRange(len(l), start, end, false) + l := db.listKeys[opts.key] + rs, re := redisRange(len(l), opts.start, opts.end, false) l = l[rs:re] if len(l) == 0 { - db.del(key, true) + db.del(opts.key, true) } else { - db.listKeys[key] = l - db.keyVersion[key]++ + db.listKeys[opts.key] = l + db.keyVersion[opts.key]++ } c.WriteOK() }) @@ -730,39 +874,36 @@ func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) { return } - src := args[0] - dst := args[1] - timeout, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidTimeout) - return + var opts struct { + src string + dst string + timeout time.Duration } - if timeout < 0 { - setDirty(c) - c.WriteError(msgNegTimeout) + opts.src = args[0] + opts.dst = args[1] + if ok := optDuration(c, args[2], &opts.timeout); !ok { return } blocking( m, c, - time.Duration(timeout)*time.Second, + opts.timeout, func(c *server.Peer, ctx *connCtx) bool { db := m.db(ctx.selectedDB) - if !db.exists(src) { + if !db.exists(opts.src) { return false } - if db.t(src) != "list" || (db.exists(dst) && db.t(dst) != "list") { + if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") { c.WriteError(msgWrongType) return true } - if len(db.listKeys[src]) == 0 { + if len(db.listKeys[opts.src]) == 0 { return false } - elem := db.listPop(src) - db.listLpush(dst, elem) + elem := db.listPop(opts.src) + db.listLpush(opts.dst, elem) c.WriteBulk(elem) return true }, @@ -787,34 +928,46 @@ func (m *Miniredis) cmdLmove(c *server.Peer, cmd string, args []string) { return } - src, dst, srcDir, dstDir := args[0], args[1], strings.ToLower(args[2]), strings.ToLower(args[3]) + opts := struct { + src string + dst string + srcDir string + dstDir string + }{ + src: 
args[0], + dst: args[1], + srcDir: strings.ToLower(args[2]), + dstDir: strings.ToLower(args[3]), + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(src) { + if !db.exists(opts.src) { c.WriteNull() return } - if db.t(src) != "list" || (db.exists(dst) && db.t(dst) != "list") { + if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") { c.WriteError(msgWrongType) return } var elem string - if srcDir == "left" { - elem = db.listLpop(src) - } else if srcDir == "right" { - elem = db.listPop(src) - } else { + switch opts.srcDir { + case "left": + elem = db.listLpop(opts.src) + case "right": + elem = db.listPop(opts.src) + default: c.WriteError(msgSyntaxError) return } - if dstDir == "left" { - db.listLpush(dst, elem) - } else if dstDir == "right" { - db.listPush(dst, elem) - } else { + switch opts.dstDir { + case "left": + db.listLpush(opts.dst, elem) + case "right": + db.listPush(opts.dst, elem) + default: c.WriteError(msgSyntaxError) return } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_server.go b/vendor/github.com/alicebob/miniredis/v2/cmd_server.go index 223651d39eb..6e51727ba59 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_server.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_server.go @@ -10,9 +10,11 @@ import ( ) func commandsServer(m *Miniredis) { + m.srv.Register("COMMAND", m.cmdCommand) m.srv.Register("DBSIZE", m.cmdDbsize) m.srv.Register("FLUSHALL", m.cmdFlushall) m.srv.Register("FLUSHDB", m.cmdFlushdb) + m.srv.Register("INFO", m.cmdInfo) m.srv.Register("TIME", m.cmdTime) } @@ -100,8 +102,8 @@ func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { now := m.effectiveNow() nanos := now.UnixNano() - seconds := nanos / 1000000000 - microseconds := (nanos / 1000) % 1000000 + seconds := nanos / 1_000_000_000 + microseconds := (nanos / 1_000) % 1_000_000 c.WriteLen(2) c.WriteBulk(strconv.FormatInt(seconds, 10)) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_set.go index a9cdf411a0e..65b3b6bfbbd 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_set.go @@ -3,6 +3,7 @@ package miniredis import ( + "fmt" "strconv" "strings" @@ -609,17 +610,22 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { return } - key := args[0] - cursor, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) + var opts struct { + key string + value int + cursor int + count int + withMatch bool + match string + } + + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok { return } args = args[2:] + // MATCH and COUNT options - var withMatch bool - var match string for len(args) > 0 { if strings.ToLower(args[0]) == "count" { if len(args) < 2 { @@ -627,13 +633,18 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - _, err := strconv.Atoi(args[1]) - if err != nil { + count, err := strconv.Atoi(args[1]) + if err != nil || count < 0 { setDirty(c) c.WriteError(msgInvalidInt) return } - // We do nothing with count. 
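+			// COUNT is now honoured: it must be a positive integer and is used
+			// below as the page size when computing the next cursor.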
+ if count == 0 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + opts.count = count args = args[2:] continue } @@ -643,8 +654,8 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match = args[1] + opts.withMatch = true + opts.match = args[1] args = args[2:] continue } @@ -656,29 +667,38 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) // return _all_ (matched) keys every time - - if cursor != 0 { + if db.exists(opts.key) && db.t(opts.key) != "set" { + c.WriteError(ErrWrongType.Error()) + return + } + members := db.setMembers(opts.key) + if opts.withMatch { + members, _ = matchKeys(members, opts.match) + } + low := opts.cursor + high := low + opts.count + // validate high is correct + if high > len(members) || high == 0 { + high = len(members) + } + if opts.cursor > high { // invalid cursor c.WriteLen(2) c.WriteBulk("0") // no next cursor c.WriteLen(0) // no elements return } - if db.exists(key) && db.t(key) != "set" { - c.WriteError(ErrWrongType.Error()) - return - } - - members := db.setMembers(key) - if withMatch { - members, _ = matchKeys(members, match) + cursorValue := low + opts.count + if cursorValue > len(members) { + cursorValue = 0 // no next cursor } - + members = members[low:high] c.WriteLen(2) - c.WriteBulk("0") // no next cursor + c.WriteBulk(fmt.Sprintf("%d", cursorValue)) c.WriteLen(len(members)) for _, k := range members { c.WriteBulk(k) } + }) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go index 75b540bafbe..178fcc13e42 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go @@ -55,42 +55,44 @@ func (m *Miniredis) cmdZadd(c *server.Peer, cmd string, args []string) { return } - key, args := args[0], args[1:] - var ( - nx = false - xx = false - gt = false - lt = false - ch = false - incr = false - elems = map[string]float64{} - ) + var opts struct { + key string + nx bool + xx bool + gt bool + lt bool + ch bool + incr bool + } + elems := map[string]float64{} + opts.key = args[0] + args = args[1:] outer: for len(args) > 0 { switch strings.ToUpper(args[0]) { case "NX": - nx = true + opts.nx = true args = args[1:] continue case "XX": - xx = true + opts.xx = true args = args[1:] continue case "GT": - gt = true + opts.gt = true args = args[1:] continue case "LT": - lt = true + opts.lt = true args = args[1:] continue case "CH": - ch = true + opts.ch = true args = args[1:] continue case "INCR": - incr = true + opts.incr = true args = args[1:] continue default: @@ -114,21 +116,21 @@ outer: args = args[2:] } - if xx && nx { + if opts.xx && opts.nx { setDirty(c) c.WriteError(msgXXandNX) return } - if gt && lt || - gt && nx || - lt && nx { + if opts.gt && opts.lt || + opts.gt && opts.nx || + opts.lt && opts.nx { setDirty(c) c.WriteError(msgGTLTandNX) return } - if incr && len(elems) > 1 { + if opts.incr && len(elems) > 1 { setDirty(c) c.WriteError(msgSingleElementPair) return @@ -137,22 +139,22 @@ outer: withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if db.exists(key) && db.t(key) != "zset" { + if db.exists(opts.key) && db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - if incr { + if opts.incr { for member, delta := range elems { - if nx && db.ssetExists(key, member) { 
+ if opts.nx && db.ssetExists(opts.key, member) { c.WriteNull() return } - if xx && !db.ssetExists(key, member) { + if opts.xx && !db.ssetExists(opts.key, member) { c.WriteNull() return } - newScore := db.ssetIncrby(key, member, delta) + newScore := db.ssetIncrby(opts.key, member, delta) c.WriteFloat(newScore) } return @@ -160,23 +162,23 @@ outer: res := 0 for member, score := range elems { - if nx && db.ssetExists(key, member) { + if opts.nx && db.ssetExists(opts.key, member) { continue } - if xx && !db.ssetExists(key, member) { + if opts.xx && !db.ssetExists(opts.key, member) { continue } - old := db.ssetScore(key, member) - if gt && score <= old { + old := db.ssetScore(opts.key, member) + if opts.gt && score <= old { continue } - if lt && score >= old { + if opts.lt && score >= old { continue } - if db.ssetAdd(key, score, member) { + if db.ssetAdd(opts.key, score, member) { res++ } else { - if ch && old != score { + if opts.ch && old != score { // if 'CH' is specified, only count changed keys res++ } @@ -233,14 +235,25 @@ func (m *Miniredis) cmdZcount(c *server.Peer, cmd string, args []string) { return } - key := args[0] - min, minIncl, err := parseFloatRange(args[1]) + var ( + opts struct { + key string + min float64 + minIncl bool + max float64 + maxIncl bool + } + err error + ) + + opts.key = args[0] + opts.min, opts.minIncl, err = parseFloatRange(args[1]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) return } - max, maxIncl, err := parseFloatRange(args[2]) + opts.max, opts.maxIncl, err = parseFloatRange(args[2]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) @@ -250,18 +263,18 @@ func (m *Miniredis) cmdZcount(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - members := db.ssetElements(key) - members = withSSRange(members, min, minIncl, max, maxIncl) + members := db.ssetElements(opts.key) + members = withSSRange(members, opts.min, opts.minIncl, opts.max, opts.maxIncl) c.WriteInt(len(members)) }) } @@ -280,23 +293,30 @@ func (m *Miniredis) cmdZincrby(c *server.Peer, cmd string, args []string) { return } - key := args[0] - delta, err := strconv.ParseFloat(args[1], 64) + var opts struct { + key string + delta float64 + member string + } + + opts.key = args[0] + d, err := strconv.ParseFloat(args[1], 64) if err != nil { setDirty(c) c.WriteError(msgInvalidFloat) return } - member := args[2] + opts.delta = d + opts.member = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if db.exists(key) && db.t(key) != "zset" { + if db.exists(opts.key) && db.t(opts.key) != "zset" { c.WriteError(msgWrongType) return } - newScore := db.ssetIncrby(key, member, delta) + newScore := db.ssetIncrby(opts.key, opts.member, opts.delta) c.WriteFloat(newScore) }) } @@ -889,37 +909,37 @@ func (m *Miniredis) cmdZremrangebyrank(c *server.Peer, cmd string, args []string return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + start int + end int + } + + opts.key = args[0] + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c 
*server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - members := db.ssetMembers(key) - rs, re := redisRange(len(members), start, end, false) + members := db.ssetMembers(opts.key) + rs, re := redisRange(len(members), opts.start, opts.end, false) for _, el := range members[rs:re] { - db.ssetRem(key, el) + db.ssetRem(opts.key, el) } c.WriteInt(re - rs) }) @@ -939,14 +959,24 @@ func (m *Miniredis) cmdZremrangebyscore(c *server.Peer, cmd string, args []strin return } - key := args[0] - min, minIncl, err := parseFloatRange(args[1]) + var ( + opts struct { + key string + min float64 + minIncl bool + max float64 + maxIncl bool + } + err error + ) + opts.key = args[0] + opts.min, opts.minIncl, err = parseFloatRange(args[1]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) return } - max, maxIncl, err := parseFloatRange(args[2]) + opts.max, opts.maxIncl, err = parseFloatRange(args[2]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) @@ -956,21 +986,21 @@ func (m *Miniredis) cmdZremrangebyscore(c *server.Peer, cmd string, args []strin withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - members := db.ssetElements(key) - members = withSSRange(members, min, minIncl, max, maxIncl) + members := db.ssetElements(opts.key) + members = withSSRange(members, opts.min, opts.minIncl, opts.max, opts.maxIncl) for _, el := range members { - db.ssetRem(key, el.member) + db.ssetRem(opts.key, el.member) } c.WriteInt(len(members)) }) @@ -1371,17 +1401,19 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { return } - key := args[0] - cursor, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) + var opts struct { + key string + cursor int + withMatch bool + match string + } + + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok { return } args = args[2:] // MATCH and COUNT options - var withMatch bool - var match string for len(args) > 0 { if strings.ToLower(args[0]) == "count" { if len(args) < 2 { @@ -1389,8 +1421,7 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - _, err := strconv.Atoi(args[1]) - if err != nil { + if _, err := strconv.Atoi(args[1]); err != nil { setDirty(c) c.WriteError(msgInvalidInt) return @@ -1405,8 +1436,8 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match = args[1] + opts.withMatch = true + opts.match = args[1] args = args[2:] continue } @@ -1418,21 +1449,21 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) // Paging is not implementend, all results are returned for cursor 0. - if cursor != 0 { + if opts.cursor != 0 { // Invalid cursor. 
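			// Reply as a completed scan: next cursor "0" and no elements.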
c.WriteLen(2) c.WriteBulk("0") // no next cursor c.WriteLen(0) // no elements return } - if db.exists(key) && db.t(key) != "zset" { + if db.exists(opts.key) && db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - members := db.ssetMembers(key) - if withMatch { - members, _ = matchKeys(members, match) + members := db.ssetMembers(opts.key) + if opts.withMatch { + members, _ = matchKeys(members, opts.match) } c.WriteLen(2) @@ -1441,7 +1472,7 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { c.WriteLen(len(members) * 2) for _, k := range members { c.WriteBulk(k) - c.WriteFloat(db.ssetScore(key, k)) + c.WriteFloat(db.ssetScore(opts.key, k)) } }) } @@ -1536,17 +1567,12 @@ func (m *Miniredis) cmdZrandmember(c *server.Peer, cmd string, args []string) { args = args[1:] if len(args) > 0 { - count := args[0] - args = args[1:] - - n, err := strconv.Atoi(count) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + // can be negative + if ok := optInt(c, args[0], &opts.count); !ok { return } opts.withCount = true - opts.count = n // can be negative + args = args[1:] } if len(args) > 0 && strings.ToUpper(args[0]) == "WITHSCORES" { diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go index bc0991ba504..be7067b3784 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go @@ -28,6 +28,7 @@ func commandsStream(m *Miniredis) { m.srv.Register("XPENDING", m.cmdXpending) m.srv.Register("XTRIM", m.cmdXtrim) m.srv.Register("XAUTOCLAIM", m.cmdXautoclaim) + m.srv.Register("XCLAIM", m.cmdXclaim) } // XADD @@ -168,26 +169,30 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { return } - var ( - key = args[0] - startKey = args[1] - endKey = args[2] + opts := struct { + key string + startKey string startExclusive bool + endKey string endExclusive bool - ) - if strings.HasPrefix(startKey, "(") { - startExclusive = true - startKey = startKey[1:] - if startKey == "-" || startKey == "+" { + }{ + key: args[0], + startKey: args[1], + endKey: args[2], + } + if strings.HasPrefix(opts.startKey, "(") { + opts.startExclusive = true + opts.startKey = opts.startKey[1:] + if opts.startKey == "-" || opts.startKey == "+" { setDirty(c) c.WriteError(msgInvalidStreamID) return } } - if strings.HasPrefix(endKey, "(") { - endExclusive = true - endKey = endKey[1:] - if endKey == "-" || endKey == "+" { + if strings.HasPrefix(opts.endKey, "(") { + opts.endExclusive = true + opts.endKey = opts.endKey[1:] + if opts.endKey == "-" || opts.endKey == "+" { setDirty(c) c.WriteError(msgInvalidStreamID) return @@ -205,12 +210,12 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - start, err := formatStreamRangeBound(startKey, true, reverse) + start, err := formatStreamRangeBound(opts.startKey, true, reverse) if err != nil { c.WriteError(msgInvalidStreamID) return } - end, err := formatStreamRangeBound(endKey, false, reverse) + end, err := formatStreamRangeBound(opts.endKey, false, reverse) if err != nil { c.WriteError(msgInvalidStreamID) return @@ -223,17 +228,17 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteLen(0) return } - if db.t(key) != "stream" { + if db.t(opts.key) != "stream" { c.WriteError(ErrWrongType.Error()) return } - var entries = db.streamKeys[key].entries + var 
entries = db.streamKeys[opts.key].entries if reverse { entries = reversedStreamEntries(entries) } @@ -270,11 +275,11 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { } // Continue if start exclusive and entry ID == start - if startExclusive && streamCmp(entry.ID, start) == 0 { + if opts.startExclusive && streamCmp(entry.ID, start) == 0 { continue } // Continue if end exclusive and entry ID == end - if endExclusive && streamCmp(entry.ID, end) == 0 { + if opts.endExclusive && streamCmp(entry.ID, end) == 0 { continue } @@ -296,19 +301,44 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { // XGROUP func (m *Miniredis) cmdXgroup(c *server.Peer, cmd string, args []string) { - if (len(args) == 4 || len(args) == 5) && strings.ToUpper(args[0]) == "CREATE" { + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + subCmd, args := strings.ToUpper(args[0]), args[1:] + switch subCmd { + case "CREATE": m.cmdXgroupCreate(c, cmd, args) - } else { - j := strings.Join(args, " ") - err := fmt.Sprintf("ERR 'XGROUP %s' not supported", j) + case "DESTROY": + m.cmdXgroupDestroy(c, cmd, args) + case "CREATECONSUMER": + m.cmdXgroupCreateconsumer(c, cmd, args) + case "DELCONSUMER": + m.cmdXgroupDelconsumer(c, cmd, args) + case "HELP", + "SETID": + err := fmt.Sprintf("ERR 'XGROUP %s' not supported", subCmd) setDirty(c) c.WriteError(err) + default: + setDirty(c) + c.WriteError(fmt.Sprintf( + "ERR Unknown subcommand or wrong number of arguments for '%s'. Try XGROUP HELP.", + subCmd, + )) } } // XGROUP CREATE func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { - stream, group, id := args[1], args[2], args[3] + if len(args) != 3 && len(args) != 4 { + setDirty(c) + c.WriteError(errWrongNumber("CREATE")) + return + } + stream, group, id := args[0], args[1], args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -318,7 +348,7 @@ func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { c.WriteError(err.Error()) return } - if s == nil && len(args) == 5 && strings.ToUpper(args[4]) == "MKSTREAM" { + if s == nil && len(args) == 4 && strings.ToUpper(args[3]) == "MKSTREAM" { if s, err = db.newStream(stream); err != nil { c.WriteError(err.Error()) return @@ -338,6 +368,124 @@ func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { }) } +// XGROUP DESTROY +func (m *Miniredis) cmdXgroupDestroy(c *server.Peer, cmd string, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber("DESTROY")) + return + } + stream, groupName := args[0], args[1] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(stream) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgXgroupKeyNotFound) + return + } + + if _, ok := s.groups[groupName]; !ok { + c.WriteInt(0) + return + } + delete(s.groups, groupName) + c.WriteInt(1) + }) +} + +// XGROUP CREATECONSUMER +func (m *Miniredis) cmdXgroupCreateconsumer(c *server.Peer, cmd string, args []string) { + if len(args) != 3 { + setDirty(c) + c.WriteError(errWrongNumber("CREATECONSUMER")) + return + } + key, groupName, consumerName := args[0], args[1], args[2] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgXgroupKeyNotFound) + return + } + + g, ok := s.groups[groupName] + if 
!ok { + err := fmt.Sprintf("NOGROUP No such consumer group '%s' for key name '%s'", groupName, key) + c.WriteError(err) + return + } + + if _, ok = g.consumers[consumerName]; ok { + c.WriteInt(0) + return + } + g.consumers[consumerName] = &consumer{} + c.WriteInt(1) + }) +} + +// XGROUP DELCONSUMER +func (m *Miniredis) cmdXgroupDelconsumer(c *server.Peer, cmd string, args []string) { + if len(args) != 3 { + setDirty(c) + c.WriteError(errWrongNumber("DELCONSUMER")) + return + } + key, groupName, consumerName := args[0], args[1], args[2] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgXgroupKeyNotFound) + return + } + + g, ok := s.groups[groupName] + if !ok { + err := fmt.Sprintf("NOGROUP No such consumer group '%s' for key name '%s'", groupName, key) + c.WriteError(err) + return + } + + consumer, ok := g.consumers[consumerName] + if !ok { + c.WriteInt(0) + return + } + defer delete(g.consumers, consumerName) + + if consumer.numPendingEntries > 0 { + newPending := make([]pendingEntry, 0) + for _, entry := range g.pending { + if entry.consumer != consumerName { + newPending = append(newPending, entry) + } + } + g.pending = newPending + } + c.WriteInt(consumer.numPendingEntries) + }) +} + // XINFO func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) { if len(args) < 1 { @@ -349,7 +497,11 @@ func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) { switch subCmd { case "STREAM": m.cmdXinfoStream(c, args) - case "CONSUMERS", "GROUPS", "HELP": + case "CONSUMERS": + m.cmdXinfoConsumers(c, args) + case "GROUPS": + m.cmdXinfoGroups(c, args) + case "HELP": err := fmt.Sprintf("'XINFO %s' not supported", strings.Join(args, " ")) setDirty(c) c.WriteError(err) @@ -360,7 +512,6 @@ func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) { subCmd, )) } - } // XINFO STREAM @@ -368,10 +519,11 @@ func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) { func (m *Miniredis) cmdXinfoStream(c *server.Peer, args []string) { if len(args) < 1 { setDirty(c) - c.WriteError(errWrongNumber("XINFO")) + c.WriteError(errWrongNumber("STREAM")) return } key := args[0] + withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -391,6 +543,100 @@ func (m *Miniredis) cmdXinfoStream(c *server.Peer, args []string) { }) } +// XINFO GROUPS +func (m *Miniredis) cmdXinfoGroups(c *server.Peer, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber("GROUPS")) + return + } + key := args[0] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgKeyNotFound) + return + } + + c.WriteLen(len(s.groups)) + for name, g := range s.groups { + c.WriteMapLen(4) + + c.WriteBulk("name") + c.WriteBulk(name) + + c.WriteBulk("consumers") + c.WriteInt(len(g.consumers)) + + c.WriteBulk("pending") + c.WriteInt(len(g.pending)) + + c.WriteBulk("last-delivered-id") + c.WriteBulk(g.lastID) + } + }) +} + +// XINFO CONSUMERS +// Please note that this is only a partial implementation, for it does not +// return each consumer's "idle" value, which indicates "the number of +// milliseconds that have passed since the consumer last interacted with the +// server." 
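+// Each consumer is reported as a map holding only its "name" and its
+// "pending" entry count.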
+func (m *Miniredis) cmdXinfoConsumers(c *server.Peer, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber("CONSUMERS")) + return + } + key := args[0] + groupName := args[1] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgKeyNotFound) + return + } + + g, ok := s.groups[groupName] + if !ok { + err := fmt.Sprintf("NOGROUP No such consumer group '%s' for key name '%s'", groupName, key) + c.WriteError(err) + return + } + + consumerNames := make([]string, 0) + for name := range g.consumers { + consumerNames = append(consumerNames, name) + } + sort.Strings(consumerNames) + + c.WriteLen(len(consumerNames)) + for _, name := range consumerNames { + c.WriteMapLen(2) + + c.WriteBulk("name") + c.WriteBulk(name) + + c.WriteBulk("pending") + c.WriteInt(g.consumers[name].numPendingEntries) + } + }) +} + // XREADGROUP func (m *Miniredis) cmdXreadgroup(c *server.Peer, cmd string, args []string) { // XREADGROUP GROUP group consumer STREAMS key ID @@ -638,14 +884,16 @@ func (m *Miniredis) cmdXread(c *server.Peer, cmd string, args []string) { return } - var opts struct { - count int - streams []string - ids []string - block bool - blockTimeout time.Duration - } - var err error + var ( + opts struct { + count int + streams []string + ids []string + block bool + blockTimeout time.Duration + } + err error + ) parsing: for len(args) > 0 { @@ -676,11 +924,14 @@ parsing: } opts.streams, opts.ids = args[0:len(args)/2], args[len(args)/2:] - for _, id := range opts.ids { + for i, id := range opts.ids { if _, err := parseStreamID(id); id != `$` && err != nil { setDirty(c) c.WriteError(msgInvalidStreamID) return + } else if id == "$" { + db := m.DB(getCtx(c).selectedDB) + opts.ids[i] = db.streamKeys[opts.streams[i]].lastID() } } args = nil @@ -797,45 +1048,61 @@ func (m *Miniredis) cmdXpending(c *server.Peer, cmd string, args []string) { return } - key, group, args := args[0], args[1], args[2:] - summary := true - if len(args) > 0 && strings.ToUpper(args[0]) == "IDLE" { - setDirty(c) - c.WriteError("ERR IDLE is unsupported") - return - } - var ( + var opts struct { + key string + group string + summary bool + idle time.Duration start, end string count int consumer *string - ) + } + + opts.key, opts.group, args = args[0], args[1], args[2:] + opts.summary = true if len(args) >= 3 { - summary = false + opts.summary = false + + if strings.ToUpper(args[0]) == "IDLE" { + idleMs, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + opts.idle = time.Duration(idleMs) * time.Millisecond - start_, err := formatStreamRangeBound(args[0], true, false) + args = args[2:] + if len(args) < 3 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + } + + var err error + opts.start, err = formatStreamRangeBound(args[0], true, false) if err != nil { + setDirty(c) c.WriteError(msgInvalidStreamID) return } - start = start_ - end_, err := formatStreamRangeBound(args[1], false, false) + opts.end, err = formatStreamRangeBound(args[1], false, false) if err != nil { + setDirty(c) c.WriteError(msgInvalidStreamID) return } - end = end_ - n, err := strconv.Atoi(args[2]) // negative is allowed + opts.count, err = strconv.Atoi(args[2]) // negative is allowed if err != nil { + setDirty(c) c.WriteError(msgInvalidInt) return } - count = n args = args[3:] if len(args) == 1 { - var c string - c, args = 
args[0], args[1:] - consumer = &c + opts.consumer, args = &args[0], args[1:] } } if len(args) != 0 { @@ -846,21 +1113,21 @@ func (m *Miniredis) cmdXpending(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - g, err := db.streamGroup(key, group) + g, err := db.streamGroup(opts.key, opts.group) if err != nil { c.WriteError(err.Error()) return } if g == nil { - c.WriteError(errReadgroup(key, group).Error()) + c.WriteError(errReadgroup(opts.key, opts.group).Error()) return } - if summary { + if opts.summary { writeXpendingSummary(c, *g) return } - writeXpending(m.effectiveNow(), c, *g, start, end, count, consumer) + writeXpending(m.effectiveNow(), c, *g, opts.idle, opts.start, opts.end, opts.count, opts.consumer) }) } @@ -907,6 +1174,7 @@ func writeXpending( now time.Time, c *server.Peer, g streamGroup, + idle time.Duration, start, end string, count int, @@ -942,12 +1210,19 @@ func writeXpending( if streamCmp(p.id, end) > 0 { continue } - res = append(res, entry{ - id: p.id, - consumer: p.consumer, - millis: int(now.Sub(p.lastDelivery).Milliseconds()), - count: p.deliveryCount, - }) + timeSinceLastDelivery := now.Sub(p.lastDelivery) + if timeSinceLastDelivery >= idle { + res = append(res, entry{ + id: p.id, + consumer: p.consumer, + millis: int(timeSinceLastDelivery.Milliseconds()), + count: p.deliveryCount, + }) + } + } + if len(res) == 0 { + c.WriteLen(-1) + return } c.WriteLen(len(res)) for _, e := range res { @@ -1075,27 +1350,35 @@ func (m *Miniredis) cmdXautoclaim(c *server.Peer, cmd string, args []string) { return } - key, group, consumer := args[0], args[1], args[2] + var opts struct { + key string + group string + consumer string + minIdleTime time.Duration + start string + justId bool + count int + } + opts.key, opts.group, opts.consumer = args[0], args[1], args[2] n, err := strconv.Atoi(args[3]) if err != nil { setDirty(c) c.WriteError("ERR Invalid min-idle-time argument for XAUTOCLAIM") return } - minIdleTime := time.Millisecond * time.Duration(n) + opts.minIdleTime = time.Millisecond * time.Duration(n) start_, err := formatStreamRangeBound(args[4], true, false) if err != nil { c.WriteError(msgInvalidStreamID) return } - start := start_ + opts.start = start_ args = args[5:] - count := 100 - var justId bool + opts.count = 100 parsing: for len(args) > 0 { switch strings.ToUpper(args[0]) { @@ -1105,7 +1388,7 @@ parsing: break parsing } - count, err = strconv.Atoi(args[1]) + opts.count, err = strconv.Atoi(args[1]) if err != nil { break parsing } @@ -1113,7 +1396,7 @@ parsing: args = args[2:] case "JUSTID": args = args[1:] - justId = true + opts.justId = true default: err = errors.New(msgSyntaxError) break parsing @@ -1128,18 +1411,18 @@ parsing: withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - g, err := db.streamGroup(key, group) + g, err := db.streamGroup(opts.key, opts.group) if err != nil { c.WriteError(err.Error()) return } if g == nil { - c.WriteError(errReadgroup(key, group).Error()) + c.WriteError(errReadgroup(opts.key, opts.group).Error()) return } - nextCallId, entries := xautoclaim(m.effectiveNow(), *g, minIdleTime, start, count, consumer) - writeXautoclaim(c, nextCallId, entries, justId) + nextCallId, entries := xautoclaim(m.effectiveNow(), *g, opts.minIdleTime, opts.start, opts.count, opts.consumer) + writeXautoclaim(c, nextCallId, entries, opts.justId) }) } @@ -1162,8 +1445,13 @@ func xautoclaim( if minIdleTime > 0 && now.Before(p.lastDelivery.Add(minIdleTime)) { continue 
} - g.consumers[consumerID] = consumer{} + + prevConsumerID := p.consumer + if _, ok := g.consumers[consumerID]; !ok { + g.consumers[consumerID] = &consumer{} + } p.consumer = consumerID + _, entry := g.stream.get(p.id) // not found. Weird? if entry == nil { @@ -1172,8 +1460,13 @@ func xautoclaim( // (Introduced in Redis 7.0) continue } + p.deliveryCount += 1 p.lastDelivery = now + + g.consumers[prevConsumerID].numPendingEntries-- + g.consumers[consumerID].numPendingEntries++ + msgs[i] = p res = append(res, *entry) @@ -1206,6 +1499,192 @@ func writeXautoclaim(c *server.Peer, nextCallId string, res []StreamEntry, justI } } +// XCLAIM +func (m *Miniredis) cmdXclaim(c *server.Peer, cmd string, args []string) { + if len(args) < 5 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + var opts struct { + key string + groupName string + consumerName string + minIdleTime time.Duration + newLastDelivery time.Time + ids []string + retryCount *int + force bool + justId bool + } + + opts.key, opts.groupName, opts.consumerName = args[0], args[1], args[2] + + minIdleTimeMillis, err := strconv.Atoi(args[3]) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid min-idle-time argument for XCLAIM") + return + } + opts.minIdleTime = time.Millisecond * time.Duration(minIdleTimeMillis) + + opts.newLastDelivery = m.effectiveNow() + opts.ids = append(opts.ids, args[4]) + + args = args[5:] + for len(args) > 0 { + arg := strings.ToUpper(args[0]) + if arg == "IDLE" || + arg == "TIME" || + arg == "RETRYCOUNT" || + arg == "FORCE" || + arg == "JUSTID" { + break + } + opts.ids = append(opts.ids, arg) + args = args[1:] + } + + for len(args) > 0 { + arg := strings.ToUpper(args[0]) + switch arg { + case "IDLE": + idleMs, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid IDLE option argument for XCLAIM") + return + } + if idleMs < 0 { + idleMs = 0 + } + opts.newLastDelivery = m.effectiveNow().Add(time.Millisecond * time.Duration(-idleMs)) + args = args[2:] + case "TIME": + timeMs, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid TIME option argument for XCLAIM") + return + } + opts.newLastDelivery = unixMilli(timeMs) + args = args[2:] + case "RETRYCOUNT": + retryCount, err := strconv.Atoi(args[1]) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid RETRYCOUNT option argument for XCLAIM") + return + } + opts.retryCount = &retryCount + args = args[2:] + case "FORCE": + opts.force = true + args = args[1:] + case "JUSTID": + opts.justId = true + args = args[1:] + default: + setDirty(c) + c.WriteError(fmt.Sprintf("ERR Unrecognized XCLAIM option '%s'", args[0])) + return + } + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + g, err := db.streamGroup(opts.key, opts.groupName) + if err != nil { + c.WriteError(err.Error()) + return + } + if g == nil { + c.WriteError(errReadgroup(opts.key, opts.groupName).Error()) + return + } + + claimedEntryIDs := m.xclaim(g, opts.consumerName, opts.minIdleTime, opts.newLastDelivery, opts.ids, opts.retryCount, opts.force) + writeXclaim(c, g.stream, claimedEntryIDs, opts.justId) + }) +} + +func (m *Miniredis) xclaim( + group *streamGroup, + consumerName string, + minIdleTime time.Duration, + newLastDelivery time.Time, + ids []string, + retryCount *int, + force bool, +) (claimedEntryIDs []string) { + for _, id := range ids { + pelPos, pelEntry := group.searchPending(id) + if pelEntry == nil { + if !force { + continue + } + + if 
pelPos < len(group.pending) { + group.pending = append(group.pending[:pelPos+1], group.pending[pelPos:]...) + } else { + group.pending = append(group.pending, pendingEntry{}) + } + pelEntry = &group.pending[pelPos] + + *pelEntry = pendingEntry{ + id: id, + consumer: consumerName, + deliveryCount: 1, + } + } else { + group.consumers[pelEntry.consumer].numPendingEntries-- + pelEntry.consumer = consumerName + } + + if retryCount != nil { + pelEntry.deliveryCount = *retryCount + } else { + pelEntry.deliveryCount++ + } + pelEntry.lastDelivery = newLastDelivery + + claimedEntryIDs = append(claimedEntryIDs, id) + } + if len(claimedEntryIDs) == 0 { + return + } + + if _, ok := group.consumers[consumerName]; !ok { + group.consumers[consumerName] = &consumer{} + } + consumer := group.consumers[consumerName] + consumer.numPendingEntries += len(claimedEntryIDs) + + return +} + +func writeXclaim(c *server.Peer, stream *streamKey, claimedEntryIDs []string, justId bool) { + c.WriteLen(len(claimedEntryIDs)) + for _, id := range claimedEntryIDs { + if justId { + c.WriteBulk(id) + continue + } + + _, entry := stream.get(id) + if entry == nil { + c.WriteNull() + continue + } + + c.WriteLen(2) + c.WriteBulk(entry.ID) + c.WriteStrings(entry.Values) + } +} + func parseBlock(cmd string, args []string, block *bool, timeout *time.Duration) error { if len(args) < 2 { return errors.New(errWrongNumber(cmd)) @@ -1221,3 +1700,8 @@ func parseBlock(cmd string, args []string, block *bool, timeout *time.Duration) (*timeout) = time.Millisecond * time.Duration(ms) return nil } + +// taken from Go's time package. Can be dropped if miniredis supports >= 1.17 +func unixMilli(msec int64) time.Time { + return time.Unix(msec/1e3, (msec%1e3)*1e6) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_string.go b/vendor/github.com/alicebob/miniredis/v2/cmd_string.go index 29c2c22734b..cec9d48343c 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_string.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_string.go @@ -227,26 +227,29 @@ func (m *Miniredis) cmdPsetex(c *server.Peer, cmd string, args []string) { return } - key := args[0] - ttl, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + ttl int + value string + } + + opts.key = args[0] + if ok := optInt(c, args[1], &opts.ttl); !ok { return } - if ttl <= 0 { + if opts.ttl <= 0 { setDirty(c) c.WriteError(msgInvalidPSETEXTime) return } - value := args[2] + opts.value = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - db.del(key, true) // Clear any existing keys. - db.stringSet(key, value) - db.ttl[key] = time.Duration(ttl) * time.Millisecond + db.del(opts.key, true) // Clear any existing keys. 
+ db.stringSet(opts.key, opts.value) + db.ttl[opts.key] = time.Duration(opts.ttl) * time.Millisecond c.WriteOK() }) } @@ -634,23 +637,24 @@ func (m *Miniredis) cmdIncrby(c *server.Peer, cmd string, args []string) { return } - key := args[0] - delta, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + delta int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.delta); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v, err := db.stringIncr(key, delta) + v, err := db.stringIncr(opts.key, opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -746,23 +750,24 @@ func (m *Miniredis) cmdDecrby(c *server.Peer, cmd string, args []string) { return } - key := args[0] - delta, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + delta int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.delta); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v, err := db.stringIncr(key, -delta) + v, err := db.stringIncr(opts.key, -opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -845,30 +850,29 @@ func (m *Miniredis) cmdGetrange(c *server.Peer, cmd string, args []string) { return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + start int + end int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v := db.stringKeys[key] - c.WriteBulk(withRange(v, start, end)) + v := db.stringKeys[opts.key] + c.WriteBulk(withRange(v, opts.start, opts.end)) }) } @@ -886,36 +890,39 @@ func (m *Miniredis) cmdSetrange(c *server.Peer, cmd string, args []string) { return } - key := args[0] - pos, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + pos int + subst string + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.pos); !ok { return } - if pos < 0 { + if opts.pos < 0 { setDirty(c) c.WriteError("ERR offset is out of range") return } - subst := args[2] + opts.subst = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v := []byte(db.stringKeys[key]) - if len(v) < pos+len(subst) { - newV := make([]byte, pos+len(subst)) + v := []byte(db.stringKeys[opts.key]) + end := opts.pos + len(opts.subst) + if len(v) < end { + newV := make([]byte, end) copy(newV, v) v = newV } - copy(v[pos:pos+len(subst)], subst) - db.stringSet(key, string(v)) + copy(v[opts.pos:end], opts.subst) + db.stringSet(opts.key, string(v)) c.WriteInt(len(v)) }) 
} @@ -935,28 +942,20 @@ func (m *Miniredis) cmdBitcount(c *server.Peer, cmd string, args []string) { } var opts struct { - useRange bool - start, end int - key string + useRange bool + start int + end int + key string } opts.key, args = args[0], args[1:] if len(args) >= 2 { opts.useRange = true - var err error - n, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[0], &opts.start); !ok { return } - opts.start = n - n, err = strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[1], &opts.end); !ok { return } - opts.end = n args = args[2:] } @@ -1001,25 +1000,28 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { return } - var ( - op = strings.ToUpper(args[0]) - target = args[1] - input = args[2:] - ) + var opts struct { + op string + target string + input []string + } + opts.op = strings.ToUpper(args[0]) + opts.target = args[1] + opts.input = args[2:] // 'op' is tested when the transaction is executed. withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - switch op { + switch opts.op { case "AND", "OR", "XOR": - first := input[0] + first := opts.input[0] if t, ok := db.keys[first]; ok && t != "string" { c.WriteError(msgWrongType) return } res := []byte(db.stringKeys[first]) - for _, vk := range input[1:] { + for _, vk := range opts.input[1:] { if t, ok := db.keys[vk]; ok && t != "string" { c.WriteError(msgWrongType) return @@ -1029,23 +1031,23 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { "AND": func(a, b byte) byte { return a & b }, "OR": func(a, b byte) byte { return a | b }, "XOR": func(a, b byte) byte { return a ^ b }, - }[op] + }[opts.op] res = sliceBinOp(cb, res, []byte(v)) } - db.del(target, false) // Keep TTL + db.del(opts.target, false) // Keep TTL if len(res) == 0 { - db.del(target, true) + db.del(opts.target, true) } else { - db.stringSet(target, string(res)) + db.stringSet(opts.target, string(res)) } c.WriteInt(len(res)) case "NOT": // NOT only takes a single argument. 
- if len(input) != 1 { + if len(opts.input) != 1 { c.WriteError("ERR BITOP NOT must be called with a single source key.") return } - key := input[0] + key := opts.input[0] if t, ok := db.keys[key]; ok && t != "string" { c.WriteError(msgWrongType) return @@ -1054,11 +1056,11 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { for i := range value { value[i] = ^value[i] } - db.del(target, false) // Keep TTL + db.del(opts.target, false) // Keep TTL if len(value) == 0 { - db.del(target, true) + db.del(opts.target, true) } else { - db.stringSet(target, string(value)) + db.stringSet(opts.target, string(value)) } c.WriteInt(len(value)) default: @@ -1182,9 +1184,15 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { return } - key := args[0] - bit, err := strconv.Atoi(args[1]) - if err != nil { + var opts struct { + key string + bit int + } + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.bit, "ERR bit offset is not an integer or out of range"); !ok { + return + } + if opts.bit < 0 { setDirty(c) c.WriteError("ERR bit offset is not an integer or out of range") return @@ -1193,13 +1201,13 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - value := db.stringKeys[key] + value := db.stringKeys[opts.key] - ourByteNr := bit / 8 + ourByteNr := opts.bit / 8 var ourByte byte if ourByteNr > len(value)-1 { ourByte = '\x00' @@ -1207,7 +1215,7 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { ourByte = value[ourByteNr] } res := 0 - if toBits(ourByte)[bit%8] { + if toBits(ourByte)[opts.bit%8] { res = 1 } c.WriteInt(res) @@ -1228,15 +1236,24 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { return } - key := args[0] - bit, err := strconv.Atoi(args[1]) - if err != nil || bit < 0 { + var opts struct { + key string + bit int + newBit int + } + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.bit, "ERR bit offset is not an integer or out of range"); !ok { + return + } + if opts.bit < 0 { setDirty(c) c.WriteError("ERR bit offset is not an integer or out of range") return } - newBit, err := strconv.Atoi(args[2]) - if err != nil || (newBit != 0 && newBit != 1) { + if ok := optIntErr(c, args[2], &opts.newBit, "ERR bit is not an integer or out of range"); !ok { + return + } + if opts.newBit != 0 && opts.newBit != 1 { setDirty(c) c.WriteError("ERR bit is not an integer or out of range") return @@ -1245,14 +1262,14 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - value := []byte(db.stringKeys[key]) + value := []byte(db.stringKeys[opts.key]) - ourByteNr := bit / 8 - ourBitNr := bit % 8 + ourByteNr := opts.bit / 8 + ourBitNr := opts.bit % 8 if ourByteNr > len(value)-1 { // Too short. Expand. 
newValue := make([]byte, ourByteNr+1) @@ -1263,12 +1280,12 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { if toBits(value[ourByteNr])[ourBitNr] { old = 1 } - if newBit == 0 { + if opts.newBit == 0 { value[ourByteNr] &^= 1 << uint8(7-ourBitNr) } else { value[ourByteNr] |= 1 << uint8(7-ourBitNr) } - db.stringSet(key, string(value)) + db.stringSet(opts.key, string(value)) c.WriteInt(old) }) diff --git a/vendor/github.com/alicebob/miniredis/v2/geo.go b/vendor/github.com/alicebob/miniredis/v2/geo.go index bc8e929270b..3028a167010 100644 --- a/vendor/github.com/alicebob/miniredis/v2/geo.go +++ b/vendor/github.com/alicebob/miniredis/v2/geo.go @@ -21,12 +21,10 @@ func hsin(theta float64) float64 { } // distance function returns the distance (in meters) between two points of -// a given longitude and latitude relatively accurately (using a spherical -// approximation of the Earth) through the Haversin Distance Formula for -// great arc distance on a sphere with accuracy for small distances -// +// a given longitude and latitude relatively accurately (using a spherical +// approximation of the Earth) through the Haversin Distance Formula for +// great arc distance on a sphere with accuracy for small distances // point coordinates are supplied in degrees and converted into rad. in the func -// // distance returned is meters // http://en.wikipedia.org/wiki/Haversine_formula // Source: https://gist.github.com/cdipaolo/d3f8db3848278b49db68 diff --git a/vendor/github.com/alicebob/miniredis/v2/lua.go b/vendor/github.com/alicebob/miniredis/v2/lua.go index 42222dce8e3..bc86eed7d46 100644 --- a/vendor/github.com/alicebob/miniredis/v2/lua.go +++ b/vendor/github.com/alicebob/miniredis/v2/lua.go @@ -217,6 +217,8 @@ func redisToLua(l *lua.LState, res []interface{}) *lua.LTable { v = lua.LFalse } else { switch et := e.(type) { + case int: + v = lua.LNumber(et) case int64: v = lua.LNumber(et) case []uint8: diff --git a/vendor/github.com/alicebob/miniredis/v2/miniredis.go b/vendor/github.com/alicebob/miniredis/v2/miniredis.go index 697dec0652d..c86e2fe8b30 100644 --- a/vendor/github.com/alicebob/miniredis/v2/miniredis.go +++ b/vendor/github.com/alicebob/miniredis/v2/miniredis.go @@ -13,7 +13,6 @@ // // For direct use you can select a Redis database with either `s.Select(12); // s.Get("foo")` or `s.DB(12).Get("foo")`. -// package miniredis import ( @@ -194,7 +193,6 @@ func (m *Miniredis) start(s *server.Server) error { commandsScripting(m) commandsGeo(m) commandsCluster(m) - commandsCommand(m) commandsHll(m) return nil @@ -347,9 +345,9 @@ func (m *Miniredis) Server() *server.Server { // Dump limits the maximum length of each key:value to "DumpMaxLineLen" characters. // To increase that, call something like: // -// miniredis.DumpMaxLineLen = 1024 -// mr, _ = miniredis.Run() -// mr.Dump() +// miniredis.DumpMaxLineLen = 1024 +// mr, _ = miniredis.Run() +// mr.Dump() func (m *Miniredis) Dump() string { m.Lock() defer m.Unlock() @@ -419,8 +417,10 @@ func (m *Miniredis) SetTime(t time.Time) { } // make every command return this message. For example: -// LOADING Redis is loading the dataset in memory -// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'. +// +// LOADING Redis is loading the dataset in memory +// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'. +// // Clear it with an empty string. Don't add newlines. 
func (m *Miniredis) SetError(msg string) { cb := server.Hook(nil) @@ -433,6 +433,18 @@ func (m *Miniredis) SetError(msg string) { m.srv.SetPreHook(cb) } +// isValidCMD returns true if command is valid and can be executed. +func (m *Miniredis) isValidCMD(c *server.Peer, cmd string) bool { + if !m.handleAuth(c) { + return false + } + if m.checkPubsub(c, cmd) { + return false + } + + return true +} + // handleAuth returns false if connection has no access. It sends the reply. func (m *Miniredis) handleAuth(c *server.Peer) bool { if getCtx(c).nested { diff --git a/vendor/github.com/alicebob/miniredis/v2/opts.go b/vendor/github.com/alicebob/miniredis/v2/opts.go index 016d2682000..de91386015d 100644 --- a/vendor/github.com/alicebob/miniredis/v2/opts.go +++ b/vendor/github.com/alicebob/miniredis/v2/opts.go @@ -1,7 +1,9 @@ package miniredis import ( + "math" "strconv" + "time" "github.com/alicebob/miniredis/v2/server" ) @@ -10,12 +12,33 @@ import ( // Writes "invalid integer" error to c if it's not a valid integer. Returns // whether or not things were okay. func optInt(c *server.Peer, src string, dest *int) bool { + return optIntErr(c, src, dest, msgInvalidInt) +} + +func optIntErr(c *server.Peer, src string, dest *int, errMsg string) bool { n, err := strconv.Atoi(src) if err != nil { setDirty(c) - c.WriteError(msgInvalidInt) + c.WriteError(errMsg) return false } *dest = n return true } + +func optDuration(c *server.Peer, src string, dest *time.Duration) bool { + n, err := strconv.ParseFloat(src, 64) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidTimeout) + return false + } + if n < 0 || math.IsInf(n, 0) { + setDirty(c) + c.WriteError(msgNegTimeout) + return false + } + + *dest = time.Duration(n*1_000_000) * time.Microsecond + return true +} diff --git a/vendor/github.com/alicebob/miniredis/v2/redis.go b/vendor/github.com/alicebob/miniredis/v2/redis.go index d4a0cd8d441..7e78ee47066 100644 --- a/vendor/github.com/alicebob/miniredis/v2/redis.go +++ b/vendor/github.com/alicebob/miniredis/v2/redis.go @@ -49,6 +49,9 @@ const ( msgXtrimInvalidLimit = "ERR syntax error, LIMIT cannot be used without the special ~ option" msgDBIndexOutOfRange = "ERR DB index is out of range" msgLimitCombination = "ERR syntax error, LIMIT is only supported in combination with either BYSCORE or BYLEX" + msgRankIsZero = "ERR RANK can't be zero: use 1 to start from the first match, 2 from the second ... 
or use negative to start from the end of the list" + msgCountIsNegative = "ERR COUNT can't be negative" + msgMaxLengthIsNegative = "ERR MAXLEN can't be negative" ) func errWrongNumber(cmd string) string { @@ -134,14 +137,19 @@ func blocking( m.Lock() defer m.Unlock() for { - done := cb(c, ctx) - if done { + if c.Closed() { return } if m.Ctx.Err() != nil { return } + + done := cb(c, ctx) + if done { + return + } + if timedOut { onTimeout(c) return diff --git a/vendor/github.com/alicebob/miniredis/v2/server/server.go b/vendor/github.com/alicebob/miniredis/v2/server/server.go index 60e391f22bd..ee4f04c2186 100644 --- a/vendor/github.com/alicebob/miniredis/v2/server/server.go +++ b/vendor/github.com/alicebob/miniredis/v2/server/server.go @@ -158,24 +158,34 @@ func (s *Server) servePeer(c net.Conn) { peer := &Peer{ w: bufio.NewWriter(c), } + defer func() { for _, f := range peer.onDisconnect { f() } }() - for { - args, err := readArray(r) - if err != nil { - return + readCh := make(chan []string) + + go func() { + defer close(readCh) + + for { + args, err := readArray(r) + if err != nil { + peer.Close() + return + } + + readCh <- args } + }() + + for args := range readCh { s.Dispatch(peer, args) peer.Flush() - s.mu.Lock() - closed := peer.closed - s.mu.Unlock() - if closed { + if peer.Closed() { c.Close() } } @@ -259,6 +269,13 @@ func (c *Peer) Close() { c.closed = true } +// Return true if the peer connection closed. +func (c *Peer) Closed() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.closed +} + // Register a function to execute on disconnect. There can be multiple // functions registered. func (c *Peer) OnDisconnect(f func()) { diff --git a/vendor/github.com/alicebob/miniredis/v2/stream.go b/vendor/github.com/alicebob/miniredis/v2/stream.go index 574f9016dcf..75848d02803 100644 --- a/vendor/github.com/alicebob/miniredis/v2/stream.go +++ b/vendor/github.com/alicebob/miniredis/v2/stream.go @@ -9,6 +9,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" ) @@ -17,6 +18,7 @@ type streamKey struct { entries []StreamEntry groups map[string]*streamGroup lastAllocatedID string + mu sync.Mutex } // a StreamEntry is an entry in a stream. 
The ID is always of the form @@ -31,10 +33,11 @@ type streamGroup struct { stream *streamKey lastID string pending []pendingEntry - consumers map[string]consumer + consumers map[string]*consumer } type consumer struct { + numPendingEntries int // TODO: "last seen" timestamp } @@ -51,6 +54,7 @@ func newStreamKey() *streamKey { } } +// generateID doesn't lock the mutex func (s *streamKey) generateID(now time.Time) string { ts := uint64(now.UnixNano()) / 1_000_000 @@ -60,7 +64,7 @@ func (s *streamKey) generateID(now time.Time) string { next = fmt.Sprintf("%d-%d", last[0], last[1]+1) } - lastID := s.lastID() + lastID := s.lastIDUnlocked() if streamCmp(lastID, next) >= 0 { last, _ := parseStreamID(lastID) next = fmt.Sprintf("%d-%d", last[0], last[1]+1) @@ -70,7 +74,16 @@ func (s *streamKey) generateID(now time.Time) string { return next } +// lastID locks the mutex func (s *streamKey) lastID() string { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lastIDUnlocked() +} + +// lastID doesn't lock the mutex +func (s *streamKey) lastIDUnlocked() string { if len(s.entries) == 0 { return "0-0" } @@ -79,6 +92,9 @@ func (s *streamKey) lastID() string { } func (s *streamKey) copy() *streamKey { + s.mu.Lock() + defer s.mu.Unlock() + cpy := &streamKey{ entries: s.entries, } @@ -193,17 +209,20 @@ func reversedStreamEntries(o []StreamEntry) []StreamEntry { } func (s *streamKey) createGroup(group, id string) error { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.groups[group]; ok { return errors.New("BUSYGROUP Consumer Group name already exists") } if id == "$" { - id = s.lastID() + id = s.lastIDUnlocked() } s.groups[group] = &streamGroup{ stream: s, lastID: id, - consumers: map[string]consumer{}, + consumers: map[string]*consumer{}, } return nil } @@ -212,6 +231,9 @@ func (s *streamKey) createGroup(group, id string) error { // If id is empty or "*" the ID will be generated automatically. // `values` should have an even length. func (s *streamKey) add(entryID string, values []string, now time.Time) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if entryID == "" || entryID == "*" { entryID = s.generateID(now) } @@ -223,7 +245,7 @@ func (s *streamKey) add(entryID string, values []string, now time.Time) (string, if entryID == "0-0" { return "", errors.New(msgStreamIDZero) } - if streamCmp(s.lastID(), entryID) != -1 { + if streamCmp(s.lastIDUnlocked(), entryID) != -1 { return "", errors.New(msgStreamIDTooSmall) } @@ -235,6 +257,9 @@ func (s *streamKey) add(entryID string, values []string, now time.Time) (string, } func (s *streamKey) trim(n int) { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.entries) > n { s.entries = s.entries[len(s.entries)-n:] } @@ -242,6 +267,9 @@ func (s *streamKey) trim(n int) { // all entries after "id" func (s *streamKey) after(id string) []StreamEntry { + s.mu.Lock() + defer s.mu.Unlock() + pos := sort.Search(len(s.entries), func(i int) bool { return streamCmp(id, s.entries[i].ID) < 0 }) @@ -251,6 +279,9 @@ func (s *streamKey) after(id string) []StreamEntry { // get a stream entry by ID // Also returns the position in the entries slice, if found. 
func (s *streamKey) get(id string) (int, *StreamEntry) { + s.mu.Lock() + defer s.mu.Unlock() + pos := sort.Search(len(s.entries), func(i int) bool { return streamCmp(id, s.entries[i].ID) <= 0 }) @@ -279,16 +310,39 @@ func (g *streamGroup) readGroup( } if !noack { + shouldAppend := len(g.pending) == 0 for _, msg := range msgs { - g.pending = append(g.pending, pendingEntry{ + if !shouldAppend { + shouldAppend = streamCmp(msg.ID, g.pending[len(g.pending)-1].id) == 1 + } + + var entry *pendingEntry + if shouldAppend { + g.pending = append(g.pending, pendingEntry{}) + entry = &g.pending[len(g.pending)-1] + } else { + var pos int + pos, entry = g.searchPending(msg.ID) + if entry == nil { + g.pending = append(g.pending[:pos+1], g.pending[pos:]...) + entry = &g.pending[pos] + } else { + g.consumers[entry.consumer].numPendingEntries-- + } + } + + *entry = pendingEntry{ id: msg.ID, consumer: consumerID, deliveryCount: 1, lastDelivery: now, - }) + } } } - g.consumers[consumerID] = consumer{} + if _, ok := g.consumers[consumerID]; !ok { + g.consumers[consumerID] = &consumer{} + } + g.consumers[consumerID].numPendingEntries += len(msgs) g.lastID = msgs[len(msgs)-1].ID return msgs } @@ -314,6 +368,16 @@ func (g *streamGroup) readGroup( return res } +func (g *streamGroup) searchPending(id string) (int, *pendingEntry) { + pos := sort.Search(len(g.pending), func(i int) bool { + return streamCmp(id, g.pending[i].id) <= 0 + }) + if pos >= len(g.pending) || g.pending[pos].id != id { + return pos, nil + } + return pos, &g.pending[pos] +} + func (g *streamGroup) ack(ids []string) (int, error) { count := 0 for _, id := range ids { @@ -321,13 +385,14 @@ func (g *streamGroup) ack(ids []string) (int, error) { return 0, errors.New(msgInvalidStreamID) } - pos := sort.Search(len(g.pending), func(i int) bool { - return streamCmp(id, g.pending[i].id) <= 0 - }) - if len(g.pending) <= pos || g.pending[pos].id != id { + pos, entry := g.searchPending(id) + if entry == nil { continue } + consumer := g.consumers[entry.consumer] + consumer.numPendingEntries-- + g.pending = append(g.pending[:pos], g.pending[pos+1:]...) count++ } @@ -370,9 +435,10 @@ func (g *streamGroup) pendingCount(consumer string) int { } func (g *streamGroup) copy() *streamGroup { - cns := map[string]consumer{} + cns := map[string]*consumer{} for k, v := range g.consumers { - cns[k] = v + c := *v + cns[k] = &c } return &streamGroup{ // don't copy stream diff --git a/vendor/github.com/yuin/gopher-lua/README.rst b/vendor/github.com/yuin/gopher-lua/README.rst index b479e46357e..2b6de3259ac 100644 --- a/vendor/github.com/yuin/gopher-lua/README.rst +++ b/vendor/github.com/yuin/gopher-lua/README.rst @@ -870,6 +870,7 @@ Libraries for GopherLua - `gluaperiphery `_ : A periphery library for the GopherLua VM (GPIO, SPI, I2C, MMIO, and Serial peripheral I/O for Linux). - `glua-async `_ : An async/await implement for gopher-lua. 
- `gopherlua-debugger `_ : A debugger for gopher-lua +- `gluamahonia `_ : An encoding converter for gopher-lua ---------------------------------------------------------------- Donation ---------------------------------------------------------------- diff --git a/vendor/github.com/yuin/gopher-lua/_vm.go b/vendor/github.com/yuin/gopher-lua/_vm.go index 874ed9aa4a7..049107e1774 100644 --- a/vendor/github.com/yuin/gopher-lua/_vm.go +++ b/vendor/github.com/yuin/gopher-lua/_vm.go @@ -415,7 +415,7 @@ func init() { if ret.Type() == LTNumber { reg.SetNumber(RA, ret.(LNumber)) } else { - reg.SetNumber(RA, LNumber(0)) + reg.Set(RA, ret) } } else if lv.Type() == LTTable { reg.SetNumber(RA, LNumber(lv.(*LTable).Len())) diff --git a/vendor/github.com/yuin/gopher-lua/ast/expr.go b/vendor/github.com/yuin/gopher-lua/ast/expr.go index ccda3279101..388852bab36 100644 --- a/vendor/github.com/yuin/gopher-lua/ast/expr.go +++ b/vendor/github.com/yuin/gopher-lua/ast/expr.go @@ -52,6 +52,7 @@ type StringExpr struct { type Comma3Expr struct { ExprBase + AdjustRet bool } type IdentExpr struct { diff --git a/vendor/github.com/yuin/gopher-lua/baselib.go b/vendor/github.com/yuin/gopher-lua/baselib.go index 06c90619eef..aa2c08a9419 100644 --- a/vendor/github.com/yuin/gopher-lua/baselib.go +++ b/vendor/github.com/yuin/gopher-lua/baselib.go @@ -260,7 +260,7 @@ func basePairs(L *LState) int { func basePCall(L *LState) int { L.CheckAny(1) v := L.Get(1) - if v.Type() != LTFunction { + if v.Type() != LTFunction && L.GetMetaField(v, "__call").Type() != LTFunction { L.Push(LFalse) L.Push(LString("attempt to call a " + v.Type().String() + " value")) return 2 @@ -321,11 +321,16 @@ func baseSelect(L *LState) int { switch lv := L.Get(1).(type) { case LNumber: idx := int(lv) - num := L.reg.Top() - L.indexToReg(int(lv)) - 1 + num := L.GetTop() if idx < 0 { - num++ + idx = num + idx + } else if idx > num { + idx = num } - return num + if 1 > idx { + L.ArgError(1, "index out of range") + } + return num - idx case LString: if string(lv) != "#" { L.ArgError(1, "invalid string '"+string(lv)+"'") diff --git a/vendor/github.com/yuin/gopher-lua/compile.go b/vendor/github.com/yuin/gopher-lua/compile.go index d3c665ae57c..75c75550e42 100644 --- a/vendor/github.com/yuin/gopher-lua/compile.go +++ b/vendor/github.com/yuin/gopher-lua/compile.go @@ -114,7 +114,7 @@ func isVarArgReturnExpr(expr ast.Expr) bool { case *ast.FuncCallExpr: return !ex.AdjustRet case *ast.Comma3Expr: - return true + return !ex.AdjustRet } return false } @@ -723,8 +723,12 @@ func compileReturnStmt(context *funcContext, stmt *ast.ReturnStmt) { // {{{ return } case *ast.FuncCallExpr: - reg += compileExpr(context, reg, ex, ecnone(-2)) - code.SetOpCode(code.LastPC(), OP_TAILCALL) + if ex.AdjustRet { // return (func()) + reg += compileExpr(context, reg, ex, ecnone(0)) + } else { + reg += compileExpr(context, reg, ex, ecnone(-2)) + code.SetOpCode(code.LastPC(), OP_TAILCALL) + } code.AddABC(OP_RETURN, a, 0, 0, sline(stmt)) return } diff --git a/vendor/github.com/yuin/gopher-lua/config.go b/vendor/github.com/yuin/gopher-lua/config.go index f58b59393a5..d632188953b 100644 --- a/vendor/github.com/yuin/gopher-lua/config.go +++ b/vendor/github.com/yuin/gopher-lua/config.go @@ -22,15 +22,22 @@ var LuaPath = "LUA_PATH" var LuaLDir string var LuaPathDefault string var LuaOS string +var LuaDirSep string +var LuaPathSep = ";" +var LuaPathMark = "?" +var LuaExecDir = "!" 
+var LuaIgMark = "-" func init() { if os.PathSeparator == '/' { // unix-like LuaOS = "unix" LuaLDir = "/usr/local/share/lua/5.1" + LuaDirSep = "/" LuaPathDefault = "./?.lua;" + LuaLDir + "/?.lua;" + LuaLDir + "/?/init.lua" } else { // windows LuaOS = "windows" LuaLDir = "!\\lua" + LuaDirSep = "\\" LuaPathDefault = ".\\?.lua;" + LuaLDir + "\\?.lua;" + LuaLDir + "\\?\\init.lua" } } diff --git a/vendor/github.com/yuin/gopher-lua/loadlib.go b/vendor/github.com/yuin/gopher-lua/loadlib.go index 772bb04ad88..40ce122b8fe 100644 --- a/vendor/github.com/yuin/gopher-lua/loadlib.go +++ b/vendor/github.com/yuin/gopher-lua/loadlib.go @@ -65,6 +65,9 @@ func OpenPackage(L *LState) int { L.SetField(packagemod, "path", LString(loGetPath(LuaPath, LuaPathDefault))) L.SetField(packagemod, "cpath", emptyLString) + L.SetField(packagemod, "config", LString(LuaDirSep+"\n"+LuaPathSep+ + "\n"+LuaPathMark+"\n"+LuaExecDir+"\n"+LuaIgMark+"\n")) + L.Push(packagemod) return 1 } diff --git a/vendor/github.com/yuin/gopher-lua/oslib.go b/vendor/github.com/yuin/gopher-lua/oslib.go index c70a99bf136..256c8811300 100644 --- a/vendor/github.com/yuin/gopher-lua/oslib.go +++ b/vendor/github.com/yuin/gopher-lua/oslib.go @@ -25,6 +25,9 @@ func getIntField(L *LState, tb *LTable, key string, v int) int { if strings.HasPrefix(slv, "0") && !strings.HasPrefix(slv, "0x") && !strings.HasPrefix(slv, "0X") { //Standard lua interpreter only support decimal and hexadecimal slv = strings.TrimLeft(slv, "0") + if slv == "" { + return 0 + } } if num, err := parseNumber(slv); err == nil { return int(num) @@ -189,20 +192,28 @@ func osTime(L *LState) int { if L.GetTop() == 0 { L.Push(LNumber(time.Now().Unix())) } else { - tbl := L.CheckTable(1) - sec := getIntField(L, tbl, "sec", 0) - min := getIntField(L, tbl, "min", 0) - hour := getIntField(L, tbl, "hour", 12) - day := getIntField(L, tbl, "day", -1) - month := getIntField(L, tbl, "month", -1) - year := getIntField(L, tbl, "year", -1) - isdst := getBoolField(L, tbl, "isdst", false) - t := time.Date(year, time.Month(month), day, hour, min, sec, 0, time.Local) - // TODO dst - if false { - print(isdst) + lv := L.CheckAny(1) + if lv == LNil { + L.Push(LNumber(time.Now().Unix())) + } else { + tbl, ok := lv.(*LTable) + if !ok { + L.TypeError(1, LTTable) + } + sec := getIntField(L, tbl, "sec", 0) + min := getIntField(L, tbl, "min", 0) + hour := getIntField(L, tbl, "hour", 12) + day := getIntField(L, tbl, "day", -1) + month := getIntField(L, tbl, "month", -1) + year := getIntField(L, tbl, "year", -1) + isdst := getBoolField(L, tbl, "isdst", false) + t := time.Date(year, time.Month(month), day, hour, min, sec, 0, time.Local) + // TODO dst + if false { + print(isdst) + } + L.Push(LNumber(t.Unix())) } - L.Push(LNumber(t.Unix())) } return 1 } diff --git a/vendor/github.com/yuin/gopher-lua/parse/Makefile b/vendor/github.com/yuin/gopher-lua/parse/Makefile index 6dd048c165f..9838b1393b9 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/Makefile +++ b/vendor/github.com/yuin/gopher-lua/parse/Makefile @@ -2,3 +2,6 @@ all : parser.go parser.go : parser.go.y goyacc -o $@ parser.go.y; [ -f y.output ] && ( rm -f y.output ) + +clean: + rm -f parser.go diff --git a/vendor/github.com/yuin/gopher-lua/parse/lexer.go b/vendor/github.com/yuin/gopher-lua/parse/lexer.go index d711e78bc19..6ad57ceed7d 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/lexer.go +++ b/vendor/github.com/yuin/gopher-lua/parse/lexer.go @@ -255,7 +255,7 @@ func (sc *Scanner) scanMultilineString(ch int, buf *bytes.Buffer) error { var count1, count2 
int count1, ch = sc.countSep(ch) if ch != '[' { - return sc.Error(string(ch), "invalid multiline string") + return sc.Error(string(rune(ch)), "invalid multiline string") } ch = sc.Next() if ch == '\n' || ch == '\r' { @@ -338,7 +338,7 @@ redo: goto redo } else { tok.Type = ch - tok.Str = string(ch) + tok.Str = string(rune(ch)) } case '"', '\'': tok.Type = TString @@ -351,7 +351,7 @@ redo: tok.Str = buf.String() } else { tok.Type = ch - tok.Str = string(ch) + tok.Str = string(rune(ch)) } case '=': if sc.Peek() == '=' { @@ -360,7 +360,7 @@ redo: sc.Next() } else { tok.Type = ch - tok.Str = string(ch) + tok.Str = string(rune(ch)) } case '~': if sc.Peek() == '=' { @@ -377,7 +377,7 @@ redo: sc.Next() } else { tok.Type = ch - tok.Str = string(ch) + tok.Str = string(rune(ch)) } case '>': if sc.Peek() == '=' { @@ -386,7 +386,7 @@ redo: sc.Next() } else { tok.Type = ch - tok.Str = string(ch) + tok.Str = string(rune(ch)) } case '.': ch2 := sc.Peek() @@ -410,7 +410,7 @@ redo: tok.Str = buf.String() case '+', '*', '/', '%', '^', '#', '(', ')', '{', '}', ']', ';', ':', ',': tok.Type = ch - tok.Str = string(ch) + tok.Str = string(rune(ch)) default: writeChar(buf, ch) err = sc.Error(buf.String(), "Invalid token") diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go b/vendor/github.com/yuin/gopher-lua/parse/parser.go index f8f59b36154..c7565816490 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/parser.go +++ b/vendor/github.com/yuin/gopher-lua/parse/parser.go @@ -1,9 +1,12 @@ +// Code generated by goyacc -o parser.go parser.go.y. DO NOT EDIT. + //line parser.go.y:2 package parse import __yyfmt__ "fmt" //line parser.go.y:2 + import ( "github.com/yuin/gopher-lua/ast" ) @@ -62,7 +65,10 @@ const TNumber = 57374 const TString = 57375 const UNARY = 57376 -var yyToknames = []string{ +var yyToknames = [...]string{ + "$end", + "error", + "$unk", "TAnd", "TBreak", "TDo", @@ -93,25 +99,37 @@ var yyToknames = []string{ "TIdent", "TNumber", "TString", - " {", - " (", - " >", - " <", - " +", - " -", - " *", - " /", - " %", + "'{'", + "'('", + "'>'", + "'<'", + "'+'", + "'-'", + "'*'", + "'/'", + "'%'", "UNARY", - " ^", + "'^'", + "';'", + "'='", + "','", + "':'", + "'.'", + "'['", + "']'", + "'#'", + "')'", + "'}'", } -var yyStatenames = []string{} + +var yyStatenames = [...]string{} const yyEofCode = 1 const yyErrCode = 2 -const yyMaxDepth = 200 +const yyInitialStackSize = 16 + +//line parser.go.y:517 -//line parser.go.y:514 func TokenName(c int) string { if c >= TAnd && c-TAnd < len(yyToknames) { if yyToknames[c-TAnd] != "" { @@ -122,7 +140,7 @@ func TokenName(c int) string { } //line yacctab:1 -var yyExca = []int{ +var yyExca = [...]int8{ -1, 1, 1, -1, -2, 0, @@ -136,16 +154,11 @@ var yyExca = []int{ -2, 68, } -const yyNprod = 95 const yyPrivate = 57344 -var yyTokenNames []string -var yyStates []string - const yyLast = 579 -var yyAct = []int{ - +var yyAct = [...]uint8{ 24, 88, 50, 23, 45, 84, 56, 65, 137, 153, 136, 113, 52, 142, 54, 53, 33, 134, 65, 132, 62, 63, 32, 61, 108, 109, 48, 111, 106, 41, @@ -205,8 +218,8 @@ var yyAct = []int{ 0, 0, 0, 0, 21, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, } -var yyPact = []int{ +var yyPact = [...]int16{ -1000, -1000, 533, -5, -1000, -1000, 292, -1000, -17, 152, -1000, 292, -1000, 292, 107, 97, 88, -1000, -1000, -1000, 292, -1000, -1000, -29, 473, -1000, -1000, -1000, -1000, -1000, @@ -227,14 +240,14 @@ var yyPact = []int{ 311, 151, -1000, 473, 146, 392, -1000, 292, -1000, -1000, -1000, 144, 365, -1000, -1000, -1000, 140, -1000, } -var yyPgo = []int{ +var yyPgo = 
[...]uint8{ 0, 190, 227, 2, 226, 223, 215, 210, 204, 203, 118, 6, 3, 0, 22, 107, 168, 199, 4, 197, 5, 195, 16, 193, 1, 182, } -var yyR1 = []int{ +var yyR1 = [...]int8{ 0, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, @@ -246,8 +259,8 @@ var yyR1 = []int{ 20, 20, 21, 21, 21, 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, } -var yyR2 = []int{ +var yyR2 = [...]int8{ 0, 1, 2, 3, 0, 2, 2, 1, 3, 1, 3, 5, 4, 6, 8, 9, 11, 7, 3, 4, 4, 2, 0, 5, 1, 2, 1, 1, 3, 1, @@ -259,8 +272,8 @@ var yyR2 = []int{ 5, 4, 1, 1, 3, 2, 3, 1, 3, 2, 3, 5, 1, 1, 1, } -var yyChk = []int{ +var yyChk = [...]int16{ -1000, -1, -2, -6, -4, 45, 19, 5, -9, -15, 6, 24, 20, 13, 11, 12, 15, -10, -17, -16, 35, 31, 45, -12, -13, 16, 10, 22, 32, 30, @@ -281,8 +294,8 @@ var yyChk = []int{ -13, -3, 9, -13, -3, -13, 6, 47, 9, 9, 21, -3, -13, -3, 9, 6, -3, 9, } -var yyDef = []int{ +var yyDef = [...]int8{ 4, -2, 1, 2, 5, 6, 24, 26, 0, 9, 4, 0, 4, 0, 0, 0, 0, -2, 69, 70, 0, 33, 3, 25, 38, 40, 41, 42, 43, 44, @@ -303,8 +316,8 @@ var yyDef = []int{ 0, 0, 80, 91, 0, 0, 4, 0, 17, 14, 4, 0, 0, 23, 15, 4, 0, 16, } -var yyTok1 = []int{ +var yyTok1 = [...]int8{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, @@ -319,35 +332,63 @@ var yyTok1 = []int{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 34, 3, 54, } -var yyTok2 = []int{ +var yyTok2 = [...]int8{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 43, } -var yyTok3 = []int{ + +var yyTok3 = [...]int8{ 0, } +var yyErrorMessages = [...]struct { + state int + token int + msg string +}{} + //line yaccpar:1 /* parser for yacc output */ -var yyDebug = 0 +var ( + yyDebug = 0 + yyErrorVerbose = false +) type yyLexer interface { Lex(lval *yySymType) int Error(s string) } +type yyParser interface { + Parse(yyLexer) int + Lookahead() int +} + +type yyParserImpl struct { + lval yySymType + stack [yyInitialStackSize]yySymType + char int +} + +func (p *yyParserImpl) Lookahead() int { + return p.char +} + +func yyNewParser() yyParser { + return &yyParserImpl{} +} + const yyFlag = -1000 func yyTokname(c int) string { - // 4 is TOKSTART above - if c >= 4 && c-4 < len(yyToknames) { - if yyToknames[c-4] != "" { - return yyToknames[c-4] + if c >= 1 && c-1 < len(yyToknames) { + if yyToknames[c-1] != "" { + return yyToknames[c-1] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -362,51 +403,127 @@ func yyStatname(s int) string { return __yyfmt__.Sprintf("state-%v", s) } -func yylex1(lex yyLexer, lval *yySymType) int { - c := 0 - char := lex.Lex(lval) +func yyErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !yyErrorVerbose { + return "syntax error" + } + + for _, e := range yyErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + yyTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := int(yyPact[state]) + for tok := TOKSTART; tok-1 < len(yyToknames); tok++ { + if n := base + tok; n >= 0 && n < yyLast && int(yyChk[int(yyAct[n])]) == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if yyDef[state] == -2 { + i := 0 + for yyExca[i] != -1 || int(yyExca[i+1]) != state { + i += 2 + } + + // Look for tokens that we accept or reduce. 
+ for i += 2; yyExca[i] >= 0; i += 2 { + tok := int(yyExca[i]) + if tok < TOKSTART || yyExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if yyExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += yyTokname(tok) + } + return res +} + +func yylex1(lex yyLexer, lval *yySymType) (char, token int) { + token = 0 + char = lex.Lex(lval) if char <= 0 { - c = yyTok1[0] + token = int(yyTok1[0]) goto out } if char < len(yyTok1) { - c = yyTok1[char] + token = int(yyTok1[char]) goto out } if char >= yyPrivate { if char < yyPrivate+len(yyTok2) { - c = yyTok2[char-yyPrivate] + token = int(yyTok2[char-yyPrivate]) goto out } } for i := 0; i < len(yyTok3); i += 2 { - c = yyTok3[i+0] - if c == char { - c = yyTok3[i+1] + token = int(yyTok3[i+0]) + if token == char { + token = int(yyTok3[i+1]) goto out } } out: - if c == 0 { - c = yyTok2[1] /* unknown char */ + if token == 0 { + token = int(yyTok2[1]) /* unknown char */ } if yyDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", yyTokname(c), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char)) } - return c + return char, token } func yyParse(yylex yyLexer) int { + return yyNewParser().Parse(yylex) +} + +func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int { var yyn int - var yylval yySymType var yyVAL yySymType - yyS := make([]yySymType, yyMaxDepth) + var yyDollar []yySymType + _ = yyDollar // silence set and not used + yyS := yyrcvr.stack[:] Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ yystate := 0 - yychar := -1 + yyrcvr.char = -1 + yytoken := -1 // yyrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ yystate = -1 + yyrcvr.char = -1 + yytoken = -1 + }() yyp := -1 goto yystack @@ -419,7 +536,7 @@ ret1: yystack: /* put a state and value onto the stack */ if yyDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate)) + __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate)) } yyp++ @@ -432,21 +549,22 @@ yystack: yyS[yyp].yys = yystate yynewstate: - yyn = yyPact[yystate] + yyn = int(yyPact[yystate]) if yyn <= yyFlag { goto yydefault /* simple state */ } - if yychar < 0 { - yychar = yylex1(yylex, &yylval) + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) } - yyn += yychar + yyn += yytoken if yyn < 0 || yyn >= yyLast { goto yydefault } - yyn = yyAct[yyn] - if yyChk[yyn] == yychar { /* valid shift */ - yychar = -1 - yyVAL = yylval + yyn = int(yyAct[yyn]) + if int(yyChk[yyn]) == yytoken { /* valid shift */ + yyrcvr.char = -1 + yytoken = -1 + yyVAL = yyrcvr.lval yystate = yyn if Errflag > 0 { Errflag-- @@ -456,27 +574,27 @@ yynewstate: yydefault: /* default state action */ - yyn = yyDef[yystate] + yyn = int(yyDef[yystate]) if yyn == -2 { - if yychar < 0 { - yychar = yylex1(yylex, &yylval) + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) } /* look through exception table */ xi := 0 for { - if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate { + if yyExca[xi+0] == -1 && int(yyExca[xi+1]) == yystate { break } xi += 2 } for xi += 2; ; xi += 2 { - yyn = yyExca[xi+0] - if yyn < 0 || yyn == yychar { + yyn = int(yyExca[xi+0]) + if yyn < 0 || yyn == yytoken { break } } - yyn = yyExca[xi+1] + yyn = int(yyExca[xi+1]) if yyn < 0 { goto ret0 } @@ -485,11 +603,11 @@ yydefault: /* error ... attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - yylex.Error("syntax error") + yylex.Error(yyErrorMessage(yystate, yytoken)) Nerrs++ if yyDebug >= 1 { __yyfmt__.Printf("%s", yyStatname(yystate)) - __yyfmt__.Printf(" saw %s\n", yyTokname(yychar)) + __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken)) } fallthrough @@ -498,10 +616,10 @@ yydefault: /* find a state where "error" is a legal shift action */ for yyp >= 0 { - yyn = yyPact[yyS[yyp].yys] + yyErrCode + yyn = int(yyPact[yyS[yyp].yys]) + yyErrCode if yyn >= 0 && yyn < yyLast { - yystate = yyAct[yyn] /* simulate a shift of "error" */ - if yyChk[yystate] == yyErrCode { + yystate = int(yyAct[yyn]) /* simulate a shift of "error" */ + if int(yyChk[yystate]) == yyErrCode { goto yystack } } @@ -517,12 +635,13 @@ yydefault: case 3: /* no shift yet; clobber input char */ if yyDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar)) + __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken)) } - if yychar == yyEofCode { + if yytoken == yyEofCode { goto ret1 } - yychar = -1 + yyrcvr.char = -1 + yytoken = -1 goto yynewstate /* try again in the same state */ } } @@ -536,599 +655,703 @@ yydefault: yypt := yyp _ = yypt // guard against "declared and not used" - yyp -= yyR2[yyn] + yyp -= int(yyR2[yyn]) + // yyp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } yyVAL = yyS[yyp+1] /* consult goto table to find next state */ - yyn = yyR1[yyn] - yyg := yyPgo[yyn] + yyn = int(yyR1[yyn]) + yyg := int(yyPgo[yyn]) yyj := yyg + yyS[yyp].yys + 1 if yyj >= yyLast { - yystate = yyAct[yyg] + yystate = int(yyAct[yyg]) } else { - yystate = yyAct[yyj] - if yyChk[yystate] != -yyn { - yystate = yyAct[yyg] + yystate = int(yyAct[yyj]) + if int(yyChk[yystate]) != -yyn { + yystate = int(yyAct[yyg]) } } // dummy call; replaced with literal code switch yynt { case 1: - //line parser.go.y:73 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:73 { - yyVAL.stmts = yyS[yypt-0].stmts + yyVAL.stmts = yyDollar[1].stmts if l, ok := yylex.(*Lexer); ok { l.Stmts = yyVAL.stmts } } case 2: - //line parser.go.y:79 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:79 { - yyVAL.stmts = append(yyS[yypt-1].stmts, yyS[yypt-0].stmt) + yyVAL.stmts = append(yyDollar[1].stmts, yyDollar[2].stmt) if l, ok := yylex.(*Lexer); ok { l.Stmts = yyVAL.stmts } } case 3: - //line parser.go.y:85 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:85 { - yyVAL.stmts = append(yyS[yypt-2].stmts, yyS[yypt-1].stmt) + yyVAL.stmts = append(yyDollar[1].stmts, yyDollar[2].stmt) if l, ok := yylex.(*Lexer); ok { l.Stmts = yyVAL.stmts } } case 4: - //line parser.go.y:93 + yyDollar = yyS[yypt-0 : yypt+1] +//line parser.go.y:93 { yyVAL.stmts = []ast.Stmt{} } case 5: - //line parser.go.y:96 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:96 { - yyVAL.stmts = append(yyS[yypt-1].stmts, yyS[yypt-0].stmt) + yyVAL.stmts = append(yyDollar[1].stmts, yyDollar[2].stmt) } case 6: - //line parser.go.y:99 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:99 { - yyVAL.stmts = yyS[yypt-1].stmts + yyVAL.stmts = yyDollar[1].stmts } case 7: - //line parser.go.y:104 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:104 { - yyVAL.stmts = yyS[yypt-0].stmts + yyVAL.stmts = yyDollar[1].stmts } case 8: - //line parser.go.y:109 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:109 { - yyVAL.stmt = &ast.AssignStmt{Lhs: yyS[yypt-2].exprlist, Rhs: yyS[yypt-0].exprlist} - yyVAL.stmt.SetLine(yyS[yypt-2].exprlist[0].Line()) + yyVAL.stmt = &ast.AssignStmt{Lhs: yyDollar[1].exprlist, Rhs: yyDollar[3].exprlist} + yyVAL.stmt.SetLine(yyDollar[1].exprlist[0].Line()) } case 9: - //line parser.go.y:114 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:114 { - if _, ok := yyS[yypt-0].expr.(*ast.FuncCallExpr); !ok { + if _, ok := yyDollar[1].expr.(*ast.FuncCallExpr); !ok { yylex.(*Lexer).Error("parse error") } else { - yyVAL.stmt = &ast.FuncCallStmt{Expr: yyS[yypt-0].expr} - yyVAL.stmt.SetLine(yyS[yypt-0].expr.Line()) + yyVAL.stmt = &ast.FuncCallStmt{Expr: yyDollar[1].expr} + yyVAL.stmt.SetLine(yyDollar[1].expr.Line()) } } case 10: - //line parser.go.y:122 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:122 { - yyVAL.stmt = &ast.DoBlockStmt{Stmts: yyS[yypt-1].stmts} - yyVAL.stmt.SetLine(yyS[yypt-2].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt = &ast.DoBlockStmt{Stmts: yyDollar[2].stmts} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[3].token.Pos.Line) } case 11: - //line parser.go.y:127 + yyDollar = yyS[yypt-5 : yypt+1] +//line parser.go.y:127 { - yyVAL.stmt = &ast.WhileStmt{Condition: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts} - yyVAL.stmt.SetLine(yyS[yypt-4].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt = 
&ast.WhileStmt{Condition: yyDollar[2].expr, Stmts: yyDollar[4].stmts} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[5].token.Pos.Line) } case 12: - //line parser.go.y:132 + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:132 { - yyVAL.stmt = &ast.RepeatStmt{Condition: yyS[yypt-0].expr, Stmts: yyS[yypt-2].stmts} - yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].expr.Line()) + yyVAL.stmt = &ast.RepeatStmt{Condition: yyDollar[4].expr, Stmts: yyDollar[2].stmts} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[4].expr.Line()) } case 13: - //line parser.go.y:137 + yyDollar = yyS[yypt-6 : yypt+1] +//line parser.go.y:137 { - yyVAL.stmt = &ast.IfStmt{Condition: yyS[yypt-4].expr, Then: yyS[yypt-2].stmts} + yyVAL.stmt = &ast.IfStmt{Condition: yyDollar[2].expr, Then: yyDollar[4].stmts} cur := yyVAL.stmt - for _, elseif := range yyS[yypt-1].stmts { + for _, elseif := range yyDollar[5].stmts { cur.(*ast.IfStmt).Else = []ast.Stmt{elseif} cur = elseif } - yyVAL.stmt.SetLine(yyS[yypt-5].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[6].token.Pos.Line) } case 14: - //line parser.go.y:147 + yyDollar = yyS[yypt-8 : yypt+1] +//line parser.go.y:147 { - yyVAL.stmt = &ast.IfStmt{Condition: yyS[yypt-6].expr, Then: yyS[yypt-4].stmts} + yyVAL.stmt = &ast.IfStmt{Condition: yyDollar[2].expr, Then: yyDollar[4].stmts} cur := yyVAL.stmt - for _, elseif := range yyS[yypt-3].stmts { + for _, elseif := range yyDollar[5].stmts { cur.(*ast.IfStmt).Else = []ast.Stmt{elseif} cur = elseif } - cur.(*ast.IfStmt).Else = yyS[yypt-1].stmts - yyVAL.stmt.SetLine(yyS[yypt-7].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + cur.(*ast.IfStmt).Else = yyDollar[7].stmts + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[8].token.Pos.Line) } case 15: - //line parser.go.y:158 + yyDollar = yyS[yypt-9 : yypt+1] +//line parser.go.y:158 { - yyVAL.stmt = &ast.NumberForStmt{Name: yyS[yypt-7].token.Str, Init: yyS[yypt-5].expr, Limit: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts} - yyVAL.stmt.SetLine(yyS[yypt-8].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt = &ast.NumberForStmt{Name: yyDollar[2].token.Str, Init: yyDollar[4].expr, Limit: yyDollar[6].expr, Stmts: yyDollar[8].stmts} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[9].token.Pos.Line) } case 16: - //line parser.go.y:163 + yyDollar = yyS[yypt-11 : yypt+1] +//line parser.go.y:163 { - yyVAL.stmt = &ast.NumberForStmt{Name: yyS[yypt-9].token.Str, Init: yyS[yypt-7].expr, Limit: yyS[yypt-5].expr, Step: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts} - yyVAL.stmt.SetLine(yyS[yypt-10].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt = &ast.NumberForStmt{Name: yyDollar[2].token.Str, Init: yyDollar[4].expr, Limit: yyDollar[6].expr, Step: yyDollar[8].expr, Stmts: yyDollar[10].stmts} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[11].token.Pos.Line) } case 17: - //line parser.go.y:168 + yyDollar = yyS[yypt-7 : yypt+1] +//line parser.go.y:168 { - yyVAL.stmt = &ast.GenericForStmt{Names: yyS[yypt-5].namelist, Exprs: yyS[yypt-3].exprlist, Stmts: yyS[yypt-1].stmts} - yyVAL.stmt.SetLine(yyS[yypt-6].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt = &ast.GenericForStmt{Names: 
yyDollar[2].namelist, Exprs: yyDollar[4].exprlist, Stmts: yyDollar[6].stmts} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[7].token.Pos.Line) } case 18: - //line parser.go.y:173 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:173 { - yyVAL.stmt = &ast.FuncDefStmt{Name: yyS[yypt-1].funcname, Func: yyS[yypt-0].funcexpr} - yyVAL.stmt.SetLine(yyS[yypt-2].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].funcexpr.LastLine()) + yyVAL.stmt = &ast.FuncDefStmt{Name: yyDollar[2].funcname, Func: yyDollar[3].funcexpr} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[3].funcexpr.LastLine()) } case 19: - //line parser.go.y:178 + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:178 { - yyVAL.stmt = &ast.LocalAssignStmt{Names: []string{yyS[yypt-1].token.Str}, Exprs: []ast.Expr{yyS[yypt-0].funcexpr}} - yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line) - yyVAL.stmt.SetLastLine(yyS[yypt-0].funcexpr.LastLine()) + yyVAL.stmt = &ast.LocalAssignStmt{Names: []string{yyDollar[3].token.Str}, Exprs: []ast.Expr{yyDollar[4].funcexpr}} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyDollar[4].funcexpr.LastLine()) } case 20: - //line parser.go.y:183 + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:183 { - yyVAL.stmt = &ast.LocalAssignStmt{Names: yyS[yypt-2].namelist, Exprs: yyS[yypt-0].exprlist} - yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line) + yyVAL.stmt = &ast.LocalAssignStmt{Names: yyDollar[2].namelist, Exprs: yyDollar[4].exprlist} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } case 21: - //line parser.go.y:187 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:187 { - yyVAL.stmt = &ast.LocalAssignStmt{Names: yyS[yypt-0].namelist, Exprs: []ast.Expr{}} - yyVAL.stmt.SetLine(yyS[yypt-1].token.Pos.Line) + yyVAL.stmt = &ast.LocalAssignStmt{Names: yyDollar[2].namelist, Exprs: []ast.Expr{}} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } case 22: - //line parser.go.y:193 + yyDollar = yyS[yypt-0 : yypt+1] +//line parser.go.y:193 { yyVAL.stmts = []ast.Stmt{} } case 23: - //line parser.go.y:196 + yyDollar = yyS[yypt-5 : yypt+1] +//line parser.go.y:196 { - yyVAL.stmts = append(yyS[yypt-4].stmts, &ast.IfStmt{Condition: yyS[yypt-2].expr, Then: yyS[yypt-0].stmts}) - yyVAL.stmts[len(yyVAL.stmts)-1].SetLine(yyS[yypt-3].token.Pos.Line) + yyVAL.stmts = append(yyDollar[1].stmts, &ast.IfStmt{Condition: yyDollar[3].expr, Then: yyDollar[5].stmts}) + yyVAL.stmts[len(yyVAL.stmts)-1].SetLine(yyDollar[2].token.Pos.Line) } case 24: - //line parser.go.y:202 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:202 { yyVAL.stmt = &ast.ReturnStmt{Exprs: nil} - yyVAL.stmt.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } case 25: - //line parser.go.y:206 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:206 { - yyVAL.stmt = &ast.ReturnStmt{Exprs: yyS[yypt-0].exprlist} - yyVAL.stmt.SetLine(yyS[yypt-1].token.Pos.Line) + yyVAL.stmt = &ast.ReturnStmt{Exprs: yyDollar[2].exprlist} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } case 26: - //line parser.go.y:210 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:210 { yyVAL.stmt = &ast.BreakStmt{} - yyVAL.stmt.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } case 27: - //line parser.go.y:216 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:216 { - yyVAL.funcname = yyS[yypt-0].funcname + yyVAL.funcname = yyDollar[1].funcname } case 28: - //line parser.go.y:219 + yyDollar = yyS[yypt-3 : yypt+1] 
+//line parser.go.y:219 { - yyVAL.funcname = &ast.FuncName{Func: nil, Receiver: yyS[yypt-2].funcname.Func, Method: yyS[yypt-0].token.Str} + yyVAL.funcname = &ast.FuncName{Func: nil, Receiver: yyDollar[1].funcname.Func, Method: yyDollar[3].token.Str} } case 29: - //line parser.go.y:224 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:224 { - yyVAL.funcname = &ast.FuncName{Func: &ast.IdentExpr{Value: yyS[yypt-0].token.Str}} - yyVAL.funcname.Func.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.funcname = &ast.FuncName{Func: &ast.IdentExpr{Value: yyDollar[1].token.Str}} + yyVAL.funcname.Func.SetLine(yyDollar[1].token.Pos.Line) } case 30: - //line parser.go.y:228 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:228 { - key := &ast.StringExpr{Value: yyS[yypt-0].token.Str} - key.SetLine(yyS[yypt-0].token.Pos.Line) - fn := &ast.AttrGetExpr{Object: yyS[yypt-2].funcname.Func, Key: key} - fn.SetLine(yyS[yypt-0].token.Pos.Line) + key := &ast.StringExpr{Value: yyDollar[3].token.Str} + key.SetLine(yyDollar[3].token.Pos.Line) + fn := &ast.AttrGetExpr{Object: yyDollar[1].funcname.Func, Key: key} + fn.SetLine(yyDollar[3].token.Pos.Line) yyVAL.funcname = &ast.FuncName{Func: fn} } case 31: - //line parser.go.y:237 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:237 { - yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } case 32: - //line parser.go.y:240 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:240 { - yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr) + yyVAL.exprlist = append(yyDollar[1].exprlist, yyDollar[3].expr) } case 33: - //line parser.go.y:245 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:245 { - yyVAL.expr = &ast.IdentExpr{Value: yyS[yypt-0].token.Str} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr = &ast.IdentExpr{Value: yyDollar[1].token.Str} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 34: - //line parser.go.y:249 + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:249 { - yyVAL.expr = &ast.AttrGetExpr{Object: yyS[yypt-3].expr, Key: yyS[yypt-1].expr} - yyVAL.expr.SetLine(yyS[yypt-3].expr.Line()) + yyVAL.expr = &ast.AttrGetExpr{Object: yyDollar[1].expr, Key: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 35: - //line parser.go.y:253 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:253 { - key := &ast.StringExpr{Value: yyS[yypt-0].token.Str} - key.SetLine(yyS[yypt-0].token.Pos.Line) - yyVAL.expr = &ast.AttrGetExpr{Object: yyS[yypt-2].expr, Key: key} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + key := &ast.StringExpr{Value: yyDollar[3].token.Str} + key.SetLine(yyDollar[3].token.Pos.Line) + yyVAL.expr = &ast.AttrGetExpr{Object: yyDollar[1].expr, Key: key} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 36: - //line parser.go.y:261 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:261 { - yyVAL.namelist = []string{yyS[yypt-0].token.Str} + yyVAL.namelist = []string{yyDollar[1].token.Str} } case 37: - //line parser.go.y:264 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:264 { - yyVAL.namelist = append(yyS[yypt-2].namelist, yyS[yypt-0].token.Str) + yyVAL.namelist = append(yyDollar[1].namelist, yyDollar[3].token.Str) } case 38: - //line parser.go.y:269 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:269 { - yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } case 39: - //line parser.go.y:272 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:272 { - yyVAL.exprlist = 
append(yyS[yypt-2].exprlist, yyS[yypt-0].expr) + yyVAL.exprlist = append(yyDollar[1].exprlist, yyDollar[3].expr) } case 40: - //line parser.go.y:277 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:277 { yyVAL.expr = &ast.NilExpr{} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 41: - //line parser.go.y:281 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:281 { yyVAL.expr = &ast.FalseExpr{} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 42: - //line parser.go.y:285 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:285 { yyVAL.expr = &ast.TrueExpr{} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 43: - //line parser.go.y:289 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:289 { - yyVAL.expr = &ast.NumberExpr{Value: yyS[yypt-0].token.Str} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr = &ast.NumberExpr{Value: yyDollar[1].token.Str} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 44: - //line parser.go.y:293 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:293 { yyVAL.expr = &ast.Comma3Expr{} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 45: - //line parser.go.y:297 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:297 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 46: - //line parser.go.y:300 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:300 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 47: - //line parser.go.y:303 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:303 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 48: - //line parser.go.y:306 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:306 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 49: - //line parser.go.y:309 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:309 { - yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "or", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyDollar[1].expr, Operator: "or", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 50: - //line parser.go.y:313 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:313 { - yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "and", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyDollar[1].expr, Operator: "and", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 51: - //line parser.go.y:317 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:317 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: ">", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: ">", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 52: - //line parser.go.y:321 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:321 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "<", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "<", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 53: - //line parser.go.y:325 + yyDollar = yyS[yypt-3 : 
yypt+1] +//line parser.go.y:325 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: ">=", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: ">=", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 54: - //line parser.go.y:329 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:329 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "<=", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "<=", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 55: - //line parser.go.y:333 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:333 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "==", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "==", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 56: - //line parser.go.y:337 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:337 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "~=", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "~=", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 57: - //line parser.go.y:341 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:341 { - yyVAL.expr = &ast.StringConcatOpExpr{Lhs: yyS[yypt-2].expr, Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.StringConcatOpExpr{Lhs: yyDollar[1].expr, Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 58: - //line parser.go.y:345 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:345 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "+", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "+", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 59: - //line parser.go.y:349 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:349 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "-", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "-", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 60: - //line parser.go.y:353 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:353 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "*", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "*", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 61: - //line parser.go.y:357 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:357 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "/", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "/", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 62: - //line parser.go.y:361 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:361 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "%", Rhs: 
yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "%", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 63: - //line parser.go.y:365 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:365 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "^", Rhs: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "^", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 64: - //line parser.go.y:369 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:369 { - yyVAL.expr = &ast.UnaryMinusOpExpr{Expr: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-0].expr.Line()) + yyVAL.expr = &ast.UnaryMinusOpExpr{Expr: yyDollar[2].expr} + yyVAL.expr.SetLine(yyDollar[2].expr.Line()) } case 65: - //line parser.go.y:373 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:373 { - yyVAL.expr = &ast.UnaryNotOpExpr{Expr: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-0].expr.Line()) + yyVAL.expr = &ast.UnaryNotOpExpr{Expr: yyDollar[2].expr} + yyVAL.expr.SetLine(yyDollar[2].expr.Line()) } case 66: - //line parser.go.y:377 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:377 { - yyVAL.expr = &ast.UnaryLenOpExpr{Expr: yyS[yypt-0].expr} - yyVAL.expr.SetLine(yyS[yypt-0].expr.Line()) + yyVAL.expr = &ast.UnaryLenOpExpr{Expr: yyDollar[2].expr} + yyVAL.expr.SetLine(yyDollar[2].expr.Line()) } case 67: - //line parser.go.y:383 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:383 { - yyVAL.expr = &ast.StringExpr{Value: yyS[yypt-0].token.Str} - yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr = &ast.StringExpr{Value: yyDollar[1].token.Str} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 68: - //line parser.go.y:389 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:389 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 69: - //line parser.go.y:392 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:392 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 70: - //line parser.go.y:395 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:395 { - yyVAL.expr = yyS[yypt-0].expr + yyVAL.expr = yyDollar[1].expr } case 71: - //line parser.go.y:398 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:398 { - yyVAL.expr = yyS[yypt-1].expr - yyVAL.expr.SetLine(yyS[yypt-2].token.Pos.Line) + if ex, ok := yyDollar[2].expr.(*ast.Comma3Expr); ok { + ex.AdjustRet = true + } + yyVAL.expr = yyDollar[2].expr + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 72: - //line parser.go.y:404 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:407 { - yyS[yypt-1].expr.(*ast.FuncCallExpr).AdjustRet = true - yyVAL.expr = yyS[yypt-1].expr + yyDollar[2].expr.(*ast.FuncCallExpr).AdjustRet = true + yyVAL.expr = yyDollar[2].expr } case 73: - //line parser.go.y:410 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:413 { - yyVAL.expr = &ast.FuncCallExpr{Func: yyS[yypt-1].expr, Args: yyS[yypt-0].exprlist} - yyVAL.expr.SetLine(yyS[yypt-1].expr.Line()) + yyVAL.expr = &ast.FuncCallExpr{Func: yyDollar[1].expr, Args: yyDollar[2].exprlist} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 74: - //line parser.go.y:414 + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:417 { - yyVAL.expr = &ast.FuncCallExpr{Method: yyS[yypt-1].token.Str, Receiver: yyS[yypt-3].expr, Args: yyS[yypt-0].exprlist} - yyVAL.expr.SetLine(yyS[yypt-3].expr.Line()) + yyVAL.expr 
= &ast.FuncCallExpr{Method: yyDollar[3].token.Str, Receiver: yyDollar[1].expr, Args: yyDollar[4].exprlist} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 75: - //line parser.go.y:420 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:423 { if yylex.(*Lexer).PNewLine { - yylex.(*Lexer).TokenError(yyS[yypt-1].token, "ambiguous syntax (function call x new statement)") + yylex.(*Lexer).TokenError(yyDollar[1].token, "ambiguous syntax (function call x new statement)") } yyVAL.exprlist = []ast.Expr{} } case 76: - //line parser.go.y:426 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:429 { if yylex.(*Lexer).PNewLine { - yylex.(*Lexer).TokenError(yyS[yypt-2].token, "ambiguous syntax (function call x new statement)") + yylex.(*Lexer).TokenError(yyDollar[1].token, "ambiguous syntax (function call x new statement)") } - yyVAL.exprlist = yyS[yypt-1].exprlist + yyVAL.exprlist = yyDollar[2].exprlist } case 77: - //line parser.go.y:432 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:435 { - yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } case 78: - //line parser.go.y:435 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:438 { - yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } case 79: - //line parser.go.y:440 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:443 { - yyVAL.expr = &ast.FunctionExpr{ParList: yyS[yypt-0].funcexpr.ParList, Stmts: yyS[yypt-0].funcexpr.Stmts} - yyVAL.expr.SetLine(yyS[yypt-1].token.Pos.Line) - yyVAL.expr.SetLastLine(yyS[yypt-0].funcexpr.LastLine()) + yyVAL.expr = &ast.FunctionExpr{ParList: yyDollar[2].funcexpr.ParList, Stmts: yyDollar[2].funcexpr.Stmts} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.expr.SetLastLine(yyDollar[2].funcexpr.LastLine()) } case 80: - //line parser.go.y:447 + yyDollar = yyS[yypt-5 : yypt+1] +//line parser.go.y:450 { - yyVAL.funcexpr = &ast.FunctionExpr{ParList: yyS[yypt-3].parlist, Stmts: yyS[yypt-1].stmts} - yyVAL.funcexpr.SetLine(yyS[yypt-4].token.Pos.Line) - yyVAL.funcexpr.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.funcexpr = &ast.FunctionExpr{ParList: yyDollar[2].parlist, Stmts: yyDollar[4].stmts} + yyVAL.funcexpr.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.funcexpr.SetLastLine(yyDollar[5].token.Pos.Line) } case 81: - //line parser.go.y:452 + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:455 { - yyVAL.funcexpr = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: yyS[yypt-1].stmts} - yyVAL.funcexpr.SetLine(yyS[yypt-3].token.Pos.Line) - yyVAL.funcexpr.SetLastLine(yyS[yypt-0].token.Pos.Line) + yyVAL.funcexpr = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: yyDollar[3].stmts} + yyVAL.funcexpr.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.funcexpr.SetLastLine(yyDollar[4].token.Pos.Line) } case 82: - //line parser.go.y:459 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:462 { yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}} } case 83: - //line parser.go.y:462 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:465 { yyVAL.parlist = &ast.ParList{HasVargs: false, Names: []string{}} - yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyS[yypt-0].namelist...) + yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyDollar[1].namelist...) 
} case 84: - //line parser.go.y:466 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:469 { yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}} - yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyS[yypt-2].namelist...) + yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyDollar[1].namelist...) } case 85: - //line parser.go.y:473 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:476 { yyVAL.expr = &ast.TableExpr{Fields: []*ast.Field{}} - yyVAL.expr.SetLine(yyS[yypt-1].token.Pos.Line) + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 86: - //line parser.go.y:477 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:480 { - yyVAL.expr = &ast.TableExpr{Fields: yyS[yypt-1].fieldlist} - yyVAL.expr.SetLine(yyS[yypt-2].token.Pos.Line) + yyVAL.expr = &ast.TableExpr{Fields: yyDollar[2].fieldlist} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 87: - //line parser.go.y:484 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:487 { - yyVAL.fieldlist = []*ast.Field{yyS[yypt-0].field} + yyVAL.fieldlist = []*ast.Field{yyDollar[1].field} } case 88: - //line parser.go.y:487 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:490 { - yyVAL.fieldlist = append(yyS[yypt-2].fieldlist, yyS[yypt-0].field) + yyVAL.fieldlist = append(yyDollar[1].fieldlist, yyDollar[3].field) } case 89: - //line parser.go.y:490 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:493 { - yyVAL.fieldlist = yyS[yypt-1].fieldlist + yyVAL.fieldlist = yyDollar[1].fieldlist } case 90: - //line parser.go.y:495 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:498 { - yyVAL.field = &ast.Field{Key: &ast.StringExpr{Value: yyS[yypt-2].token.Str}, Value: yyS[yypt-0].expr} - yyVAL.field.Key.SetLine(yyS[yypt-2].token.Pos.Line) + yyVAL.field = &ast.Field{Key: &ast.StringExpr{Value: yyDollar[1].token.Str}, Value: yyDollar[3].expr} + yyVAL.field.Key.SetLine(yyDollar[1].token.Pos.Line) } case 91: - //line parser.go.y:499 + yyDollar = yyS[yypt-5 : yypt+1] +//line parser.go.y:502 { - yyVAL.field = &ast.Field{Key: yyS[yypt-3].expr, Value: yyS[yypt-0].expr} + yyVAL.field = &ast.Field{Key: yyDollar[2].expr, Value: yyDollar[5].expr} } case 92: - //line parser.go.y:502 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:505 { - yyVAL.field = &ast.Field{Value: yyS[yypt-0].expr} + yyVAL.field = &ast.Field{Value: yyDollar[1].expr} } case 93: - //line parser.go.y:507 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:510 { yyVAL.fieldsep = "," } case 94: - //line parser.go.y:510 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:513 { yyVAL.fieldsep = ";" } diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y index 956133db292..9a9f831e611 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y +++ b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y @@ -396,6 +396,9 @@ prefixexp: $$ = $1 } | '(' expr ')' { + if ex, ok := $2.(*ast.Comma3Expr); ok { + ex.AdjustRet = true + } $$ = $2 $$.SetLine($1.Pos.Line) } diff --git a/vendor/github.com/yuin/gopher-lua/pm/pm.go b/vendor/github.com/yuin/gopher-lua/pm/pm.go index e15bc21005d..e5c651f942e 100644 --- a/vendor/github.com/yuin/gopher-lua/pm/pm.go +++ b/vendor/github.com/yuin/gopher-lua/pm/pm.go @@ -210,7 +210,7 @@ func (pn *singleClass) Matches(ch int) bool { case 'l', 'L': ret = 'a' <= ch && ch <= 'z' case 'p', 'P': - ret = (0x21 <= ch && ch <= 0x2f) || (0x30 <= ch && ch <= 0x40) || (0x5b <= ch && ch <= 0x60) || (0x7b <= ch && ch <= 0x7e) + ret = (0x21 <= ch && ch <= 0x2f) || (0x3a <= ch 
&& ch <= 0x40) || (0x5b <= ch && ch <= 0x60) || (0x7b <= ch && ch <= 0x7e) case 's', 'S': switch ch { case ' ', '\f', '\n', '\r', '\t', '\v': diff --git a/vendor/github.com/yuin/gopher-lua/table.go b/vendor/github.com/yuin/gopher-lua/table.go index e220bd9c3b6..ddf14dd88fd 100644 --- a/vendor/github.com/yuin/gopher-lua/table.go +++ b/vendor/github.com/yuin/gopher-lua/table.go @@ -46,7 +46,7 @@ func newLTable(acap int, hcap int) *LTable { return tb } -// Len returns length of this LTable. +// Len returns length of this LTable without using __len. func (tb *LTable) Len() int { if tb.array == nil { return 0 diff --git a/vendor/github.com/yuin/gopher-lua/vm.go b/vendor/github.com/yuin/gopher-lua/vm.go index f3733f1300c..aaa04dc9aad 100644 --- a/vendor/github.com/yuin/gopher-lua/vm.go +++ b/vendor/github.com/yuin/gopher-lua/vm.go @@ -549,7 +549,7 @@ func init() { if ret.Type() == LTNumber { reg.SetNumber(RA, ret.(LNumber)) } else { - reg.SetNumber(RA, LNumber(0)) + reg.Set(RA, ret) } } else if lv.Type() == LTTable { reg.SetNumber(RA, LNumber(lv.(*LTable).Len())) diff --git a/vendor/modules.txt b/vendor/modules.txt index fce90f0fb24..b8a8fd0ebfd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -87,7 +87,7 @@ github.com/alecthomas/units # github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a ## explicit github.com/alicebob/gopher-json -# github.com/alicebob/miniredis/v2 v2.22.0 +# github.com/alicebob/miniredis/v2 v2.23.1 ## explicit; go 1.14 github.com/alicebob/miniredis/v2 github.com/alicebob/miniredis/v2/geohash @@ -867,8 +867,8 @@ github.com/weaveworks/common/user # github.com/weaveworks/promrus v1.2.0 ## explicit github.com/weaveworks/promrus -# github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 -## explicit; go 1.14 +# github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 +## explicit; go 1.17 github.com/yuin/gopher-lua github.com/yuin/gopher-lua/ast github.com/yuin/gopher-lua/parse From b271db2f55bf8311a7b30bfe3e8b783ee1e9a135 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 13 Dec 2022 11:51:31 -0800 Subject: [PATCH 014/133] Logging Vertical Sharding info on query stats (#5037) * logging sharding stats Signed-off-by: Alan Protasio * test Signed-off-by: Alan Protasio * improving tests Signed-off-by: Alan Protasio * changelog Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/frontend/transport/handler.go | 10 +- pkg/frontend/v1/frontendv1pb/frontend.pb.go | 102 ++++---- pkg/frontend/v1/frontendv1pb/frontend.proto | 2 +- pkg/frontend/v2/frontend_test.go | 2 +- pkg/frontend/v2/frontendv2pb/frontend.pb.go | 83 ++++--- pkg/frontend/v2/frontendv2pb/frontend.proto | 2 +- pkg/querier/stats/stats.go | 73 ++++-- pkg/querier/stats/stats.pb.go | 246 ++++++++++++++++++-- pkg/querier/stats/stats.proto | 3 + pkg/querier/stats/stats_test.go | 51 +++- pkg/querier/tripperware/shard_by.go | 8 + pkg/querier/worker/frontend_processor.go | 6 +- pkg/querier/worker/scheduler_processor.go | 2 +- 14 files changed, 443 insertions(+), 148 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a437992eb4..d31b9d8746e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ * [ENHANCEMENT] Query-frontend/scheduler: add a new limit `frontend.max-outstanding-requests-per-tenant` for configuring queue size per tenant. Started deprecating two flags `-query-scheduler.max-outstanding-requests-per-tenant` and `-querier.max-outstanding-requests-per-tenant`, and change their value default to 0. 
Now if both the old flag and new flag are specified, the old flag's queue size will be picked. #5005 * [ENHANCEMENT] Query-tee: Add `/api/v1/query_exemplars` API endpoint support. #5010 * [ENHANCEMENT] Query Frontend/Query Scheduler: Increase upper bound to 60s for queue duration histogram metric. #5029 +* [ENHANCEMENT] Query Frontend: Log Vertical sharding information when `query_stats_enabled` is enabled. #5037 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000 diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 99f2116e5b1..794f61cf383 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -112,7 +112,7 @@ func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logge func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var ( - stats *querier_stats.Stats + stats *querier_stats.QueryStats queryString url.Values ) @@ -185,7 +185,7 @@ func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, query level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) } -func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.Stats, error error) { +func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.QueryStats, error error) { tenantIDs, err := tenant.TenantIDs(r.Context()) if err != nil { return @@ -214,7 +214,9 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer "fetched_series_count", numSeries, "fetched_chunks_bytes", numBytes, "fetched_data_bytes", numDataBytes, - }, formatQueryString(queryString)...) + }, stats.LoadExtraFields()...) + + logMessage = append(logMessage, formatQueryString(queryString)...) 
if error != nil { s, ok := status.FromError(error) @@ -264,7 +266,7 @@ func writeError(w http.ResponseWriter, err error) { server.WriteError(w, err) } -func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) { +func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.QueryStats) { if stats != nil { parts := make([]string, 0) parts = append(parts, statsValue("querier_wall_time", stats.LoadWallTime())) diff --git a/pkg/frontend/v1/frontendv1pb/frontend.pb.go b/pkg/frontend/v1/frontendv1pb/frontend.pb.go index b8480261ed7..6e8a9be191b 100644 --- a/pkg/frontend/v1/frontendv1pb/frontend.pb.go +++ b/pkg/frontend/v1/frontendv1pb/frontend.pb.go @@ -9,7 +9,8 @@ package frontendv1pb import ( context "context" fmt "fmt" - stats "github.com/cortexproject/cortex/pkg/querier/stats" + _ "github.com/cortexproject/cortex/pkg/querier/stats" + github_com_cortexproject_cortex_pkg_querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" httpgrpc "github.com/weaveworks/common/httpgrpc" @@ -118,9 +119,9 @@ func (m *FrontendToClient) GetStatsEnabled() bool { } type ClientToFrontend struct { - HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` - ClientID string `protobuf:"bytes,2,opt,name=clientID,proto3" json:"clientID,omitempty"` - Stats *stats.Stats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` + HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` + ClientID string `protobuf:"bytes,2,opt,name=clientID,proto3" json:"clientID,omitempty"` + Stats *github_com_cortexproject_cortex_pkg_querier_stats.QueryStats `protobuf:"bytes,3,opt,name=stats,proto3,customtype=github.com/cortexproject/cortex/pkg/querier/stats.QueryStats" json:"stats,omitempty"` } func (m *ClientToFrontend) Reset() { *m = ClientToFrontend{} } @@ -169,13 +170,6 @@ func (m *ClientToFrontend) GetClientID() string { return "" } -func (m *ClientToFrontend) GetStats() *stats.Stats { - if m != nil { - return m.Stats - } - return nil -} - type NotifyClientShutdownRequest struct { ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` } @@ -265,38 +259,40 @@ func init() { func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } var fileDescriptor_eca3873955a29cfe = []byte{ - // 496 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0x3d, 0x50, 0x4a, 0x98, 0x44, 0x91, 0xb5, 0x02, 0x14, 0x19, 0xb4, 0x8a, 0x2c, 0x40, - 0x11, 0x12, 0x36, 0x04, 0x24, 0x04, 0x12, 0x97, 0xd2, 0x50, 0x7a, 0x41, 0xc5, 0x31, 0x17, 0x2e, - 0x55, 0xec, 0x6c, 0x9c, 0xd0, 0xc6, 0xeb, 0xda, 0xeb, 0x86, 0xdc, 0x78, 0x02, 0x84, 0xc4, 0x4b, - 0xf0, 0x0c, 0x3c, 0x01, 0xc7, 0x1c, 0x7b, 0x24, 0xce, 0x85, 0x63, 0x1f, 0x01, 0x65, 0xd7, 0x71, - 0x9d, 0xa8, 0x82, 0xcb, 0x6a, 0xc7, 0x33, 0xff, 0xcc, 0x37, 0xbf, 0x17, 0xeb, 0x83, 0x98, 0x87, - 0x82, 0x85, 0x7d, 0x2b, 0x8a, 0xb9, 0xe0, 0xa4, 0xb2, 0x8a, 0x8d, 0x47, 0xc1, 0x48, 0x0c, 0x53, - 0xcf, 0xf2, 0xf9, 0xd8, 0x0e, 0x78, 0xc0, 0x6d, 0x59, 0xe0, 0xa5, 0x03, 0x19, 0xc9, 0x40, 0xde, - 0x94, 0xd0, 0x78, 0x56, 0x2a, 0x9f, 0xb0, 0xde, 0x29, 0x9b, 0xf0, 0xf8, 0x28, 0xb1, 0x7d, 0x3e, - 0x1e, 0xf3, 0xd0, 0x1e, 0x0a, 0x11, 0x05, 0x71, 0xe4, 0x17, 0x97, 0x5c, 0xf5, 
0xaa, 0xa4, 0xf2, - 0x79, 0x2c, 0xd8, 0xe7, 0x28, 0xe6, 0x9f, 0x98, 0x2f, 0xf2, 0xc8, 0x8e, 0x8e, 0x02, 0xfb, 0x24, - 0x65, 0xf1, 0x88, 0xc5, 0x76, 0x22, 0x7a, 0x22, 0x51, 0xa7, 0x92, 0x9b, 0xdf, 0x01, 0xf5, 0x37, - 0x39, 0xb0, 0xcb, 0x5f, 0x1f, 0x8f, 0x58, 0x28, 0xc8, 0x73, 0xac, 0x2e, 0xa7, 0x38, 0xec, 0x24, - 0x65, 0x89, 0x68, 0x40, 0x13, 0x5a, 0xd5, 0xf6, 0x2d, 0xab, 0x98, 0xfc, 0xd6, 0x75, 0x0f, 0xf2, - 0xa4, 0x53, 0xae, 0x24, 0x26, 0x6e, 0x89, 0x69, 0xc4, 0x1a, 0x57, 0x9a, 0xd0, 0xaa, 0xb7, 0xeb, - 0x56, 0x61, 0x8d, 0x3b, 0x8d, 0x98, 0x23, 0x73, 0xc4, 0xc4, 0x9a, 0x04, 0xe8, 0x84, 0x3d, 0xef, - 0x98, 0xf5, 0x1b, 0x57, 0x9b, 0xd0, 0xaa, 0x38, 0x6b, 0xdf, 0xcc, 0xaf, 0x80, 0xba, 0x62, 0x71, - 0xf9, 0x8a, 0x8e, 0xbc, 0xc4, 0x9a, 0x9a, 0x95, 0x44, 0x3c, 0x4c, 0x58, 0x8e, 0x75, 0x7b, 0x13, - 0x4b, 0x65, 0x9d, 0xb5, 0x5a, 0x62, 0x60, 0xc5, 0x97, 0xfd, 0xf6, 0x77, 0x25, 0xdc, 0x0d, 0xa7, - 0x88, 0x89, 0x89, 0xd7, 0xe4, 0x70, 0x49, 0x52, 0x6d, 0xd7, 0x2c, 0xe5, 0x4f, 0x77, 0x79, 0x3a, - 0x2a, 0x65, 0xbe, 0xc0, 0x3b, 0xef, 0xb8, 0x18, 0x0d, 0xa6, 0x8a, 0xaa, 0x3b, 0x4c, 0x45, 0x9f, - 0x4f, 0xc2, 0xd5, 0xde, 0xe5, 0xf6, 0xb0, 0xde, 0xde, 0xa4, 0x78, 0xf7, 0x72, 0xa9, 0x42, 0x7b, - 0x78, 0x0f, 0xb7, 0x96, 0xee, 0x10, 0x1d, 0x6b, 0xcb, 0x05, 0x0e, 0x9d, 0xce, 0xfb, 0x0f, 0x9d, - 0xae, 0xab, 0x6b, 0x04, 0x71, 0x7b, 0xaf, 0xe3, 0x1e, 0xee, 0xef, 0xea, 0xd0, 0xfe, 0x09, 0x58, - 0x29, 0x9c, 0xd8, 0xc3, 0xeb, 0x07, 0x31, 0xf7, 0x59, 0x92, 0x10, 0xe3, 0xc2, 0xe3, 0x4d, 0xc3, - 0x8c, 0x52, 0x6e, 0xf3, 0x17, 0x9b, 0x5a, 0x0b, 0x1e, 0x03, 0x61, 0x78, 0xf3, 0x32, 0x36, 0x72, - 0xff, 0x42, 0xf9, 0x8f, 0xb5, 0x8d, 0x07, 0xff, 0x2b, 0x53, 0x2b, 0xee, 0xec, 0xcc, 0xe6, 0x54, - 0x3b, 0x9b, 0x53, 0xed, 0x7c, 0x4e, 0xe1, 0x4b, 0x46, 0xe1, 0x47, 0x46, 0xe1, 0x57, 0x46, 0x61, - 0x96, 0x51, 0xf8, 0x9d, 0x51, 0xf8, 0x93, 0x51, 0xed, 0x3c, 0xa3, 0xf0, 0x6d, 0x41, 0xb5, 0xd9, - 0x82, 0x6a, 0x67, 0x0b, 0xaa, 0x7d, 0xac, 0xad, 0x9a, 0x9f, 0x3e, 0x89, 0x3c, 0x6f, 0x5b, 0xbe, - 0xd7, 0xa7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xe0, 0x35, 0xe6, 0x6f, 0x03, 0x00, 0x00, + // 515 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x33, 0xba, 0xae, 0xf5, 0xb5, 0x94, 0x30, 0xa8, 0x94, 0x28, 0x63, 0x09, 0x2a, 0x45, + 0x30, 0xd1, 0x2a, 0x88, 0xa2, 0x20, 0x75, 0xeb, 0xba, 0x17, 0xd9, 0x4d, 0xe3, 0xc5, 0xcb, 0xd2, + 0xa4, 0xd3, 0xb4, 0xee, 0x36, 0x93, 0x9d, 0x4c, 0xb6, 0xf6, 0xe6, 0x47, 0x10, 0xfc, 0x12, 0x7e, + 0x06, 0x3f, 0x81, 0xc7, 0x1e, 0x17, 0x0f, 0x62, 0xd3, 0x8b, 0xc7, 0xfd, 0x08, 0xd2, 0x99, 0x34, + 0x9b, 0x96, 0x45, 0xf1, 0x32, 0xcc, 0xcb, 0x7b, 0xff, 0xf7, 0x7e, 0xef, 0x9f, 0x81, 0x6a, 0x9f, + 0xb3, 0x50, 0xd0, 0xb0, 0x67, 0x45, 0x9c, 0x09, 0x86, 0x4b, 0xcb, 0xd8, 0xb8, 0x1f, 0x0c, 0xc5, + 0x20, 0xf1, 0x2c, 0x9f, 0x8d, 0xec, 0x80, 0x05, 0xcc, 0x96, 0x05, 0x5e, 0xd2, 0x97, 0x91, 0x0c, + 0xe4, 0x4d, 0x09, 0x8d, 0xc7, 0x85, 0xf2, 0x31, 0xed, 0x1e, 0xd3, 0x31, 0xe3, 0x07, 0xb1, 0xed, + 0xb3, 0xd1, 0x88, 0x85, 0xf6, 0x40, 0x88, 0x28, 0xe0, 0x91, 0x9f, 0x5f, 0x32, 0xd5, 0x8b, 0x82, + 0xca, 0x67, 0x5c, 0xd0, 0x8f, 0x11, 0x67, 0x1f, 0xa8, 0x2f, 0xb2, 0xc8, 0x8e, 0x0e, 0x02, 0xfb, + 0x28, 0xa1, 0x7c, 0x48, 0xb9, 0x1d, 0x8b, 0xae, 0x88, 0xd5, 0xa9, 0xe4, 0xe6, 0x17, 0x04, 0xfa, + 0xeb, 0x0c, 0xd8, 0x65, 0xaf, 0x0e, 0x87, 0x34, 0x14, 0xf8, 0x09, 0x94, 0x17, 0x53, 0x1c, 0x7a, + 0x94, 0xd0, 0x58, 0xd4, 0x50, 0x1d, 0x35, 0xca, 0xcd, 0x6b, 0x56, 0x3e, 0xf9, 0x8d, 0xeb, 0xee, + 0x66, 0x49, 0xa7, 0x58, 0x89, 0x4d, 0xd8, 0x10, 0x93, 
0x88, 0xd6, 0x2e, 0xd4, 0x51, 0xa3, 0xda, + 0xac, 0x5a, 0xb9, 0x35, 0xee, 0x24, 0xa2, 0x8e, 0xcc, 0x61, 0x13, 0x2a, 0x12, 0xa0, 0x1d, 0x76, + 0xbd, 0x43, 0xda, 0xab, 0x5d, 0xac, 0xa3, 0x46, 0xc9, 0x59, 0xf9, 0x66, 0x4e, 0x11, 0xe8, 0x8a, + 0xc5, 0x65, 0x4b, 0x3a, 0xfc, 0x0c, 0x2a, 0x6a, 0x56, 0x1c, 0xb1, 0x30, 0xa6, 0x19, 0xd6, 0xf5, + 0x75, 0x2c, 0x95, 0x75, 0x56, 0x6a, 0xb1, 0x01, 0x25, 0x5f, 0xf6, 0xdb, 0xd9, 0x92, 0x70, 0x57, + 0x9c, 0x3c, 0xc6, 0x3d, 0xb8, 0x24, 0x87, 0x4b, 0x92, 0x72, 0xb3, 0x62, 0x29, 0x7f, 0x3a, 0x8b, + 0xb3, 0xf5, 0xf2, 0xc7, 0xcf, 0x5b, 0xcf, 0xff, 0xdb, 0x62, 0x6b, 0x2f, 0xa1, 0x7c, 0x22, 0x3b, + 0x38, 0xaa, 0xb9, 0xf9, 0x14, 0x6e, 0xbc, 0x65, 0x62, 0xd8, 0x9f, 0xa8, 0xbd, 0x3a, 0x83, 0x44, + 0xf4, 0xd8, 0x38, 0x5c, 0x3a, 0x57, 0x04, 0x44, 0xab, 0x80, 0x26, 0x81, 0x9b, 0xe7, 0x4b, 0xd5, + 0x72, 0xf7, 0x6e, 0xc3, 0xc6, 0xc2, 0x5f, 0xac, 0x43, 0x65, 0x61, 0xc1, 0xbe, 0xd3, 0xde, 0x7b, + 0xd7, 0xee, 0xb8, 0xba, 0x86, 0x01, 0x36, 0xb7, 0xdb, 0xee, 0xfe, 0xce, 0x96, 0x8e, 0x9a, 0xdf, + 0x10, 0x94, 0x72, 0x2f, 0xb7, 0xe1, 0xf2, 0x2e, 0x67, 0x3e, 0x8d, 0x63, 0x6c, 0x9c, 0xfd, 0xa5, + 0x75, 0xcb, 0x8d, 0x42, 0x6e, 0xfd, 0x91, 0x98, 0x5a, 0x03, 0x3d, 0x40, 0x98, 0xc2, 0xd5, 0xf3, + 0xd8, 0xf0, 0x9d, 0x33, 0xe5, 0x5f, 0xd6, 0x36, 0xee, 0xfe, 0xab, 0x4c, 0xad, 0xd8, 0x6a, 0x4d, + 0x67, 0x44, 0x3b, 0x99, 0x11, 0xed, 0x74, 0x46, 0xd0, 0xa7, 0x94, 0xa0, 0xaf, 0x29, 0x41, 0xdf, + 0x53, 0x82, 0xa6, 0x29, 0x41, 0xbf, 0x52, 0x82, 0x7e, 0xa7, 0x44, 0x3b, 0x4d, 0x09, 0xfa, 0x3c, + 0x27, 0xda, 0x74, 0x4e, 0xb4, 0x93, 0x39, 0xd1, 0xde, 0x57, 0x96, 0xcd, 0x8f, 0x1f, 0x46, 0x9e, + 0xb7, 0x29, 0x5f, 0xfc, 0xa3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0x79, 0xcf, 0x36, 0xb1, + 0x03, 0x00, 0x00, } func (x Type) String() string { @@ -361,7 +357,11 @@ func (this *ClientToFrontend) Equal(that interface{}) bool { if this.ClientID != that1.ClientID { return false } - if !this.Stats.Equal(that1.Stats) { + if that1.Stats == nil { + if this.Stats != nil { + return false + } + } else if !this.Stats.Equal(*that1.Stats) { return false } return true @@ -435,9 +435,7 @@ func (this *ClientToFrontend) GoString() string { s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") } s = append(s, "ClientID: "+fmt.Sprintf("%#v", this.ClientID)+",\n") - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -696,11 +694,11 @@ func (m *ClientToFrontend) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = l if m.Stats != nil { { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { + size := m.Stats.Size() + i -= size + if _, err := m.Stats.MarshalTo(dAtA[i:]); err != nil { return 0, err } - i -= size i = encodeVarintFrontend(dAtA, i, uint64(size)) } i-- @@ -879,7 +877,7 @@ func (this *ClientToFrontend) String() string { s := strings.Join([]string{`&ClientToFrontend{`, `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Stats", "stats.Stats", 1) + `,`, + `Stats:` + fmt.Sprintf("%v", this.Stats) + `,`, `}`, }, "") return s @@ -1166,7 +1164,7 @@ func (m *ClientToFrontend) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Stats == nil { - m.Stats = &stats.Stats{} + m.Stats = 
&github_com_cortexproject_cortex_pkg_querier_stats.QueryStats{} } if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/pkg/frontend/v1/frontendv1pb/frontend.proto b/pkg/frontend/v1/frontendv1pb/frontend.proto index 231f918daec..56bc4d52706 100644 --- a/pkg/frontend/v1/frontendv1pb/frontend.proto +++ b/pkg/frontend/v1/frontendv1pb/frontend.proto @@ -39,7 +39,7 @@ message FrontendToClient { message ClientToFrontend { httpgrpc.HTTPResponse httpResponse = 1; string clientID = 2; - stats.Stats stats = 3; + stats.Stats stats = 3[(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/querier/stats.QueryStats"]; } message NotifyClientShutdownRequest { diff --git a/pkg/frontend/v2/frontend_test.go b/pkg/frontend/v2/frontend_test.go index f1acd92ad1e..b6b88d2a379 100644 --- a/pkg/frontend/v2/frontend_test.go +++ b/pkg/frontend/v2/frontend_test.go @@ -88,7 +88,7 @@ func sendResponseWithDelay(f *Frontend, delay time.Duration, userID string, quer _, _ = f.QueryResult(ctx, &frontendv2pb.QueryResultRequest{ QueryID: queryID, HttpResponse: resp, - Stats: &stats.Stats{}, + Stats: &stats.QueryStats{}, }) } diff --git a/pkg/frontend/v2/frontendv2pb/frontend.pb.go b/pkg/frontend/v2/frontendv2pb/frontend.pb.go index daeb50a8c3b..b5ab1a79350 100644 --- a/pkg/frontend/v2/frontendv2pb/frontend.pb.go +++ b/pkg/frontend/v2/frontendv2pb/frontend.pb.go @@ -6,7 +6,8 @@ package frontendv2pb import ( context "context" fmt "fmt" - stats "github.com/cortexproject/cortex/pkg/querier/stats" + _ "github.com/cortexproject/cortex/pkg/querier/stats" + github_com_cortexproject_cortex_pkg_querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" httpgrpc "github.com/weaveworks/common/httpgrpc" @@ -32,9 +33,9 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type QueryResultRequest struct { - QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` - HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,2,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` - Stats *stats.Stats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,2,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` + Stats *github_com_cortexproject_cortex_pkg_querier_stats.QueryStats `protobuf:"bytes,3,opt,name=stats,proto3,customtype=github.com/cortexproject/cortex/pkg/querier/stats.QueryStats" json:"stats,omitempty"` } func (m *QueryResultRequest) Reset() { *m = QueryResultRequest{} } @@ -83,13 +84,6 @@ func (m *QueryResultRequest) GetHttpResponse() *httpgrpc.HTTPResponse { return nil } -func (m *QueryResultRequest) GetStats() *stats.Stats { - if m != nil { - return m.Stats - } - return nil -} - type QueryResultResponse struct { } @@ -133,29 +127,30 @@ func init() { func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } var fileDescriptor_eca3873955a29cfe = []byte{ - // 351 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcd, 0x4e, 0x3a, 0x31, - 0x14, 0xc5, 0xdb, 0xff, 0xdf, 0x8f, 0xa4, 0x10, 0x17, 0x35, 0x9a, 0x09, 0x8b, 0x06, 0x67, 0xc5, - 0xc6, 0x69, 0x82, 0xae, 0x4c, 0xdc, 0x10, 0x43, 0x74, 0x27, 0x23, 0x2b, 0x77, 0xcc, 0x58, 0x86, - 0x0f, 0x99, 0x96, 0xb6, 0x03, 0xb2, 0xf3, 0x09, 0x8c, 
0x8f, 0xe1, 0xa3, 0xb8, 0x64, 0xc9, 0x52, - 0xca, 0xc6, 0x25, 0x8f, 0x60, 0x68, 0x81, 0x0c, 0x31, 0x71, 0xd3, 0xdc, 0x93, 0x7b, 0x7e, 0xb9, - 0xe7, 0xde, 0xa2, 0xa3, 0xb6, 0xe4, 0xa9, 0x66, 0xe9, 0x53, 0x20, 0x24, 0xd7, 0x1c, 0x17, 0x37, - 0x7a, 0x54, 0x15, 0x51, 0xe9, 0x3c, 0xe9, 0xea, 0x4e, 0x16, 0x05, 0x31, 0x1f, 0xd0, 0x84, 0x27, - 0x9c, 0x5a, 0x53, 0x94, 0xb5, 0xad, 0xb2, 0xc2, 0x56, 0x0e, 0x2e, 0x5d, 0xe6, 0xec, 0x63, 0xd6, - 0x1a, 0xb1, 0x31, 0x97, 0x7d, 0x45, 0x63, 0x3e, 0x18, 0xf0, 0x94, 0x76, 0xb4, 0x16, 0x89, 0x14, - 0xf1, 0xb6, 0x58, 0x53, 0xd7, 0x39, 0x2a, 0xe6, 0x52, 0xb3, 0x17, 0x21, 0x79, 0x8f, 0xc5, 0x7a, - 0xad, 0xa8, 0xe8, 0x27, 0x74, 0x98, 0x31, 0xd9, 0x65, 0x92, 0x2a, 0xdd, 0xd2, 0xca, 0xbd, 0x0e, - 0xf7, 0xdf, 0x20, 0xc2, 0x8d, 0x8c, 0xc9, 0x49, 0xc8, 0x54, 0xf6, 0xac, 0x43, 0x36, 0xcc, 0x98, - 0xd2, 0xd8, 0x43, 0x87, 0x2b, 0x66, 0x72, 0x77, 0xe3, 0xc1, 0x32, 0xac, 0xec, 0x85, 0x1b, 0x89, - 0xaf, 0x50, 0x71, 0x95, 0x20, 0x64, 0x4a, 0xf0, 0x54, 0x31, 0xef, 0x5f, 0x19, 0x56, 0x0a, 0xd5, - 0xd3, 0x60, 0x1b, 0xeb, 0xb6, 0xd9, 0xbc, 0xdf, 0x74, 0xc3, 0x1d, 0x2f, 0xf6, 0xd1, 0xbe, 0x9d, - 0xed, 0xfd, 0xb7, 0x50, 0x31, 0x70, 0x49, 0x1e, 0x56, 0x6f, 0xe8, 0x5a, 0xfe, 0x09, 0x3a, 0xde, - 0xc9, 0xe3, 0xd0, 0x6a, 0x0f, 0xe1, 0xfa, 0xfa, 0xb6, 0x75, 0x2e, 0x1b, 0x6e, 0x1f, 0xdc, 0x44, - 0x85, 0x9c, 0x19, 0x97, 0x83, 0xfc, 0xfd, 0x83, 0xdf, 0x7b, 0x95, 0xce, 0xfe, 0x70, 0xb8, 0x49, - 0x3e, 0xa8, 0xd5, 0xa6, 0x73, 0x02, 0x66, 0x73, 0x02, 0x96, 0x73, 0x02, 0x5f, 0x0d, 0x81, 0x1f, - 0x86, 0xc0, 0x4f, 0x43, 0xe0, 0xd4, 0x10, 0xf8, 0x65, 0x08, 0xfc, 0x36, 0x04, 0x2c, 0x0d, 0x81, - 0xef, 0x0b, 0x02, 0xa6, 0x0b, 0x02, 0x66, 0x0b, 0x02, 0x1e, 0x77, 0xfe, 0x3e, 0x3a, 0xb0, 0xe7, - 0xbd, 0xf8, 0x09, 0x00, 0x00, 0xff, 0xff, 0x02, 0xb0, 0x28, 0xb5, 0x22, 0x02, 0x00, 0x00, + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xbd, 0x4e, 0x32, 0x41, + 0x14, 0xdd, 0xf9, 0x7e, 0x93, 0x81, 0x58, 0x8c, 0xd1, 0x6c, 0x28, 0x46, 0xa4, 0xa2, 0x71, 0x27, + 0x41, 0x2b, 0xa3, 0x89, 0x21, 0x86, 0x68, 0x27, 0x2b, 0x95, 0x1d, 0xbb, 0x0c, 0xcb, 0x8f, 0xec, + 0x0c, 0x33, 0xb3, 0x20, 0x9d, 0x8f, 0xe0, 0x63, 0xf8, 0x28, 0x96, 0x5b, 0x12, 0x0b, 0x23, 0x43, + 0x63, 0xc9, 0x23, 0x18, 0x66, 0x80, 0x2c, 0x31, 0x31, 0xb1, 0xb9, 0xb9, 0x27, 0x7b, 0xce, 0xbd, + 0xe7, 0xee, 0x19, 0xb8, 0xd3, 0x16, 0x2c, 0x56, 0x34, 0x6e, 0x79, 0x5c, 0x30, 0xc5, 0x50, 0x7e, + 0x8d, 0x47, 0x15, 0x1e, 0x14, 0x8e, 0xa2, 0xae, 0xea, 0x24, 0x81, 0x17, 0xb2, 0x01, 0x89, 0x58, + 0xc4, 0x88, 0x21, 0x05, 0x49, 0xdb, 0x20, 0x03, 0x4c, 0x67, 0xc5, 0x85, 0x93, 0x0c, 0x7d, 0x4c, + 0x9b, 0x23, 0x3a, 0x66, 0xa2, 0x2f, 0x49, 0xc8, 0x06, 0x03, 0x16, 0x93, 0x8e, 0x52, 0x3c, 0x12, + 0x3c, 0xdc, 0x34, 0x2b, 0xd5, 0x79, 0x46, 0x15, 0x32, 0xa1, 0xe8, 0x03, 0x17, 0xac, 0x47, 0x43, + 0xb5, 0x42, 0x84, 0xf7, 0x23, 0x32, 0x4c, 0xa8, 0xe8, 0x52, 0x41, 0xa4, 0x6a, 0x2a, 0x69, 0xab, + 0x95, 0x97, 0x52, 0x00, 0x51, 0x3d, 0xa1, 0x62, 0xe2, 0x53, 0x99, 0xdc, 0x2b, 0x9f, 0x0e, 0x13, + 0x2a, 0x15, 0x72, 0xe1, 0xff, 0xa5, 0x66, 0x72, 0x7d, 0xe9, 0x82, 0x22, 0x28, 0xff, 0xf1, 0xd7, + 0x10, 0x9d, 0xc2, 0xfc, 0xd2, 0x81, 0x4f, 0x25, 0x67, 0xb1, 0xa4, 0xee, 0xaf, 0x22, 0x28, 0xe7, + 0x2a, 0xfb, 0xde, 0xc6, 0xd6, 0x55, 0xa3, 0x71, 0xb3, 0xfe, 0xea, 0x6f, 0x71, 0x51, 0x0b, 0xfe, + 0x35, 0xbb, 0xdd, 0xdf, 0x46, 0x94, 0xf7, 0xac, 0x93, 0xdb, 0x65, 0xad, 0x5e, 0xbc, 0xbe, 0x1d, + 0x9c, 0xfd, 0xf8, 0x18, 0xcf, 0x98, 0x37, 0x13, 0x7c, 0x3b, 0xbc, 0xb4, 0x07, 0x77, 0xb7, 0x2e, + 0xb2, 0xcb, 0x2b, 0x3d, 0x88, 0x6a, 
0xab, 0x74, 0x6a, 0x4c, 0xd4, 0xed, 0x10, 0xd4, 0x80, 0xb9, + 0x0c, 0x19, 0x15, 0xbd, 0x6c, 0x82, 0xde, 0xd7, 0x3f, 0x53, 0x38, 0xfc, 0x86, 0x61, 0x37, 0x95, + 0x9c, 0x6a, 0x35, 0x9d, 0x61, 0x67, 0x3a, 0xc3, 0xce, 0x62, 0x86, 0xc1, 0xa3, 0xc6, 0xe0, 0x59, + 0x63, 0xf0, 0xa2, 0x31, 0x48, 0x35, 0x06, 0xef, 0x1a, 0x83, 0x0f, 0x8d, 0x9d, 0x85, 0xc6, 0xe0, + 0x69, 0x8e, 0x9d, 0x74, 0x8e, 0x9d, 0xe9, 0x1c, 0x3b, 0x77, 0x5b, 0xaf, 0x27, 0xf8, 0x67, 0x02, + 0x3a, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x31, 0xd0, 0x9a, 0x67, 0x64, 0x02, 0x00, 0x00, } func (this *QueryResultRequest) Equal(that interface{}) bool { @@ -183,7 +178,11 @@ func (this *QueryResultRequest) Equal(that interface{}) bool { if !this.HttpResponse.Equal(that1.HttpResponse) { return false } - if !this.Stats.Equal(that1.Stats) { + if that1.Stats == nil { + if this.Stats != nil { + return false + } + } else if !this.Stats.Equal(*that1.Stats) { return false } return true @@ -219,9 +218,7 @@ func (this *QueryResultRequest) GoString() string { if this.HttpResponse != nil { s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") } - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -345,11 +342,11 @@ func (m *QueryResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = l if m.Stats != nil { { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { + size := m.Stats.Size() + i -= size + if _, err := m.Stats.MarshalTo(dAtA[i:]); err != nil { return 0, err } - i -= size i = encodeVarintFrontend(dAtA, i, uint64(size)) } i-- @@ -451,7 +448,7 @@ func (this *QueryResultRequest) String() string { s := strings.Join([]string{`&QueryResultRequest{`, `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Stats", "stats.Stats", 1) + `,`, + `Stats:` + fmt.Sprintf("%v", this.Stats) + `,`, `}`, }, "") return s @@ -587,7 +584,7 @@ func (m *QueryResultRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Stats == nil { - m.Stats = &stats.Stats{} + m.Stats = &github_com_cortexproject_cortex_pkg_querier_stats.QueryStats{} } if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err diff --git a/pkg/frontend/v2/frontendv2pb/frontend.proto b/pkg/frontend/v2/frontendv2pb/frontend.proto index b93106d7873..555c249d405 100644 --- a/pkg/frontend/v2/frontendv2pb/frontend.proto +++ b/pkg/frontend/v2/frontendv2pb/frontend.proto @@ -19,7 +19,7 @@ service FrontendForQuerier { message QueryResultRequest { uint64 queryID = 1; httpgrpc.HTTPResponse httpResponse = 2; - stats.Stats stats = 3; + stats.Stats stats = 3[(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/querier/stats.QueryStats"]; // There is no userID field here, because Querier puts userID into the context when // calling QueryResult, and that is where Frontend expects to find it. diff --git a/pkg/querier/stats/stats.go b/pkg/querier/stats/stats.go index 57c0fc23f56..d5c71f86ed4 100644 --- a/pkg/querier/stats/stats.go +++ b/pkg/querier/stats/stats.go @@ -2,6 +2,8 @@ package stats import ( "context" + "fmt" + "sync" "sync/atomic" //lint:ignore faillint we can't use go.uber.org/atomic with a protobuf struct without wrapping it. 
"time" @@ -12,21 +14,26 @@ type contextKey int var ctxKey = contextKey(0) +type QueryStats struct { + Stats + m sync.Mutex +} + // ContextWithEmptyStats returns a context with empty stats. -func ContextWithEmptyStats(ctx context.Context) (*Stats, context.Context) { - stats := &Stats{} +func ContextWithEmptyStats(ctx context.Context) (*QueryStats, context.Context) { + stats := &QueryStats{} ctx = context.WithValue(ctx, ctxKey, stats) return stats, ctx } // FromContext gets the Stats out of the Context. Returns nil if stats have not // been initialised in the context. -func FromContext(ctx context.Context) *Stats { +func FromContext(ctx context.Context) *QueryStats { o := ctx.Value(ctxKey) if o == nil { return nil } - return o.(*Stats) + return o.(*QueryStats) } // IsEnabled returns whether stats tracking is enabled in the context. @@ -37,7 +44,7 @@ func IsEnabled(ctx context.Context) bool { } // AddWallTime adds some time to the counter. -func (s *Stats) AddWallTime(t time.Duration) { +func (s *QueryStats) AddWallTime(t time.Duration) { if s == nil { return } @@ -46,7 +53,7 @@ func (s *Stats) AddWallTime(t time.Duration) { } // LoadWallTime returns current wall time. -func (s *Stats) LoadWallTime() time.Duration { +func (s *QueryStats) LoadWallTime() time.Duration { if s == nil { return 0 } @@ -54,7 +61,7 @@ func (s *Stats) LoadWallTime() time.Duration { return time.Duration(atomic.LoadInt64((*int64)(&s.WallTime))) } -func (s *Stats) AddFetchedSeries(series uint64) { +func (s *QueryStats) AddFetchedSeries(series uint64) { if s == nil { return } @@ -62,7 +69,46 @@ func (s *Stats) AddFetchedSeries(series uint64) { atomic.AddUint64(&s.FetchedSeriesCount, series) } -func (s *Stats) LoadFetchedSeries() uint64 { +func (s *QueryStats) AddExtraFields(fieldsVals ...interface{}) { + if s == nil { + return + } + + s.m.Lock() + defer s.m.Unlock() + + if s.ExtraFields == nil { + s.ExtraFields = map[string]string{} + } + + if len(fieldsVals)%2 == 1 { + fieldsVals = append(fieldsVals, "") + } + + for i := 0; i < len(fieldsVals); i += 2 { + if v, ok := fieldsVals[i].(string); ok { + s.ExtraFields[v] = fmt.Sprintf("%v", fieldsVals[i+1]) + } + } +} + +func (s *QueryStats) LoadExtraFields() []interface{} { + if s == nil { + return []interface{}{} + } + + s.m.Lock() + defer s.m.Unlock() + + r := make([]interface{}, 0, len(s.ExtraFields)) + for k, v := range s.ExtraFields { + r = append(r, k, v) + } + + return r +} + +func (s *QueryStats) LoadFetchedSeries() uint64 { if s == nil { return 0 } @@ -70,7 +116,7 @@ func (s *Stats) LoadFetchedSeries() uint64 { return atomic.LoadUint64(&s.FetchedSeriesCount) } -func (s *Stats) AddFetchedChunkBytes(bytes uint64) { +func (s *QueryStats) AddFetchedChunkBytes(bytes uint64) { if s == nil { return } @@ -78,7 +124,7 @@ func (s *Stats) AddFetchedChunkBytes(bytes uint64) { atomic.AddUint64(&s.FetchedChunkBytes, bytes) } -func (s *Stats) LoadFetchedChunkBytes() uint64 { +func (s *QueryStats) LoadFetchedChunkBytes() uint64 { if s == nil { return 0 } @@ -86,7 +132,7 @@ func (s *Stats) LoadFetchedChunkBytes() uint64 { return atomic.LoadUint64(&s.FetchedChunkBytes) } -func (s *Stats) AddFetchedDataBytes(bytes uint64) { +func (s *QueryStats) AddFetchedDataBytes(bytes uint64) { if s == nil { return } @@ -94,7 +140,7 @@ func (s *Stats) AddFetchedDataBytes(bytes uint64) { atomic.AddUint64(&s.FetchedDataBytes, bytes) } -func (s *Stats) LoadFetchedDataBytes() uint64 { +func (s *QueryStats) LoadFetchedDataBytes() uint64 { if s == nil { return 0 } @@ -103,7 +149,7 @@ func (s *Stats) 
LoadFetchedDataBytes() uint64 { } // Merge the provide Stats into this one. -func (s *Stats) Merge(other *Stats) { +func (s *QueryStats) Merge(other *QueryStats) { if s == nil || other == nil { return } @@ -112,6 +158,7 @@ func (s *Stats) Merge(other *Stats) { s.AddFetchedSeries(other.LoadFetchedSeries()) s.AddFetchedChunkBytes(other.LoadFetchedChunkBytes()) s.AddFetchedDataBytes(other.LoadFetchedDataBytes()) + s.AddExtraFields(other.LoadExtraFields()...) } func ShouldTrackHTTPGRPCResponse(r *httpgrpc.HTTPResponse) bool { diff --git a/pkg/querier/stats/stats.pb.go b/pkg/querier/stats/stats.pb.go index 0f6ad5377bb..d0a0653cfe3 100644 --- a/pkg/querier/stats/stats.pb.go +++ b/pkg/querier/stats/stats.pb.go @@ -7,6 +7,8 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" _ "github.com/golang/protobuf/ptypes/duration" io "io" @@ -38,6 +40,8 @@ type Stats struct { FetchedChunkBytes uint64 `protobuf:"varint,3,opt,name=fetched_chunk_bytes,json=fetchedChunkBytes,proto3" json:"fetched_chunk_bytes,omitempty"` // The number of bytes of data fetched for the query FetchedDataBytes uint64 `protobuf:"varint,4,opt,name=fetched_data_bytes,json=fetchedDataBytes,proto3" json:"fetched_data_bytes,omitempty"` + // Extra fields to be reported on the stats log + ExtraFields map[string]string `protobuf:"bytes,5,rep,name=extra_fields,json=extraFields,proto3" json:"extra_fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *Stats) Reset() { *m = Stats{} } @@ -100,33 +104,47 @@ func (m *Stats) GetFetchedDataBytes() uint64 { return 0 } +func (m *Stats) GetExtraFields() map[string]string { + if m != nil { + return m.ExtraFields + } + return nil +} + func init() { proto.RegisterType((*Stats)(nil), "stats.Stats") + proto.RegisterMapType((map[string]string)(nil), "stats.Stats.ExtraFieldsEntry") } func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } var fileDescriptor_b4756a0aec8b9d44 = []byte{ - // 300 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0x31, 0x4e, 0xc3, 0x30, - 0x14, 0x86, 0xfd, 0xa0, 0x45, 0xc5, 0x5d, 0xc0, 0x30, 0x94, 0x0e, 0xaf, 0x15, 0x53, 0x07, 0x70, - 0x11, 0x8c, 0x2c, 0xa8, 0xed, 0x09, 0x5a, 0x26, 0x96, 0xc8, 0x49, 0xdd, 0x34, 0xa2, 0x8d, 0x51, - 0xe2, 0x08, 0xb1, 0x71, 0x04, 0x46, 0x8e, 0xc0, 0x51, 0x3a, 0x66, 0x2c, 0x0b, 0x10, 0x67, 0x61, - 0xec, 0x11, 0x50, 0x9c, 0x44, 0x6c, 0xfe, 0xf5, 0xfd, 0xdf, 0x2f, 0xf9, 0xd1, 0x76, 0xac, 0x85, - 0x8e, 0xf9, 0x53, 0xa4, 0xb4, 0x62, 0x4d, 0x1b, 0xba, 0x97, 0x7e, 0xa0, 0x97, 0x89, 0xcb, 0x3d, - 0xb5, 0x1e, 0xfa, 0xca, 0x57, 0x43, 0x4b, 0xdd, 0x64, 0x61, 0x93, 0x0d, 0xf6, 0x55, 0x5a, 0x5d, - 0xf4, 0x95, 0xf2, 0x57, 0xf2, 0xbf, 0x35, 0x4f, 0x22, 0xa1, 0x03, 0x15, 0x96, 0xfc, 0xfc, 0x13, - 0x68, 0x73, 0x56, 0x0c, 0xb3, 0x3b, 0x7a, 0xf8, 0x2c, 0x56, 0x2b, 0x47, 0x07, 0x6b, 0xd9, 0x81, - 0x3e, 0x0c, 0xda, 0xd7, 0x67, 0xbc, 0xb4, 0x79, 0x6d, 0xf3, 0x49, 0x65, 0x8f, 0x5a, 0x9b, 0xaf, - 0x1e, 0x79, 0xff, 0xee, 0xc1, 0xb4, 0x55, 0x58, 0xf7, 0xc1, 0x5a, 0xb2, 0x2b, 0x7a, 0xba, 0x90, - 0xda, 0x5b, 0xca, 0xb9, 0x13, 0xcb, 0x28, 0x90, 0xb1, 0xe3, 0xa9, 0x24, 0xd4, 0x9d, 0xbd, 0x3e, - 0x0c, 0x1a, 0x53, 0x56, 0xb1, 0x99, 0x45, 0xe3, 0x82, 0x30, 0x4e, 0x4f, 0x6a, 0xc3, 0x5b, 0x26, - 0xe1, 0xa3, 0xe3, 
0xbe, 0x68, 0x19, 0x77, 0xf6, 0xad, 0x70, 0x5c, 0xa1, 0x71, 0x41, 0x46, 0x05, - 0x60, 0x17, 0xb4, 0x5e, 0x71, 0xe6, 0x42, 0x8b, 0xaa, 0xde, 0xb0, 0xf5, 0xa3, 0x8a, 0x4c, 0x84, - 0x16, 0xb6, 0x3d, 0xba, 0x4d, 0x33, 0x24, 0xdb, 0x0c, 0xc9, 0x2e, 0x43, 0x78, 0x35, 0x08, 0x1f, - 0x06, 0x61, 0x63, 0x10, 0x52, 0x83, 0xf0, 0x63, 0x10, 0x7e, 0x0d, 0x92, 0x9d, 0x41, 0x78, 0xcb, - 0x91, 0xa4, 0x39, 0x92, 0x6d, 0x8e, 0xe4, 0xa1, 0xbc, 0xb3, 0x7b, 0x60, 0xff, 0x7c, 0xf3, 0x17, - 0x00, 0x00, 0xff, 0xff, 0xb7, 0x24, 0x3a, 0xa7, 0x84, 0x01, 0x00, 0x00, + // 388 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0x3f, 0x6e, 0xe2, 0x40, + 0x18, 0xc5, 0x3d, 0x80, 0x57, 0x30, 0xde, 0x82, 0x9d, 0xa5, 0x30, 0x48, 0x3b, 0xa0, 0xad, 0x28, + 0x76, 0x87, 0x15, 0xdb, 0x44, 0x89, 0x14, 0x21, 0xfe, 0xe4, 0x00, 0x26, 0x55, 0x1a, 0x6b, 0x6c, + 0x0f, 0xc6, 0xc2, 0x78, 0x22, 0x7b, 0x9c, 0xc4, 0x5d, 0x8e, 0x90, 0x32, 0x47, 0xc8, 0x51, 0x28, + 0x29, 0x91, 0x22, 0x25, 0xc1, 0x34, 0x29, 0x39, 0x42, 0xe4, 0xb1, 0x11, 0x12, 0xdd, 0xf7, 0xfc, + 0x7b, 0xef, 0xb3, 0xbe, 0xa7, 0x81, 0x5a, 0x24, 0xa8, 0x88, 0xc8, 0x6d, 0xc8, 0x05, 0x47, 0xaa, + 0x14, 0xad, 0xbf, 0xae, 0x27, 0xe6, 0xb1, 0x45, 0x6c, 0xbe, 0xec, 0xb9, 0xdc, 0xe5, 0x3d, 0x49, + 0xad, 0x78, 0x26, 0x95, 0x14, 0x72, 0xca, 0x53, 0x2d, 0xec, 0x72, 0xee, 0xfa, 0xec, 0xe8, 0x72, + 0xe2, 0x90, 0x0a, 0x8f, 0x07, 0x05, 0x6f, 0x9e, 0x72, 0x1a, 0x24, 0x39, 0xfa, 0xfd, 0x5a, 0x82, + 0xea, 0x34, 0xfb, 0x27, 0x1a, 0xc0, 0xda, 0x3d, 0xf5, 0x7d, 0x53, 0x78, 0x4b, 0xa6, 0x83, 0x0e, + 0xe8, 0x6a, 0xfd, 0x26, 0xc9, 0x83, 0xe4, 0x10, 0x24, 0xe3, 0x62, 0xf1, 0xb0, 0xba, 0x7a, 0x6b, + 0x2b, 0xcf, 0xef, 0x6d, 0x60, 0x54, 0xb3, 0xd4, 0xb5, 0xb7, 0x64, 0xe8, 0x1f, 0x6c, 0xcc, 0x98, + 0xb0, 0xe7, 0xcc, 0x31, 0x23, 0x16, 0x7a, 0x2c, 0x32, 0x6d, 0x1e, 0x07, 0x42, 0x2f, 0x75, 0x40, + 0xb7, 0x62, 0xa0, 0x82, 0x4d, 0x25, 0x1a, 0x65, 0x04, 0x11, 0xf8, 0xf3, 0x90, 0xb0, 0xe7, 0x71, + 0xb0, 0x30, 0xad, 0x44, 0xb0, 0x48, 0x2f, 0xcb, 0xc0, 0x8f, 0x02, 0x8d, 0x32, 0x32, 0xcc, 0x00, + 0xfa, 0x03, 0x0f, 0x5b, 0x4c, 0x87, 0x0a, 0x5a, 0xd8, 0x2b, 0xd2, 0x5e, 0x2f, 0xc8, 0x98, 0x0a, + 0x9a, 0xbb, 0x07, 0xf0, 0x3b, 0x7b, 0x10, 0x21, 0x35, 0x67, 0x1e, 0xf3, 0x9d, 0x48, 0x57, 0x3b, + 0xe5, 0xae, 0xd6, 0xff, 0x45, 0xf2, 0xc2, 0xe5, 0xd5, 0x64, 0x92, 0x19, 0xae, 0x24, 0x9f, 0x04, + 0x22, 0x4c, 0x0c, 0x8d, 0x1d, 0xbf, 0xb4, 0x2e, 0x61, 0xfd, 0xd4, 0x80, 0xea, 0xb0, 0xbc, 0x60, + 0x89, 0x6c, 0xa8, 0x66, 0x64, 0x23, 0x6a, 0x40, 0xf5, 0x8e, 0xfa, 0x31, 0x93, 0x87, 0xd6, 0x8c, + 0x5c, 0x9c, 0x97, 0xce, 0xc0, 0xf0, 0x62, 0xbd, 0xc5, 0xca, 0x66, 0x8b, 0x95, 0xfd, 0x16, 0x83, + 0xc7, 0x14, 0x83, 0x97, 0x14, 0x83, 0x55, 0x8a, 0xc1, 0x3a, 0xc5, 0xe0, 0x23, 0xc5, 0xe0, 0x33, + 0xc5, 0xca, 0x3e, 0xc5, 0xe0, 0x69, 0x87, 0x95, 0xf5, 0x0e, 0x2b, 0x9b, 0x1d, 0x56, 0x6e, 0xf2, + 0x47, 0x60, 0x7d, 0x93, 0xad, 0xff, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x25, 0x6d, 0xec, 0xdd, + 0x21, 0x02, 0x00, 0x00, } func (this *Stats) Equal(that interface{}) bool { @@ -160,18 +178,39 @@ func (this *Stats) Equal(that interface{}) bool { if this.FetchedDataBytes != that1.FetchedDataBytes { return false } + if len(this.ExtraFields) != len(that1.ExtraFields) { + return false + } + for i := range this.ExtraFields { + if this.ExtraFields[i] != that1.ExtraFields[i] { + return false + } + } return true } func (this *Stats) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&stats.Stats{") s = append(s, "WallTime: 
"+fmt.Sprintf("%#v", this.WallTime)+",\n") s = append(s, "FetchedSeriesCount: "+fmt.Sprintf("%#v", this.FetchedSeriesCount)+",\n") s = append(s, "FetchedChunkBytes: "+fmt.Sprintf("%#v", this.FetchedChunkBytes)+",\n") s = append(s, "FetchedDataBytes: "+fmt.Sprintf("%#v", this.FetchedDataBytes)+",\n") + keysForExtraFields := make([]string, 0, len(this.ExtraFields)) + for k, _ := range this.ExtraFields { + keysForExtraFields = append(keysForExtraFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtraFields) + mapStringForExtraFields := "map[string]string{" + for _, k := range keysForExtraFields { + mapStringForExtraFields += fmt.Sprintf("%#v: %#v,", k, this.ExtraFields[k]) + } + mapStringForExtraFields += "}" + if this.ExtraFields != nil { + s = append(s, "ExtraFields: "+mapStringForExtraFields+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -203,6 +242,25 @@ func (m *Stats) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ExtraFields) > 0 { + for k := range m.ExtraFields { + v := m.ExtraFields[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintStats(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStats(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStats(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } if m.FetchedDataBytes != 0 { i = encodeVarintStats(dAtA, i, uint64(m.FetchedDataBytes)) i-- @@ -257,6 +315,14 @@ func (m *Stats) Size() (n int) { if m.FetchedDataBytes != 0 { n += 1 + sovStats(uint64(m.FetchedDataBytes)) } + if len(m.ExtraFields) > 0 { + for k, v := range m.ExtraFields { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovStats(uint64(len(k))) + 1 + len(v) + sovStats(uint64(len(v))) + n += mapEntrySize + 1 + sovStats(uint64(mapEntrySize)) + } + } return n } @@ -270,11 +336,22 @@ func (this *Stats) String() string { if this == nil { return "nil" } + keysForExtraFields := make([]string, 0, len(this.ExtraFields)) + for k, _ := range this.ExtraFields { + keysForExtraFields = append(keysForExtraFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtraFields) + mapStringForExtraFields := "map[string]string{" + for _, k := range keysForExtraFields { + mapStringForExtraFields += fmt.Sprintf("%v: %v,", k, this.ExtraFields[k]) + } + mapStringForExtraFields += "}" s := strings.Join([]string{`&Stats{`, `WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `FetchedSeriesCount:` + fmt.Sprintf("%v", this.FetchedSeriesCount) + `,`, `FetchedChunkBytes:` + fmt.Sprintf("%v", this.FetchedChunkBytes) + `,`, `FetchedDataBytes:` + fmt.Sprintf("%v", this.FetchedDataBytes) + `,`, + `ExtraFields:` + mapStringForExtraFields + `,`, `}`, }, "") return s @@ -406,6 +483,133 @@ func (m *Stats) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStats + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraFields == nil { + m.ExtraFields = make(map[string]string) + } + 
var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowStats
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowStats
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthStats
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthStats
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowStats
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthStats
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthStats
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipStats(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthStats
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.ExtraFields[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipStats(dAtA[iNdEx:])
diff --git a/pkg/querier/stats/stats.proto b/pkg/querier/stats/stats.proto
index 4cbc4e5b7e2..c0e509d9a17 100644
--- a/pkg/querier/stats/stats.proto
+++ b/pkg/querier/stats/stats.proto
@@ -6,6 +6,7 @@ option go_package = "stats";
 
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "google/protobuf/duration.proto";
+import "google/protobuf/any.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
@@ -19,4 +20,6 @@ message Stats {
   uint64 fetched_chunk_bytes = 3;
   // The number of bytes of data fetched for the query
   uint64 fetched_data_bytes = 4;
+  // Extra fields to be reported on the stats log
+  map<string, string> extra_fields = 5;
 }
diff --git a/pkg/querier/stats/stats_test.go b/pkg/querier/stats/stats_test.go
index add83dfaf58..41daec7bb6b 100644
--- a/pkg/querier/stats/stats_test.go
+++ b/pkg/querier/stats/stats_test.go
@@ -18,7 +18,7 @@ func TestStats_WallTime(t *testing.T) {
 	})
 
 	t.Run("add and load wall time nil receiver", func(t *testing.T) {
-		var stats *Stats
+		var stats *QueryStats
 		stats.AddWallTime(time.Second)
 
 		assert.Equal(t, time.Duration(0), stats.LoadWallTime())
@@ -35,13 +35,30 @@ func TestStats_AddFetchedSeries(t *testing.T) {
 	})
 
 	t.Run("add and load series nil receiver", func(t *testing.T) {
-		var stats *Stats
+		var stats *QueryStats
 		stats.AddFetchedSeries(50)
 
 		assert.Equal(t, uint64(0), stats.LoadFetchedSeries())
 	})
 }
 
+func
TestQueryStats_AddExtraFields(t *testing.T) { + t.Run("add and load extra fields", func(t *testing.T) { + stats, _ := ContextWithEmptyStats(context.Background()) + stats.AddExtraFields("a", "b") + stats.AddExtraFields("c") + + checkExtraFields(t, []interface{}{"a", "b", "c", ""}, stats.LoadExtraFields()) + }) + + t.Run("add and load extra fields nil receiver", func(t *testing.T) { + var stats *QueryStats + stats.AddExtraFields("a", "b") + + checkExtraFields(t, []interface{}{}, stats.LoadExtraFields()) + }) +} + func TestStats_AddFetchedChunkBytes(t *testing.T) { t.Run("add and load bytes", func(t *testing.T) { stats, _ := ContextWithEmptyStats(context.Background()) @@ -52,7 +69,7 @@ func TestStats_AddFetchedChunkBytes(t *testing.T) { }) t.Run("add and load bytes nil receiver", func(t *testing.T) { - var stats *Stats + var stats *QueryStats stats.AddFetchedChunkBytes(1024) assert.Equal(t, uint64(0), stats.LoadFetchedChunkBytes()) @@ -69,7 +86,7 @@ func TestStats_AddFetchedDataBytes(t *testing.T) { }) t.Run("add and load bytes nil receiver", func(t *testing.T) { - var stats *Stats + var stats *QueryStats stats.AddFetchedDataBytes(1024) assert.Equal(t, uint64(0), stats.LoadFetchedDataBytes()) @@ -78,17 +95,20 @@ func TestStats_AddFetchedDataBytes(t *testing.T) { func TestStats_Merge(t *testing.T) { t.Run("merge two stats objects", func(t *testing.T) { - stats1 := &Stats{} + stats1 := &QueryStats{} stats1.AddWallTime(time.Millisecond) stats1.AddFetchedSeries(50) stats1.AddFetchedChunkBytes(42) stats1.AddFetchedDataBytes(100) + stats1.AddExtraFields("a", "b") + stats1.AddExtraFields("a", "b") - stats2 := &Stats{} + stats2 := &QueryStats{} stats2.AddWallTime(time.Second) stats2.AddFetchedSeries(60) stats2.AddFetchedChunkBytes(100) stats2.AddFetchedDataBytes(101) + stats2.AddExtraFields("c", "d") stats1.Merge(stats2) @@ -96,11 +116,12 @@ func TestStats_Merge(t *testing.T) { assert.Equal(t, uint64(110), stats1.LoadFetchedSeries()) assert.Equal(t, uint64(142), stats1.LoadFetchedChunkBytes()) assert.Equal(t, uint64(201), stats1.LoadFetchedDataBytes()) + checkExtraFields(t, []interface{}{"a", "b", "c", "d"}, stats1.LoadExtraFields()) }) t.Run("merge two nil stats objects", func(t *testing.T) { - var stats1 *Stats - var stats2 *Stats + var stats1 *QueryStats + var stats2 *QueryStats stats1.Merge(stats2) @@ -108,5 +129,19 @@ func TestStats_Merge(t *testing.T) { assert.Equal(t, uint64(0), stats1.LoadFetchedSeries()) assert.Equal(t, uint64(0), stats1.LoadFetchedChunkBytes()) assert.Equal(t, uint64(0), stats1.LoadFetchedDataBytes()) + checkExtraFields(t, []interface{}{}, stats1.LoadExtraFields()) }) } + +func checkExtraFields(t *testing.T, expected, actual []interface{}) { + assert.Equal(t, len(expected), len(actual)) + expectedMap := map[string]string{} + actualMap := map[string]string{} + + for i := 0; i < len(expected); i += 2 { + expectedMap[expected[i].(string)] = expected[i+1].(string) + actualMap[actual[i].(string)] = actual[i+1].(string) + } + + assert.Equal(t, expectedMap, actualMap) +} diff --git a/pkg/querier/tripperware/shard_by.go b/pkg/querier/tripperware/shard_by.go index 67ee8c97ca4..8bb218af321 100644 --- a/pkg/querier/tripperware/shard_by.go +++ b/pkg/querier/tripperware/shard_by.go @@ -10,6 +10,7 @@ import ( "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/weaveworks/common/httpgrpc" + querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" cquerysharding "github.com/cortexproject/cortex/pkg/querysharding" "github.com/cortexproject/cortex/pkg/tenant" util_log 
"github.com/cortexproject/cortex/pkg/util/log" @@ -37,6 +38,7 @@ type shardBy struct { func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { tenantIDs, err := tenant.TenantIDs(ctx) + stats := querier_stats.FromContext(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) @@ -54,6 +56,12 @@ func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { level.Warn(logger).Log("msg", "error analyzing query", "q", r.GetQuery(), "err", err) } + stats.AddExtraFields( + "shard_by.is_shardable", analysis.IsShardable(), + "shard_by.num_shards", numShards, + "shard_by.sharding_labels", analysis.ShardingLabels(), + ) + if err != nil || !analysis.IsShardable() { return s.next.Do(ctx, r) } diff --git a/pkg/querier/worker/frontend_processor.go b/pkg/querier/worker/frontend_processor.go index 9189ff1dd43..61208176664 100644 --- a/pkg/querier/worker/frontend_processor.go +++ b/pkg/querier/worker/frontend_processor.go @@ -95,7 +95,7 @@ func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) erro // and cancel the query. We don't actually handle queries in parallel // here, as we're running in lock step with the server - each Recv is // paired with a Send. - go fp.runRequest(ctx, request.HttpRequest, request.StatsEnabled, func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error { + go fp.runRequest(ctx, request.HttpRequest, request.StatsEnabled, func(response *httpgrpc.HTTPResponse, stats *stats.QueryStats) error { return c.Send(&frontendv1pb.ClientToFrontend{ HttpResponse: response, Stats: stats, @@ -114,8 +114,8 @@ func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) erro } } -func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, statsEnabled bool, sendHTTPResponse func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error) { - var stats *querier_stats.Stats +func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, statsEnabled bool, sendHTTPResponse func(response *httpgrpc.HTTPResponse, stats *stats.QueryStats) error) { + var stats *querier_stats.QueryStats if statsEnabled { stats, ctx = querier_stats.ContextWithEmptyStats(ctx) } diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index ec9b52e6da0..379c0bd8ecb 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -152,7 +152,7 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer } func (sp *schedulerProcessor) runRequest(ctx context.Context, logger log.Logger, queryID uint64, frontendAddress string, statsEnabled bool, request *httpgrpc.HTTPRequest) { - var stats *querier_stats.Stats + var stats *querier_stats.QueryStats if statsEnabled { stats, ctx = querier_stats.ContextWithEmptyStats(ctx) } From 7bf76698298e6da1babb8238447423c3f1fa648e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Dec 2022 13:51:51 -0800 Subject: [PATCH 015/133] Bump go.opentelemetry.io/contrib/propagators/aws from 1.11.1 to 1.12.0 (#5033) Bumps [go.opentelemetry.io/contrib/propagators/aws](https://github.com/open-telemetry/opentelemetry-go-contrib) from 1.11.1 to 1.12.0. 
- [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/v1.11.1...v1.12.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/propagators/aws dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index e1e90df5c20..b0de6461ad5 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( go.etcd.io/etcd/api/v3 v3.5.6 go.etcd.io/etcd/client/pkg/v3 v3.5.6 go.etcd.io/etcd/client/v3 v3.5.4 - go.opentelemetry.io/contrib/propagators/aws v1.11.1 + go.opentelemetry.io/contrib/propagators/aws v1.12.0 go.opentelemetry.io/otel v1.11.2 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 diff --git a/go.sum b/go.sum index 35580801521..ee2d19e9cc1 100644 --- a/go.sum +++ b/go.sum @@ -1009,8 +1009,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= -go.opentelemetry.io/contrib/propagators/aws v1.11.1 h1:bPoZrezYKRb3HXrW6I7QmYLz5bStFrb4ZWmcRw8k+Gg= -go.opentelemetry.io/contrib/propagators/aws v1.11.1/go.mod h1:5jZiQXbiLiVtJP2YRe/IbHURUnWMVsnj8MVinGPAKJs= +go.opentelemetry.io/contrib/propagators/aws v1.12.0 h1:n3lNyZs2ytVEfFhcn2QO5Z80lgrMXyP1Ncx0C7rUU8A= +go.opentelemetry.io/contrib/propagators/aws v1.12.0/go.mod h1:xZSOQIixr40Cq3NHn/YsvkDOzpVaR0j19WJRZnOKwbk= go.opentelemetry.io/contrib/propagators/b3 v1.9.0 h1:Lzb9zU98jCE2kyfCjWfSSsiQoGtvBL+COxvUBf7FNhU= go.opentelemetry.io/contrib/propagators/b3 v1.9.0/go.mod h1:fyx3gFXn+4w5uWTTiqaI8oBNBW/6w9Ow5zxXf7NGixU= go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ1vlsphNPV7RkuKNd/HkQ= diff --git a/vendor/modules.txt b/vendor/modules.txt index b8a8fd0ebfd..33174cb5a8c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -929,7 +929,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp # go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/autoprop -# go.opentelemetry.io/contrib/propagators/aws v1.11.1 +# go.opentelemetry.io/contrib/propagators/aws v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/contrib/propagators/aws/xray # go.opentelemetry.io/contrib/propagators/b3 v1.9.0 From e2dc0bff9f2b3e74ff1c00f4033df55d3c9eb02d Mon Sep 17 00:00:00 2001 From: Harry John Date: Wed, 14 Dec 2022 15:27:06 -0800 Subject: [PATCH 016/133] Ingester: Metadata APIs should honour QueryIngestersWithin when QueryStoreForLabels is enabled (#5027) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Ingester Metadata APIs should honour QueryIngestersWithin Signed-off-by: 🌲 Harry 🌊 John 🏔 * Deprecate query-store-for-labels-enabled 
flag

Signed-off-by: 🌲 Harry 🌊 John 🏔
Signed-off-by: 🌲 Harry 🌊 John 🏔
Signed-off-by: Alex Le
---
 CHANGELOG.md                                |  1 +
 docs/blocks-storage/querier.md              |  5 +-
 docs/configuration/config-file-reference.md |  4 +-
 pkg/cortex/modules.go                       |  2 +
 pkg/ingester/ingester.go                    | 18 +++++--
 pkg/ingester/ingester_test.go               | 25 ++++++++--
 pkg/querier/distributor_queryable.go        | 53 ++++++++++++---------
 pkg/querier/distributor_queryable_test.go   | 22 ++++++---
 pkg/querier/querier.go                      |  4 +-
 pkg/querier/querier_test.go                 |  4 +-
 10 files changed, 92 insertions(+), 46 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d31b9d8746e..a33045728d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@
 * [ENHANCEMENT] Query-tee: Add `/api/v1/query_exemplars` API endpoint support. #5010
 * [ENHANCEMENT] Query Frontend/Query Scheduler: Increase upper bound to 60s for queue duration histogram metric. #5029
 * [ENHANCEMENT] Query Frontend: Log Vertical sharding information when `query_stats_enabled` is enabled. #5037
+* [ENHANCEMENT] Ingester: The metadata APIs should honour `querier.query-ingesters-within` when `querier.query-store-for-labels-enabled` is true. #5027
 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978
 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972
 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000
diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md
index af2edea1cbf..1e84886d514 100644
--- a/docs/blocks-storage/querier.md
+++ b/docs/blocks-storage/querier.md
@@ -131,8 +131,9 @@ querier:
   # CLI flag: -querier.query-ingesters-within
   [query_ingesters_within: <duration> | default = 0s]
 
-  # Query long-term store for series, label values and label names APIs. Works
-  # only with blocks engine.
+  # Deprecated (Querying long-term store for labels will be always enabled in
+  # the future.): Query long-term store for series, label values and label names
+  # APIs.
   # CLI flag: -querier.query-store-for-labels-enabled
   [query_store_for_labels_enabled: <boolean> | default = false]
 
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index 926edb10424..705c84ad305 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -810,8 +810,8 @@ The `querier_config` configures the Cortex querier.
 # CLI flag: -querier.query-ingesters-within
 [query_ingesters_within: <duration> | default = 0s]
 
-# Query long-term store for series, label values and label names APIs. Works
-# only with blocks engine.
+# Deprecated (Querying long-term store for labels will be always enabled in the
+# future.): Query long-term store for series, label values and label names APIs.
 # CLI flag: -querier.query-store-for-labels-enabled
 [query_store_for_labels_enabled: <boolean> | default = false]
 
diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go
index d05ed1083dc..c8560f7e3b7 100644
--- a/pkg/cortex/modules.go
+++ b/pkg/cortex/modules.go
@@ -393,6 +393,8 @@ func (t *Cortex) initIngesterService() (serv services.Service, err error) {
 	t.Cfg.Ingester.DistributorShardingStrategy = t.Cfg.Distributor.ShardingStrategy
 	t.Cfg.Ingester.DistributorShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels
 	t.Cfg.Ingester.InstanceLimitsFn = ingesterInstanceLimits(t.RuntimeConfig)
+	t.Cfg.Ingester.QueryStoreForLabels = t.Cfg.Querier.QueryStoreForLabels
+	t.Cfg.Ingester.QueryIngestersWithin = t.Cfg.Querier.QueryIngestersWithin
 	t.tsdbIngesterConfig()
 
 	t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Overrides, prometheus.DefaultRegisterer, util_log.Logger)
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 9f776bd93bc..b8f8ac23456 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -106,6 +106,10 @@ type Config struct {
 	DistributorShardingStrategy string `yaml:"-"`
 	DistributorShardByAllLabels bool   `yaml:"-"`
 
+	// Injected at runtime and read from querier config.
+	QueryStoreForLabels  bool          `yaml:"-"`
+	QueryIngestersWithin time.Duration `yaml:"-"`
+
 	DefaultLimits    InstanceLimits         `yaml:"instance_limits"`
 	InstanceLimitsFn func() *InstanceLimits `yaml:"-"`
 
@@ -1303,7 +1307,7 @@ func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesReque
 		return &client.LabelValuesResponse{}, nil
 	}
 
-	mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db)
+	mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db, i.cfg.QueryStoreForLabels, i.cfg.QueryIngestersWithin)
 	if err != nil {
 		return nil, err
 	}
@@ -1364,7 +1368,7 @@ func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest
 		return &client.LabelNamesResponse{}, nil
 	}
 
-	mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db)
+	mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db, i.cfg.QueryStoreForLabels, i.cfg.QueryIngestersWithin)
 	if err != nil {
 		return nil, err
 	}
@@ -1432,7 +1436,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr
 		return nil, err
 	}
 
-	mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db)
+	mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db, i.cfg.QueryStoreForLabels, i.cfg.QueryIngestersWithin)
 	if err != nil {
 		return nil, err
 	}
@@ -2564,7 +2568,7 @@ func (i *Ingester) flushHandler(w http.ResponseWriter, r *http.Request) {
 }
 
 // metadataQueryRange returns the best range to query for metadata queries based on the timerange in the ingester.
-func metadataQueryRange(queryStart, queryEnd int64, db *userTSDB) (mint, maxt int64, err error) {
+func metadataQueryRange(queryStart, queryEnd int64, db *userTSDB, queryStoreForLabels bool, queryIngestersWithin time.Duration) (mint, maxt int64, err error) {
+	if queryIngestersWithin > 0 && queryStoreForLabels {
+		// If the feature for querying metadata from store-gateway is enabled,
+		// then we don't want to manipulate the mint and maxt.
+		return
+	}
+
 	// Ingesters are run with limited retention and we don't support querying the store-gateway for labels yet.
// This means if someone loads a dashboard that is outside the range of the ingester, and we only return the // data for the timerange requested (which will be empty), the dashboards will break. To fix this we should diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 748bd194c0f..8ae139d9a98 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -1733,10 +1733,12 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { } tests := map[string]struct { - from int64 - to int64 - matchers []*client.LabelMatchers - expected []*cortexpb.Metric + from int64 + to int64 + matchers []*client.LabelMatchers + expected []*cortexpb.Metric + queryStoreForLabels bool + queryIngestersWithin time.Duration }{ "should return an empty response if no metric match": { from: math.MinInt64, @@ -1794,6 +1796,18 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { {Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[1].lbls)}, }, }, + "should filter metrics by time range if queryStoreForLabels and queryIngestersWithin is enabled": { + from: 100, + to: 1000, + matchers: []*client.LabelMatchers{{ + Matchers: []*client.LabelMatcher{ + {Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"}, + }, + }}, + expected: []*cortexpb.Metric{}, + queryStoreForLabels: true, + queryIngestersWithin: time.Hour, + }, "should not return duplicated metrics on overlapping matchers": { from: math.MinInt64, to: math.MaxInt64, @@ -1860,7 +1874,8 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { EndTimestampMs: testData.to, MatchersSet: testData.matchers, } - + i.cfg.QueryStoreForLabels = testData.queryStoreForLabels + i.cfg.QueryIngestersWithin = testData.queryIngestersWithin res, err := i.MetricsForLabelMatchers(ctx, req) require.NoError(t, err) assert.ElementsMatch(t, testData.expected, res.Metric) diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 1d89dd064e3..ae3468cc65c 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -37,13 +37,14 @@ type Distributor interface { MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) } -func newDistributorQueryable(distributor Distributor, streaming bool, streamingMetdata bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration) QueryableWithFilter { +func newDistributorQueryable(distributor Distributor, streaming bool, streamingMetdata bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration, queryStoreForLabels bool) QueryableWithFilter { return distributorQueryable{ distributor: distributor, streaming: streaming, streamingMetdata: streamingMetdata, iteratorFn: iteratorFn, queryIngestersWithin: queryIngestersWithin, + queryStoreForLabels: queryStoreForLabels, } } @@ -53,6 +54,7 @@ type distributorQueryable struct { streamingMetdata bool iteratorFn chunkIteratorFunc queryIngestersWithin time.Duration + queryStoreForLabels bool } func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { @@ -65,6 +67,7 @@ func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (st streamingMetadata: d.streamingMetdata, chunkIterFn: d.iteratorFn, queryIngestersWithin: d.queryIngestersWithin, + queryStoreForLabels: d.queryStoreForLabels, }, nil } @@ -81,6 +84,7 @@ type distributorQuerier struct { streamingMetadata bool chunkIterFn chunkIteratorFunc queryIngestersWithin time.Duration + queryStoreForLabels bool } // Select implements 
storage.Querier interface. @@ -95,35 +99,18 @@ func (q *distributorQuerier) Select(sortSeries bool, sp *storage.SelectHints, ma } // If the querier receives a 'series' query, it means only metadata is needed. - // For this specific case we shouldn't apply the queryIngestersWithin - // time range manipulation, otherwise we'll end up returning no series at all for + // For the specific case where queryStoreForLabels is disabled + // we shouldn't apply the queryIngestersWithin time range manipulation. + // Otherwise we'll end up returning no series at all for // older time ranges (while in Cortex we do ignore the start/end and always return // series in ingesters). - // Also, in the recent versions of Prometheus, we pass in the hint but with Func set to "series". - // See: https://github.com/prometheus/prometheus/pull/8050 - if sp != nil && sp.Func == "series" { - var ( - ms []metric.Metric - err error - ) - - if q.streamingMetadata { - ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(minT), model.Time(maxT), matchers...) - } else { - ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...) - } - - if err != nil { - return storage.ErrSeriesSet(err) - } - return series.MetricsToSeriesSet(sortSeries, ms) - } + shouldNotQueryStoreForMetadata := (sp != nil && sp.Func == "series" && !q.queryStoreForLabels) // If queryIngestersWithin is enabled, we do manipulate the query mint to query samples up until // now - queryIngestersWithin, because older time ranges are covered by the storage. This // optimization is particularly important for the blocks storage where the blocks retention in the // ingesters could be way higher than queryIngestersWithin. - if q.queryIngestersWithin > 0 { + if q.queryIngestersWithin > 0 && !shouldNotQueryStoreForMetadata { now := time.Now() origMinT := minT minT = math.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin))) @@ -138,6 +125,26 @@ func (q *distributorQuerier) Select(sortSeries bool, sp *storage.SelectHints, ma } } + // In the recent versions of Prometheus, we pass in the hint but with Func set to "series". + // See: https://github.com/prometheus/prometheus/pull/8050 + if sp != nil && sp.Func == "series" { + var ( + ms []metric.Metric + err error + ) + + if q.streamingMetadata { + ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(minT), model.Time(maxT), matchers...) + } else { + ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...) 
+ } + + if err != nil { + return storage.ErrSeriesSet(err) + } + return series.MetricsToSeriesSet(sortSeries, ms) + } + if q.streaming { return q.streamingSelect(ctx, sortSeries, minT, maxT, matchers) } diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index 1481d719825..410de35421c 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -46,7 +46,7 @@ func TestDistributorQuerier(t *testing.T) { }, nil) - queryable := newDistributorQueryable(d, false, false, nil, 0) + queryable := newDistributorQueryable(d, false, false, nil, 0, false) querier, err := queryable.Querier(context.Background(), mint, maxt) require.NoError(t, err) @@ -70,6 +70,7 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) tests := map[string]struct { querySeries bool + queryStoreForLabels bool queryIngestersWithin time.Duration queryMinT int64 queryMaxT int64 @@ -112,6 +113,15 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) expectedMinT: util.TimeToMillis(now.Add(-100 * time.Minute)), expectedMaxT: util.TimeToMillis(now.Add(-90 * time.Minute)), }, + "should manipulate query time range if queryIngestersWithin is enabled and queryStoreForLabels is enabled": { + querySeries: true, + queryStoreForLabels: true, + queryIngestersWithin: time.Hour, + queryMinT: util.TimeToMillis(now.Add(-100 * time.Minute)), + queryMaxT: util.TimeToMillis(now.Add(-30 * time.Minute)), + expectedMinT: util.TimeToMillis(now.Add(-60 * time.Minute)), + expectedMaxT: util.TimeToMillis(now.Add(-30 * time.Minute)), + }, } for _, streamingEnabled := range []bool{false, true} { @@ -124,7 +134,7 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]metric.Metric{}, nil) ctx := user.InjectOrgID(context.Background(), "test") - queryable := newDistributorQueryable(distributor, streamingEnabled, streamingEnabled, nil, testData.queryIngestersWithin) + queryable := newDistributorQueryable(distributor, streamingEnabled, streamingEnabled, nil, testData.queryIngestersWithin, testData.queryStoreForLabels) querier, err := queryable.Querier(ctx, testData.queryMinT, testData.queryMaxT) require.NoError(t, err) @@ -161,7 +171,7 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) func TestDistributorQueryableFilter(t *testing.T) { d := &MockDistributor{} - dq := newDistributorQueryable(d, false, false, nil, 1*time.Hour) + dq := newDistributorQueryable(d, false, false, nil, 1*time.Hour, true) now := time.Now() @@ -209,7 +219,7 @@ func TestIngesterStreaming(t *testing.T) { nil) ctx := user.InjectOrgID(context.Background(), "0") - queryable := newDistributorQueryable(d, true, true, mergeChunks, 0) + queryable := newDistributorQueryable(d, true, true, mergeChunks, 0, true) querier, err := queryable.Querier(ctx, mint, maxt) require.NoError(t, err) @@ -285,7 +295,7 @@ func TestIngesterStreamingMixedResults(t *testing.T) { nil) ctx := user.InjectOrgID(context.Background(), "0") - queryable := newDistributorQueryable(d, true, true, mergeChunks, 0) + queryable := newDistributorQueryable(d, true, true, mergeChunks, 0, true) querier, err := queryable.Querier(ctx, mint, maxt) require.NoError(t, err) @@ -336,7 +346,7 @@ func TestDistributorQuerier_LabelNames(t *testing.T) { d.On("MetricsForLabelMatchersStream", mock.Anything, model.Time(mint), 
model.Time(maxt), someMatchers). Return(metrics, nil) - queryable := newDistributorQueryable(d, false, streamingEnabled, nil, 0) + queryable := newDistributorQueryable(d, false, streamingEnabled, nil, 0, true) querier, err := queryable.Querier(context.Background(), mint, maxt) require.NoError(t, err) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 705bec628db..3e4ceb76e3e 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -96,7 +96,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IngesterMetadataStreaming, "querier.ingester-metadata-streaming", false, "Use streaming RPCs for metadata APIs from ingester.") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") - f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") + f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Deprecated (Querying long-term store for labels will be always enabled in the future.): Query long-term store for series, label values and label names APIs.") f.BoolVar(&cfg.EnablePerStepStats, "querier.per-step-stats-enabled", false, "Enable returning samples stats per steps in query response.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") @@ -146,7 +146,7 @@ func getChunksIteratorFunction(cfg Config) chunkIteratorFunc { func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, tombstonesLoader purger.TombstonesLoader, reg prometheus.Registerer, logger log.Logger) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, v1.QueryEngine) { iteratorFunc := getChunksIteratorFunction(cfg) - distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterStreaming, cfg.IngesterMetadataStreaming, iteratorFunc, cfg.QueryIngestersWithin) + distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterStreaming, cfg.IngesterMetadataStreaming, iteratorFunc, cfg.QueryIngestersWithin, cfg.QueryStoreForLabels) ns := make([]QueryableWithFilter, len(stores)) for ix, s := range stores { diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index b8cfe922570..24f57950547 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -213,8 +213,8 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unorderedResponse, nil) distributor.On("Query", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(unorderedResponseMatrix, nil) - distributorQueryableStreaming := newDistributorQueryable(distributor, true, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin) - distributorQueryable := newDistributorQueryable(distributor, false, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin) + distributorQueryableStreaming := 
newDistributorQueryable(distributor, true, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin, cfg.QueryStoreForLabels) + distributorQueryable := newDistributorQueryable(distributor, false, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin, cfg.QueryStoreForLabels) tCases := []struct { name string From 0ac83e76be198d2c8fac242d9ff0077452608f8c Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Thu, 15 Dec 2022 08:34:09 -0800 Subject: [PATCH 017/133] Building Arm Images (#5041) * wip Signed-off-by: Alan Protasio * est Signed-off-by: Alan Protasio * quay Signed-off-by: Alan Protasio * using different repository for the archteture specific images Signed-off-by: Alan Protasio * testing linux repos Signed-off-by: Alan Protasio * testing manifes on both platforms Signed-off-by: Alan Protasio * enabling deploy stage only on master Signed-off-by: Alan Protasio * changelog Signed-off-by: Alan Protasio * using amd64 image for e2e teests Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- .github/workflows/scripts/install-docker.sh | 4 ++++ .github/workflows/test-build-deploy.yml | 2 +- CHANGELOG.md | 1 + Makefile | 26 +++++++++++++++------ cmd/cortex/Dockerfile | 4 +++- cmd/query-tee/Dockerfile | 4 +++- cmd/test-exporter/Dockerfile | 3 ++- cmd/thanosconvert/Dockerfile | 3 ++- push-images | 23 ++++++++++++++---- 9 files changed, 53 insertions(+), 17 deletions(-) diff --git a/.github/workflows/scripts/install-docker.sh b/.github/workflows/scripts/install-docker.sh index ea694615dbc..d6a215e9270 100755 --- a/.github/workflows/scripts/install-docker.sh +++ b/.github/workflows/scripts/install-docker.sh @@ -4,4 +4,8 @@ set -x VER="20.10.19" curl -L -o /tmp/docker-$VER.tgz https://download.docker.com/linux/static/stable/x86_64/docker-$VER.tgz tar -xz -C /tmp -f /tmp/docker-$VER.tgz +mkdir -vp ~/.docker/cli-plugins/ +curl --silent -L "https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-amd64" > ~/.docker/cli-plugins/docker-buildx +chmod a+x ~/.docker/cli-plugins/docker-buildx mv /tmp/docker/* /usr/bin +docker run --privileged --rm tonistiigi/binfmt --install all diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 8dd03841afc..67682ad6092 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -141,7 +141,7 @@ jobs: run: | export CORTEX_IMAGE_PREFIX="${IMAGE_PREFIX:-quay.io/cortexproject/}" export IMAGE_TAG=$(make image-tag) - export CORTEX_IMAGE="${CORTEX_IMAGE_PREFIX}cortex:$IMAGE_TAG" + export CORTEX_IMAGE="${CORTEX_IMAGE_PREFIX}cortex:$IMAGE_TAG-amd64" export CORTEX_CHECKOUT_DIR="/go/src/github.com/cortexproject/cortex" echo "Running integration tests with image: $CORTEX_IMAGE" go test -tags=requires_docker -timeout 2400s -v -count=1 ./integration/... diff --git a/CHANGELOG.md b/CHANGELOG.md index a33045728d6..b7ce27ee2ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ * [FEATURE] Query Frontend: Log query params in query frontend even if error happens. #5005 * [FEATURE] Ingester: Enable snapshotting of In-memory TSDB on disk during shutdown via `-blocks-storage.tsdb.memory-snapshot-on-shutdown`. #5011 * [FEATURE] Query Frontend/Scheduler: Add a new counter metric `cortex_request_queue_requests_total` for total requests going to queue. #5030 +* [FEATURE] Build ARM docker images. #5041 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. 
#5008 ## 1.14.0 2022-12-02 diff --git a/Makefile b/Makefile index c1ef3cf693f..da88ad9cb9e 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,9 @@ VERSION=$(shell cat "./VERSION" 2> /dev/null) GOPROXY_VALUE=$(shell go env GOPROXY) +# ARCHS +ARCHS = amd64 arm64 + # Boiler plate for building Docker containers. # All this must go at top of file I'm afraid. IMAGE_PREFIX ?= quay.io/cortexproject/ @@ -37,8 +40,9 @@ SED ?= $(shell which gsed 2>/dev/null || which sed) # Dependencies (i.e. things that go in the image) still need to be explicitly # declared. %/$(UPTODATE): %/Dockerfile - @echo - $(SUDO) docker build --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)$(shell basename $(@D)) -t $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG) $(@D)/ + for arch in $(ARCHS); do \ + $(SUDO) docker buildx build --platform linux/$$arch --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)$(shell basename $(@D)) -t $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG)-$$arch $(@D)/ ; \ + done @echo @echo Please use push-multiarch-build-image to build and push build image for all supported architectures. touch $@ @@ -160,7 +164,11 @@ else exes: $(EXES) $(EXES): - CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D) + @for arch in $(ARCHS); do \ + echo "Building $@ for $$arch";\ + CGO_ENABLED=0 GOARCH=$$arch GOOS=linux go build $(GO_FLAGS) -o $@-$$arch ./$(@D); \ + done + protos: $(PROTO_GOS) @@ -272,16 +280,20 @@ clean-protos: save-images: @mkdir -p docker-images - for image_name in $(IMAGE_NAMES); do \ + @for image_name in $(IMAGE_NAMES); do \ if ! echo $$image_name | grep build; then \ - docker save $$image_name:$(IMAGE_TAG) -o docker-images/$$(echo $$image_name | tr "/" _):$(IMAGE_TAG); \ + for arch in $(ARCHS); do \ + docker save $$image_name:$(IMAGE_TAG)-$$arch -o docker-images/$$(echo $$image_name | tr "/" _):$(IMAGE_TAG)-$$arch; \ + done;\ fi \ done load-images: - for image_name in $(IMAGE_NAMES); do \ + @for image_name in $(IMAGE_NAMES); do \ if ! 
echo $$image_name | grep build; then \ - docker load -i docker-images/$$(echo $$image_name | tr "/" _):$(IMAGE_TAG); \ + for arch in $(ARCHS); do \ + docker load -i docker-images/$$(echo $$image_name | tr "/" _):$(IMAGE_TAG)-$$arch; \ + done;\ fi \ done diff --git a/cmd/cortex/Dockerfile b/cmd/cortex/Dockerfile index ab995a5332c..52fbb0c8d21 100644 --- a/cmd/cortex/Dockerfile +++ b/cmd/cortex/Dockerfile @@ -1,7 +1,9 @@ FROM alpine:3.14 +ARG TARGETARCH + RUN apk add --no-cache ca-certificates COPY migrations /migrations/ -COPY cortex /bin/cortex +COPY cortex-$TARGETARCH /bin/cortex EXPOSE 80 ENTRYPOINT [ "/bin/cortex" ] diff --git a/cmd/query-tee/Dockerfile b/cmd/query-tee/Dockerfile index 622ed403ea6..8974e5bd272 100644 --- a/cmd/query-tee/Dockerfile +++ b/cmd/query-tee/Dockerfile @@ -1,6 +1,8 @@ FROM alpine:3.14 +ARG TARGETARCH + RUN apk add --no-cache ca-certificates -COPY query-tee / +COPY query-tee-$TARGETARCH /query-tee ENTRYPOINT ["/query-tee"] ARG revision diff --git a/cmd/test-exporter/Dockerfile b/cmd/test-exporter/Dockerfile index 927d6d9a272..0ec8f1ffef5 100644 --- a/cmd/test-exporter/Dockerfile +++ b/cmd/test-exporter/Dockerfile @@ -1,6 +1,7 @@ FROM alpine:3.14 +ARG TARGETARCH RUN apk add --no-cache ca-certificates -COPY test-exporter / +COPY test-exporter-$TARGETARCH /test-exporter ENTRYPOINT ["/test-exporter"] ARG revision diff --git a/cmd/thanosconvert/Dockerfile b/cmd/thanosconvert/Dockerfile index 2a97a9c765b..9e4faa4a1aa 100644 --- a/cmd/thanosconvert/Dockerfile +++ b/cmd/thanosconvert/Dockerfile @@ -1,6 +1,7 @@ FROM alpine:3.14 +ARG TARGETARCH RUN apk add --no-cache ca-certificates -COPY thanosconvert / +COPY thanosconvert-$TARGETARCH /thanosconvert ENTRYPOINT ["/thanosconvert"] ARG revision diff --git a/push-images b/push-images index c6cae70c62c..fe32c85a170 100755 --- a/push-images +++ b/push-images @@ -29,8 +29,16 @@ done push_image() { local image="$1" - echo "Pushing ${image}:${IMAGE_TAG}" - docker push ${image}:${IMAGE_TAG} + + for arch in amd64 arm64; do \ + echo "Pushing ${image}-linux:${IMAGE_TAG}-$arch" + docker tag ${image}:${IMAGE_TAG}-$arch ${image}-linux:${IMAGE_TAG}-$arch + docker push ${image}-linux:${IMAGE_TAG}-$arch + done; + + docker manifest create ${image}:${IMAGE_TAG} --amend ${image}-linux:${IMAGE_TAG}-amd64 --amend ${image}-linux:${IMAGE_TAG}-arm64 + docker manifest push ${image}:${IMAGE_TAG} + if [ -n "${NO_QUAY}" ]; then return @@ -38,10 +46,15 @@ push_image() { # remove the quay prefix and push to docker hub docker_hub_image=${image#$QUAY_PREFIX} - docker tag ${image}:${IMAGE_TAG} ${docker_hub_image}:${IMAGE_TAG} + for arch in amd64 arm64; do \ + docker tag ${image}:${IMAGE_TAG}-$arch ${docker_hub_image}-linux:${IMAGE_TAG}-$arch + + echo "Pushing ${docker_hub_image}-linux:${IMAGE_TAG}-$arch" + docker push ${docker_hub_image}-linux:${IMAGE_TAG}-$arch + done; - echo "Pushing ${docker_hub_image}:${IMAGE_TAG}" - docker push ${docker_hub_image}:${IMAGE_TAG} + docker manifest create ${docker_hub_image}:${IMAGE_TAG} --amend ${docker_hub_image}-linux:${IMAGE_TAG}-amd64 --amend ${docker_hub_image}-linux:${IMAGE_TAG}-arm64 + docker manifest push ${docker_hub_image}:${IMAGE_TAG} } for image in ${IMAGES}; do From 66e86eb3c43daead231e69765567b18c5948d383 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 15 Dec 2022 12:12:06 -0800 Subject: [PATCH 018/133] update thanos to latest main 4054531421b5fc88286b8757f86767ec41100082 (#5043) Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- go.mod | 10 +- go.sum | 22 +- 
pkg/storegateway/bucket_stores.go | 1 + vendor/github.com/efficientgo/core/COPYRIGHT | 2 + .../efficientgo/{tools => }/core/LICENSE | 0 .../{tools/core/pkg => core}/errcapture/do.go | 7 +- .../core/pkg => core}/errcapture/doc.go | 30 +- .../github.com/efficientgo/core/errors/doc.go | 13 + .../efficientgo/core/errors/errors.go | 168 +++++ .../efficientgo/core/errors/stacktrace.go | 58 ++ .../core/pkg => core}/logerrcapture/do.go | 5 +- .../core/pkg => core}/logerrcapture/doc.go | 32 +- .../efficientgo/core/merrors/doc.go | 29 + .../errors.go => core/merrors/merrors.go} | 0 .../efficientgo/core/testutil/doc.go | 6 + .../core/testutil/internal/difflib.go | 645 ++++++++++++++++++ .../core/pkg => core}/testutil/testorbench.go | 0 .../core/pkg => core}/testutil/testutil.go | 113 +-- .../efficientgo/tools/core/pkg/merrors/doc.go | 20 - .../tools/core/pkg/testutil/doc.go | 6 - .../github.com/minio/minio-go/v7/.gitignore | 3 +- .../minio/minio-go/v7/.golangci.yml | 4 +- vendor/github.com/minio/minio-go/v7/Makefile | 5 +- vendor/github.com/minio/minio-go/v7/README.md | 70 +- .../minio/minio-go/v7/README_zh_CN.md | 70 +- .../minio/minio-go/v7/api-compose-object.go | 2 +- .../minio/minio-go/v7/api-datatypes.go | 20 +- .../minio/minio-go/v7/api-error-response.go | 16 +- .../minio/minio-go/v7/api-get-options.go | 5 +- .../github.com/minio/minio-go/v7/api-list.go | 98 ++- .../minio-go/v7/api-put-object-common.go | 7 +- .../minio-go/v7/api-put-object-multipart.go | 76 ++- .../minio-go/v7/api-put-object-streaming.go | 92 ++- .../minio/minio-go/v7/api-put-object.go | 8 +- .../minio/minio-go/v7/api-remove.go | 6 +- .../minio/minio-go/v7/api-s3-datatypes.go | 10 +- .../github.com/minio/minio-go/v7/api-stat.go | 9 +- vendor/github.com/minio/minio-go/v7/api.go | 54 +- .../github.com/minio/minio-go/v7/constants.go | 11 +- vendor/github.com/minio/minio-go/v7/core.go | 15 +- .../minio/minio-go/v7/functional_tests.go | 32 +- .../v7/pkg/credentials/assume_role.go | 2 +- .../minio-go/v7/pkg/credentials/chain.go | 21 +- .../v7/pkg/credentials/credentials.go | 9 +- .../v7/pkg/credentials/credentials.json | 7 + .../v7/pkg/credentials/credentials.sample | 3 + .../minio/minio-go/v7/pkg/credentials/doc.go | 34 +- .../pkg/credentials/file_aws_credentials.go | 48 +- .../minio-go/v7/pkg/encrypt/fips_disabled.go | 24 + .../minio-go/v7/pkg/encrypt/fips_enabled.go | 24 + .../minio-go/v7/pkg/encrypt/server-side.go | 6 +- .../minio-go/v7/pkg/notification/info.go | 6 +- .../v7/pkg/notification/notification.go | 25 +- ...st-signature-streaming-unsigned-trailer.go | 225 ++++++ .../pkg/signer/request-signature-streaming.go | 111 ++- .../v7/pkg/signer/request-signature-v2.go | 27 +- .../v7/pkg/signer/request-signature-v4.go | 41 +- .../minio/minio-go/v7/pkg/signer/utils.go | 3 +- .../minio/minio-go/v7/pkg/tags/tags.go | 101 ++- .../minio/minio-go/v7/post-policy.go | 25 +- .../minio/minio-go/v7/s3-endpoints.go | 3 + vendor/github.com/minio/minio-go/v7/utils.go | 40 ++ .../exporter-toolkit/web/handler.go | 10 +- .../thanos-io/objstore/CHANGELOG.md | 4 + .../github.com/thanos-io/objstore/README.md | 5 +- .../thanos-io/objstore/exthttp/tlsconfig.go | 4 +- vendor/github.com/thanos-io/objstore/inmem.go | 13 +- .../github.com/thanos-io/objstore/objstore.go | 2 +- .../objstore/providers/azure/helpers.go | 2 +- .../providers/filesystem/filesystem.go | 5 +- .../thanos-io/objstore/providers/s3/s3.go | 5 +- .../objstore/providers/swift/swift.go | 121 ++-- .../github.com/thanos-io/objstore/testing.go | 11 +- .../thanos-io/objstore/tlsconfig.go | 4 
+- .../thanos-io/objstore/tracing/tracing.go | 5 + .../thanos-io/thanos/pkg/block/block.go | 31 + .../thanos-io/thanos/pkg/block/fetcher.go | 3 + .../thanos/pkg/block/metadata/markers.go | 31 +- .../pkg/compact/downsample/downsample.go | 108 +++ .../thanos-io/thanos/pkg/store/bucket.go | 550 +++++++++------ .../thanos-io/thanos/pkg/store/proxy_heap.go | 37 +- .../thanos/pkg/store/storepb/custom.go | 11 + vendor/modules.txt | 22 +- 83 files changed, 2770 insertions(+), 779 deletions(-) create mode 100644 vendor/github.com/efficientgo/core/COPYRIGHT rename vendor/github.com/efficientgo/{tools => }/core/LICENSE (100%) rename vendor/github.com/efficientgo/{tools/core/pkg => core}/errcapture/do.go (89%) rename vendor/github.com/efficientgo/{tools/core/pkg => core}/errcapture/doc.go (71%) create mode 100644 vendor/github.com/efficientgo/core/errors/doc.go create mode 100644 vendor/github.com/efficientgo/core/errors/errors.go create mode 100644 vendor/github.com/efficientgo/core/errors/stacktrace.go rename vendor/github.com/efficientgo/{tools/core/pkg => core}/logerrcapture/do.go (94%) rename vendor/github.com/efficientgo/{tools/core/pkg => core}/logerrcapture/doc.go (66%) create mode 100644 vendor/github.com/efficientgo/core/merrors/doc.go rename vendor/github.com/efficientgo/{tools/core/pkg/merrors/errors.go => core/merrors/merrors.go} (100%) create mode 100644 vendor/github.com/efficientgo/core/testutil/doc.go create mode 100644 vendor/github.com/efficientgo/core/testutil/internal/difflib.go rename vendor/github.com/efficientgo/{tools/core/pkg => core}/testutil/testorbench.go (100%) rename vendor/github.com/efficientgo/{tools/core/pkg => core}/testutil/testutil.go (68%) delete mode 100644 vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go delete mode 100644 vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go diff --git a/go.mod b/go.mod index b0de6461ad5..63b2f0e7b94 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 github.com/lib/pq v1.10.7 - github.com/minio/minio-go/v7 v7.0.37 + github.com/minio/minio-go/v7 v7.0.45 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e @@ -47,8 +47,8 @@ require ( github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 - github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 - github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d + github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 + github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae go.etcd.io/etcd/api/v3 v3.5.6 @@ -108,7 +108,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.4.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd // indirect + github.com/efficientgo/core 
v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect @@ -170,7 +170,7 @@ require ( github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.7.1 // indirect + github.com/prometheus/exporter-toolkit v0.7.3 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/cors v1.8.2 // indirect diff --git a/go.sum b/go.sum index ee2d19e9cc1..7a6933a402c 100644 --- a/go.sum +++ b/go.sum @@ -254,10 +254,9 @@ github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/core v1.0.0-rc.0 h1:jJoA0N+C4/knWYVZ6GrdHOtDyrg8Y/TR4vFpTaqTsqs= +github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 h1:rydBwnBoywKQMjWF0z8SriYtQ+uUcaFsxuijMjJr5PI= +github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI= github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= -github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd h1:svR6KxSP1xiPw10RN4Pd7g6BAVkEcNN628PAqZH31mM= -github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= @@ -706,8 +705,8 @@ github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8= -github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs= +github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -848,8 +847,9 @@ github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.1 
h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.7.3 h1:IYBn0CTGi/nYxstdTUKysuSofUNJ3DQW3FmZ/Ub6rgU= +github.com/prometheus/exporter-toolkit v0.7.3/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -934,10 +934,10 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 h1:9dceDSKKsLWNHjrMpyzK1t7eVcAZv9Dp3FX+uokUS2Y= -github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604/go.mod h1:Vx5dZs9ElxEhNLnum/OgB0pNTqNdI2zdXL82BeJr3T4= -github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d h1:cfGwZH4LBrkFbQHea7HUlNDOQhILGBRPxDWjy4FhAME= -github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d/go.mod h1:odqdxSO+o/UaVgNpdkYYaQUW/JpT7LByXyZmxoe6uoc= +github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= +github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= +github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 h1:NRu4DwMAHgI+AJ0eevwQ8mVgrT15KskHOIKtQ0T3/EE= +github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5/go.mod h1:2bDlu7xW2TXrSt/TGBozMMVECjjrnqoLJKc/ZIiPngA= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1044,7 +1044,6 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= @@ -1370,7 +1369,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index 859499862f8..680ad43a6e0 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -475,6 +475,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro store.WithIndexCache(u.indexCache), store.WithQueryGate(u.queryGate), store.WithChunkPool(u.chunksPool), + store.WithSeriesBatchSize(store.SeriesBatchSize), } if u.logLevel.String() == "debug" { bucketStoreOpts = append(bucketStoreOpts, store.WithDebugLogging()) diff --git a/vendor/github.com/efficientgo/core/COPYRIGHT b/vendor/github.com/efficientgo/core/COPYRIGHT new file mode 100644 index 00000000000..0b803fd346d --- /dev/null +++ b/vendor/github.com/efficientgo/core/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The EfficientGo Authors. +Licensed under the Apache License 2.0. \ No newline at end of file diff --git a/vendor/github.com/efficientgo/tools/core/LICENSE b/vendor/github.com/efficientgo/core/LICENSE similarity index 100% rename from vendor/github.com/efficientgo/tools/core/LICENSE rename to vendor/github.com/efficientgo/core/LICENSE diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go b/vendor/github.com/efficientgo/core/errcapture/do.go similarity index 89% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go rename to vendor/github.com/efficientgo/core/errcapture/do.go index c7176a475b2..40f9eb9d7eb 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go +++ b/vendor/github.com/efficientgo/core/errcapture/do.go @@ -10,11 +10,10 @@ package errcapture import ( "io" - "io/ioutil" "os" - "github.com/efficientgo/tools/core/pkg/merrors" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/merrors" ) type doFunc func() error @@ -38,7 +37,7 @@ func Do(err *error, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with error capture but exhausts the reader before. func ExhaustClose(err *error, r io.ReadCloser, format string, a ...interface{}) { - _, copyErr := io.Copy(ioutil.Discard, r) + _, copyErr := io.Copy(io.Discard, r) Do(err, r.Close, format, a...) diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go b/vendor/github.com/efficientgo/core/errcapture/doc.go similarity index 71% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go rename to vendor/github.com/efficientgo/core/errcapture/doc.go index 96c1c22918f..ab7f2ed82ba 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go +++ b/vendor/github.com/efficientgo/core/errcapture/doc.go @@ -1,29 +1,43 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package errcapture - +// Package errcapture implements robust error handling in defer statements using provided return argument. +// // Close a `io.Closer` interface or execute any function that returns error safely while capturing error. 
// It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `errcapture` provides utility functions to capture error and add to provided one, // still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer errcapture.Do(&err, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer errcapture.Do(&err, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `errcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File) (err error) { +// defer errcapture.Do(&err, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The errcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/logerrcapture if you want to just log an error instead. +package errcapture diff --git a/vendor/github.com/efficientgo/core/errors/doc.go b/vendor/github.com/efficientgo/core/errors/doc.go new file mode 100644 index 00000000000..c489b2e2f27 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/doc.go @@ -0,0 +1,13 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package errors provides basic utilities to manipulate errors with a useful stacktrace. It combines the +// benefits of errors.New and fmt.Errorf world into a single package. It is an improved and minimal version of the popular +// https://github.com/pkg/errors (archived) package allowing reliable wrapping of errors with stacktrace. Unfortunately, +// the standard library recommended error wrapping using `%+w` is prone to errors and does not support stacktraces. +package errors diff --git a/vendor/github.com/efficientgo/core/errors/errors.go b/vendor/github.com/efficientgo/core/errors/errors.go new file mode 100644 index 00000000000..ab032cfe105 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/errors.go @@ -0,0 +1,168 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "errors" + "fmt" + "strings" +) + +// base is the fundamental struct that implements the error interface and the acts as the backbone of this errors package. +type base struct { + // info contains the error message passed through calls like errors.Wrap, errors.New. + info string + // stacktrace stores information about the program counters - i.e. where this error was generated. 
+ stack stacktrace + // err is the actual error which is being wrapped with a stacktrace and message information. + err error +} + +// Error implements the error interface. +func (b *base) Error() string { + if b.err != nil { + return fmt.Sprintf("%s: %s", b.info, b.err.Error()) + } + return b.info +} + +// Unwrap implements the error Unwrap interface. +func (b *base) Unwrap() error { + return b.err +} + +// Format implements the fmt.Formatter interface to support the formatting of an error chain with "%+v" verb. +// Whenever error is printed with %+v format verb, stacktrace info gets dumped to the output. +func (b *base) Format(s fmt.State, verb rune) { + if verb == 'v' && s.Flag('+') { + s.Write([]byte(formatErrorChain(b))) + return + } + + s.Write([]byte(b.Error())) +} + +// New returns a new error with a stacktrace of recent call frames. Each call to New +// returns a distinct error value even if the text is identical. An alternative of +// the errors.New function from github.com/pkg/errors. +func New(message string) error { + return &base{ + info: message, + stack: newStackTrace(), + err: nil, + } +} + +// Newf is like New, but it formats input according to a format specifier. +// An alternative of the fmt.Errorf function. +// +// If no args have been passed, it is same as `New` function without formatting. Character like +// '%' still has to be escaped in that scenario. +func Newf(format string, args ...interface{}) error { + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: nil, + } +} + +// Wrap returns a new error, which wraps another error with a stacktrace containing recent call frames. +// +// If cause is nil, it does not produce the error, similar to errors.Wrap from github.com/pkg/errors was doing. +// Still, avoid `errors.Wrap(nil, "...") patterns as it can lead to inefficiencies while constructing arguments +// to format as well readability issues. We keep this behaviour to make sure it's a drop-in replacement. +func Wrap(cause error, message string) error { + if cause == nil { + return nil + } + + return &base{ + info: message, + stack: newStackTrace(), + err: cause, + } +} + +// Wrapf is like Wrap but the message is formatted with the supplied format specifier. +// +// If no args have been passed, it is same as `Wrap` function without formatting. +// Character like '%' still has to be escaped in that scenario. +func Wrapf(cause error, format string, args ...interface{}) error { + if cause == nil { + return nil + } + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: cause, + } +} + +// Cause returns the result of repeatedly calling the Unwrap method on err, if err's +// type implements an Unwrap method. Otherwise, Cause returns the last encountered error. +// The difference between Unwrap and Cause is the first one performs unwrapping of one level +// but Cause returns the last err (whether it's nil or not) where it failed to assert +// the interface containing the Unwrap method. +// This is a replacement of errors.Cause without the causer interface from pkg/errors which +// actually can be sufficed through the errors.Is function. But considering some use cases +// where we need to peel off all the external layers applied through errors.Wrap family, +// it is useful ( where external SDK doesn't use errors.Is internally). 
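The wrapping behaviour documented above can be exercised from calling code roughly as follows; this is an illustrative sketch (the loadConfig helper and the path are made up, only errors.Wrapf, errors.Cause and the %+v verb come from this package):

package main

import (
	"fmt"
	"os"

	"github.com/efficientgo/core/errors"
)

func loadConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// Wrapf attaches a message and a stacktrace; it returns nil when err is nil.
		return errors.Wrapf(err, "load config %s", path)
	}
	return nil
}

func main() {
	if err := loadConfig("/no/such/file"); err != nil {
		fmt.Printf("%+v\n", err)       // message chain followed by the captured stacktrace
		fmt.Println(errors.Cause(err)) // innermost error, here the *fs.PathError from os.ReadFile
	}
}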
+func Cause(err error) error { + for err != nil { + e, ok := err.(interface { + Unwrap() error + }) + if !ok { + return err + } + err = e.Unwrap() + } + return nil +} + +// formatErrorChain formats an error chain. +func formatErrorChain(err error) string { + var buf strings.Builder + for err != nil { + if e, ok := err.(*base); ok { + buf.WriteString(fmt.Sprintf("%s\n%v", e.info, e.stack)) + err = e.err + } else { + buf.WriteString(fmt.Sprintf("%s\n", err.Error())) + err = nil + } + } + return buf.String() +} + +// The functions `Is`, `As` & `Unwrap` provides a thin wrapper around the builtin errors +// package in go. Just for sake of completeness and correct autocompletion behaviors from +// IDEs they have been wrapped using functions instead of using variable to reference them +// as first class functions (eg: var Is = errros.Is ). + +// Is is a wrapper of built-in errors.Is. It reports whether any error in err's +// chain matches target. The chain consists of err itself followed by the sequence +// of errors obtained by repeatedly calling Unwrap. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As is a wrapper of built-in errors.As. It finds the first error in err's +// chain that matches target, and if one is found, sets target to that error +// value and returns true. Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} + +// Unwrap is a wrapper of built-in errors.Unwrap. Unwrap returns the result of +// calling the Unwrap method on err, if err's type contains an Unwrap method +// returning error. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} diff --git a/vendor/github.com/efficientgo/core/errors/stacktrace.go b/vendor/github.com/efficientgo/core/errors/stacktrace.go new file mode 100644 index 00000000000..a479977d812 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/stacktrace.go @@ -0,0 +1,58 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "fmt" + "runtime" + "strings" +) + +// stacktrace holds a snapshot of program counters. +type stacktrace []uintptr + +// newStackTrace captures a stack trace. It skips first 3 frames to record the +// snapshot of the stack trace at the origin of a particular error. It tries to +// record maximum 16 frames (if available). +func newStackTrace() stacktrace { + const stackDepth = 16 // record maximum 16 frames (if available). + + pc := make([]uintptr, stackDepth) + // using skip=3 for not to count the program counter address of + // 1. the respective function from errors package (eg. errors.New) + // 2. newStacktrace itself + // 3. the function used in runtime.Callers + n := runtime.Callers(3, pc) + + // this approach is taken to reduce long term memory footprint (obtained through escape analysis). + // We are returning a new slice by re-slicing the pc with the required length and capacity (when the + // no of returned callFrames is less that stackDepth). This uses less memory compared to pc[:n] as + // the capacity of new slice is inherited from the parent slice if not specified. + return pc[:n:n] +} + +// String implements the fmt.Stringer interface to provide formatted text output. 
+func (s stacktrace) String() string { + var buf strings.Builder + + // CallersFrames takes the slice of Program Counter addresses returned by Callers to + // retrieve function/file/line information. + cf := runtime.CallersFrames(s) + for { + // more indicates if the next call will be successful or not. + frame, more := cf.Next() + // used formatting scheme <`>`space><:> for example: + // > testing.tRunner /home/go/go1.17.8/src/testing/testing.go:1259 + buf.WriteString(fmt.Sprintf("> %s\t%s:%d\n", frame.Func.Name(), frame.File, frame.Line)) + if !more { + break + } + } + return buf.String() +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go b/vendor/github.com/efficientgo/core/logerrcapture/do.go similarity index 94% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go rename to vendor/github.com/efficientgo/core/logerrcapture/do.go index 41662b90138..6bad9fa6eba 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/do.go @@ -11,10 +11,9 @@ package logerrcapture import ( "fmt" "io" - "io/ioutil" "os" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" ) // Logger interface compatible with go-kit/logger. @@ -42,7 +41,7 @@ func Do(logger Logger, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with a log message on error but exhausts the reader before. func ExhaustClose(logger Logger, r io.ReadCloser, format string, a ...interface{}) { - _, err := io.Copy(ioutil.Discard, r) + _, err := io.Copy(io.Discard, r) if err != nil { _ = logger.Log("msg", "failed to exhaust reader, performance may be impeded", "err", err) } diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go b/vendor/github.com/efficientgo/core/logerrcapture/doc.go similarity index 66% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go rename to vendor/github.com/efficientgo/core/logerrcapture/doc.go index 414efc99582..90738562ceb 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/doc.go @@ -1,30 +1,44 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package logerrcapture - -// Close a `io.Closer` interface or execute any function that returns error safely while logging error. +// Package logerrcapture implements robust error handling in defer statements using provided logger. +// +// The Close a `io.Closer` interface or execute any function that returns error safely while logging error. // It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `logerrcapture` provides utility functions to capture error and log it via provided // logger, while still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer logerrcapture.Do(logger, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer logerrcapture.Do(logger, closer.Close, "log format message") // -// ... -// } +// ... 
+// } // // If Close returns error, `logerrcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File, logger logerrcapture.Logger) error { +// defer logerrcapture.Do(logger, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The logerrcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Recommended: Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/errcapture if you want to return error instead of just logging (causing // hard error). +package logerrcapture diff --git a/vendor/github.com/efficientgo/core/merrors/doc.go b/vendor/github.com/efficientgo/core/merrors/doc.go new file mode 100644 index 00000000000..a670493f46a --- /dev/null +++ b/vendor/github.com/efficientgo/core/merrors/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package merrors implements multi error implementation that chains errors on the same level. +// Supports errors.As and errors.Is functions. +// +// Example 1: +// +// return merrors.New(err1, err2).Err() +// +// Example 2: +// +// merr := merrors.New(err1) +// merr.Add(err2, errOrNil3) +// for _, err := range errs { +// merr.Add(err) +// } +// return merr.Err() +// +// Example 3: +// +// func CloseAll(closers []io.Closer) error { +// errs := merrors.New() +// for _ , c := range closers { +// errs.Add(c.Close()) +// } +// return errs.Err() +// } +package merrors diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go b/vendor/github.com/efficientgo/core/merrors/merrors.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go rename to vendor/github.com/efficientgo/core/merrors/merrors.go diff --git a/vendor/github.com/efficientgo/core/testutil/doc.go b/vendor/github.com/efficientgo/core/testutil/doc.go new file mode 100644 index 00000000000..ac4ca1df551 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package testutil is a minimal testing utility with only few functions like `Assert`, `Ok`, `NotOk` for errors and `Equals`. +// It also provides TB ("TestOrBench") utility for union of testing and benchmarks. +package testutil diff --git a/vendor/github.com/efficientgo/core/testutil/internal/difflib.go b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go new file mode 100644 index 00000000000..aea79d21543 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go @@ -0,0 +1,645 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no loger maintained. 
+ +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
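To make the opcode encoding documented above concrete, a small hypothetical in-package test (not part of the vendored file) could print the edit operations between two line slices:

package internal

import (
	"fmt"
	"testing"
)

func TestOpCodesExample(t *testing.T) {
	ops := NewMatcher(
		SplitLines("a\nb\nc\n"),
		SplitLines("a\nc\nd\n"),
	).GetOpCodes()
	for _, op := range ops {
		// Tag is 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal);
		// I1:I2 index the first sequence, J1:J2 the second.
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}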
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
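The testutil change further down in this patch calls this machinery through GetUnifiedDiffString; a minimal sketch of the call shape (the ExampleUnifiedDiff wrapper and the field values are illustrative):

package internal

import "fmt"

func ExampleUnifiedDiff() {
	diffText, err := GetUnifiedDiffString(UnifiedDiff{
		A:        SplitLines("hello\nworld\n"),
		B:        SplitLines("hello\nthere\n"),
		FromFile: "Expected",
		ToFile:   "Actual",
		Context:  3,
	})
	if err != nil {
		return
	}
	// Prints a ---/+++ header followed by @@ hunks, one line per change.
	fmt.Print(diffText)
}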
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go b/vendor/github.com/efficientgo/core/testutil/testorbench.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go rename to vendor/github.com/efficientgo/core/testutil/testorbench.go diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go b/vendor/github.com/efficientgo/core/testutil/testutil.go similarity index 68% rename from vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go rename to vendor/github.com/efficientgo/core/testutil/testutil.go index 8a46d2e68f5..c74f368507b 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go +++ b/vendor/github.com/efficientgo/core/testutil/testutil.go @@ -12,9 +12,8 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" - "github.com/pmezard/go-difflib/difflib" - "go.uber.org/goleak" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/testutil/internal" ) // Assert fails the test if the condition is false. 
@@ -77,6 +76,68 @@ func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { tb.Fatal(sprintfWithLimit("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))) } +// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. +func FaultOrPanicToErr(f func()) (err error) { + // Set this go routine to panic on segfault to allow asserting on those. + debug.SetPanicOnFault(true) + defer func() { + if r := recover(); r != nil { + err = errors.Newf("invoked function panicked or caused segmentation fault: %v", r) + } + debug.SetPanicOnFault(false) + }() + + f() + + return err +} + +// ContainsStringSlice fails the test if needle is not contained within haystack, if haystack or needle is +// an empty slice, or if needle is longer than haystack. +func ContainsStringSlice(tb testing.TB, haystack, needle []string) { + _, file, line, _ := runtime.Caller(1) + + if !contains(haystack, needle) { + tb.Fatalf(sprintfWithLimit("\033[31m%s:%d: %#v does not contain %#v\033[39m\n\n", filepath.Base(file), line, haystack, needle)) + } +} + +func contains(haystack, needle []string) bool { + if len(haystack) == 0 || len(needle) == 0 { + return false + } + + if len(haystack) < len(needle) { + return false + } + + for i := 0; i < len(haystack); i++ { + outer := i + + for j := 0; j < len(needle); j++ { + // End of the haystack but not the end of the needle, end + if outer == len(haystack) { + return false + } + + // No match, try the next index of the haystack + if haystack[outer] != needle[j] { + break + } + + // End of the needle and it still matches, end + if j == len(needle)-1 { + return true + } + + // This element matches between the two slices, try the next one + outer++ + } + } + + return false +} + func sprintfWithLimit(act string, v ...interface{}) string { s := fmt.Sprintf(act, v...) if len(s) > 10000 { @@ -128,9 +189,9 @@ func diff(expected interface{}, actual interface{}) string { a = reflect.ValueOf(actual).String() } - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), + diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ + A: internal.SplitLines(e), + B: internal.SplitLines(a), FromFile: "Expected", FromDate: "", ToFile: "Actual", @@ -139,43 +200,3 @@ func diff(expected interface{}, actual interface{}) string { }) return "\n\nDiff:\n" + diff } - -// TolerantVerifyLeakMain verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeakMain(m *testing.M) { - goleak.VerifyTestMain(m, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// TolerantVerifyLeak verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. 
-func TolerantVerifyLeak(t *testing.T) { - goleak.VerifyNone(t, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. -func FaultOrPanicToErr(f func()) (err error) { - // Set this go routine to panic on segfault to allow asserting on those. - debug.SetPanicOnFault(true) - defer func() { - if r := recover(); r != nil { - err = errors.Errorf("invoked function panicked or caused segmentation fault: %v", r) - } - debug.SetPanicOnFault(false) - }() - - f() - - return err -} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go deleted file mode 100644 index d1eaf8333cb..00000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package merrors - -// Safe multi error implementation that chains errors on the same level. Supports errors.As and errors.Is functions. -// -// Example 1: -// -// return merrors.New(err1, err2).Err() -// -// Example 2: -// -// merr := merrors.New(err1) -// merr.Add(err2, errOrNil3) -// for _, err := range errs { -// merr.Add(err) -// } -// return merr.Err() -// diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go deleted file mode 100644 index dc9f63f1b0a..00000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package testutil - -// Simplistic assertion helpers for testing code. TestOrBench utils for union of testing and benchmarks. 
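For reference, a small hypothetical test showing the helpers added or relocated in the testutil diff above, assuming the new github.com/efficientgo/core/testutil import path:

package example_test

import (
	"testing"

	"github.com/efficientgo/core/testutil"
)

func TestRelocatedHelpers(t *testing.T) {
	// ContainsStringSlice fails the test unless needle occurs contiguously within haystack.
	testutil.ContainsStringSlice(t, []string{"a", "b", "c"}, []string{"b", "c"})

	// FaultOrPanicToErr converts a panic (or segfault) raised inside the closure into an error.
	err := testutil.FaultOrPanicToErr(func() { panic("boom") })
	testutil.NotOk(t, err)
}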
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore index 8081bd0ffbf..12ecd6ae9b7 100644 --- a/vendor/github.com/minio/minio-go/v7/.gitignore +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -1,4 +1,5 @@ *~ *.test validator -golangci-lint \ No newline at end of file +golangci-lint +functional_tests \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index dfc0c2d5377..875b949c6dd 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -12,8 +12,7 @@ linters: - govet - ineffassign - gosimple - - deadcode - - structcheck + - unused - gocritic issues: @@ -25,3 +24,4 @@ issues: - "captLocal:" - "ifElseChain:" - "elseif:" + - "should have a package comment" diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index ac4a328f0e3..ace66670542 100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -9,7 +9,7 @@ checks: lint vet test examples functional-test lint: @mkdir -p ${GOPATH}/bin - @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2 + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin @echo "Running $@ check" @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml @@ -27,7 +27,8 @@ examples: @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) functional-test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests clean: @echo "Cleaning up all the generated files" diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index 4e3abf71d2d..9b6bbbec342 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -2,7 +2,7 @@ The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). +This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). This document assumes that you have a working [Go development environment](https://golang.org/doc/install). @@ -126,53 +126,53 @@ mc ls play/mymusic/ ## API Reference The full API Reference is available here. 
-* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API Reference : Bucket notification Operations -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) +* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) ### API Reference : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) ### API Reference : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* 
[`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## Full Examples @@ -236,8 +236,8 @@ The full API Reference is available here. 
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ## Contribute [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md index 64e79341196..97087c61e0c 100644 --- a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md +++ b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md @@ -14,7 +14,7 @@ MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对 - Ceph Object Gateway - Riak CS -本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。 +本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html)。 本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。 @@ -140,52 +140,52 @@ mc ls play/mymusic/ ## API文档 完整的API文档在这里。 -* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整API文档](https://min.io/docs/minio/linux/developers/go/API.html) ### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展) +* 
[`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO 扩展) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO 扩展) ### API文档 : 操作文件对象 -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) ### API文档 : 操作对象 -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API文档 : Presigned操作 -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* 
[`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## 完整示例 @@ -253,8 +253,8 @@ mc ls play/mymusic/ * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## 了解更多 -* [完整文档](https://docs.min.io) -* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整文档](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API文档](https://min.io/docs/minio/linux/developers/go/API.html) ## 贡献 [贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index b59924a3d51..835c8bd8ad5 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -221,7 +221,7 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) } if dstOpts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if !dstOpts.Internal.LegalholdTimestamp.IsZero() { headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go index 5a8d473c623..1f479387772 100644 --- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -45,19 +45,20 @@ type StringMap map[string]string // on the first line is initialize it. func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { *m = StringMap{} - type xmlMapEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` + type Item struct { + Key string + Value string } for { - var e xmlMapEntry + var e Item err := d.Decode(&e) if err == io.EOF { break - } else if err != nil { + } + if err != nil { return err } - (*m)[e.XMLName.Local] = e.Value + (*m)[e.Key] = e.Value } return nil } @@ -86,6 +87,8 @@ type UploadInfo struct { ExpirationRuleID string // Verified checksum values, if any. + // Values are base64 (standard) encoded. + // For multipart objects this is a checksum of the checksum of each part. ChecksumCRC32 string ChecksumCRC32C string ChecksumSHA1 string @@ -118,7 +121,7 @@ type ObjectInfo struct { Metadata http.Header `json:"metadata" xml:"-"` // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - UserMetadata StringMap `json:"userMetadata"` + UserMetadata StringMap `json:"userMetadata,omitempty"` // x-amz-tagging values in their k/v values. UserTags map[string]string `json:"userTags"` @@ -146,7 +149,8 @@ type ObjectInfo struct { // - FAILED // - REPLICA (on the destination) ReplicationStatus string `xml:"ReplicationStatus"` - + // set to true if delete marker has backing object version on target, and eligible to replicate + ReplicationReady bool // Lifecycle expiry-date and ruleID associated with the expiry // not to be confused with `Expires` HTTP header. 
Expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index dd781cae33b..33ca394588e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -67,14 +67,14 @@ type ErrorResponse struct { // // For example: // -// import s3 "github.com/minio/minio-go/v7" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... +// import s3 "github.com/minio/minio-go/v7" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... func ToErrorResponse(err error) ErrorResponse { switch err := err.(type) { case ErrorResponse: diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 08ae95c4249..0082c1fa762 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -27,8 +27,9 @@ import ( // AdvancedGetOptions for internal use by MinIO server - not intended for client use. type AdvancedGetOptions struct { - ReplicationDeleteMarker bool - ReplicationProxyRequest string + ReplicationDeleteMarker bool + IsReplicationReadyForDeleteMarker bool + ReplicationProxyRequest string } // GetObjectOptions are used to specify additional headers or options diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index b624b07786e..d216afb3972 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -32,11 +32,10 @@ import ( // This call requires explicit authentication, no anonymous requests are // allowed for listing buckets. // -// api := client.New(....) -// for message := range api.ListBuckets(context.Background()) { -// fmt.Println(message) -// } -// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) @@ -71,21 +70,28 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List // Return object owner information by default fetchOwner := true + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. 
if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -99,9 +105,9 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -138,6 +144,14 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List if !result.IsTruncated { return } + + // Add this to catch broken S3 API implementations. + if continuationToken == "" { + sendObjectInfo(ObjectInfo{ + Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), + }) + return + } } }(objectStatCh) return objectStatCh @@ -263,20 +277,28 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // If recursive we do not delimit. delimiter = "" } + + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -289,9 +311,9 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // Get list of objects a maximum of 1000 per request. result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -344,21 +366,28 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts delimiter = "" } + sendObjectInfo := func(info ObjectInfo) { + select { + case resultCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } @@ -375,9 +404,9 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts // Get list of objects a maximum of 1000 per request. result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -659,11 +688,10 @@ func (o *ListObjectsOptions) Set(key, value string) { // ListObjects returns objects list after evaluating the passed options. // -// api := client.New(....) -// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { -// fmt.Println(object) -// } -// +// api := client.New(....) 
+// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// fmt.Println(object) +// } func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { if opts.WithVersions { return c.listObjectVersions(ctx, bucketName, opts) @@ -694,12 +722,12 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb // If you enable recursive as 'true' this function will return back all // the multipart objects in a given bucket name. // -// api := client.New(....) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } +// api := client.New(....) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) } @@ -916,7 +944,7 @@ func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName strin } // listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded +// - lists some or all (up to 1000) parts that have been uploaded // for a specific multipart upload // // You can use the request parameters as selection criteria to return diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go index 149a536e42f..7fad7600c3d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -65,10 +65,9 @@ func isReadAt(reader io.Reader) (ok bool) { // NOTE: Assumption here is that for any object to be uploaded to any S3 compatible // object storage it will have the following parameters as constants. // -// maxPartsCount - 10000 -// minPartSize - 16MiB -// maxMultipartPutObjectSize - 5TiB -// +// maxPartsCount - 10000 +// minPartSize - 16MiB +// maxMultipartPutObjectSize - 5TiB func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { // object size is '-1' set it to 5TiB. var unknownSize bool diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 5fec17a1c45..3c9a13ff20b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -159,8 +159,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj crcBytes = append(crcBytes, cSum...) } + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. 
- objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -269,57 +270,73 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object return initiateMultipartUploadResult, nil } +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + // uploadPart - Uploads a part in a multipart upload. -func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName string, uploadID string, reader io.Reader, partNumber int, md5Base64 string, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool, customHeader http.Header) (ObjectPart, error) { +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } - if err := s3utils.CheckValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } - if size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } - if size <= -1 { - return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } - if partNumber <= 0 { + if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } - if uploadID == "" { + if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. - urlValues.Set("uploadId", uploadID) + urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. - if customHeader == nil { - customHeader = make(http.Header) + if p.customHeader == nil { + p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. 
- if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, + bucketName: p.bucketName, + objectName: p.objectName, queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - streamSha256: streamSha256, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, } // Execute PUT on each part. @@ -330,7 +347,7 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. @@ -341,8 +358,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } - objPart.Size = size - objPart.PartNumber = partNumber + objPart.Size = p.size + objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil @@ -431,5 +448,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object Location: completeMultipartUploadResult.Location, Expiration: expTime, ExpirationRuleID: ruleID, + + ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, + ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, + ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, + ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 464bde7f305..e3a14c59d8e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -107,11 +107,19 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN return UploadInfo{}, err } + withChecksum := c.trailingHeaderSupport + if withChecksum { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload in progress, if the // function returns any error, since we do not resume @@ -177,14 +185,33 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // As a special case if partNumber is lastPartNumber, we // calculate the offset based on the last part size. 
if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) + readOffset = size - lastPartSize partSize = lastPartSize } sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + var trailer = make(http.Header, 1) + if withChecksum { + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + } // Proceed to upload the part. - objPart, err := c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, "", "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, nil) + p := uploadPartParams{bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: sectionReader, + partNumber: uploadReq.PartNum, + size: partSize, + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + sha256Hex: "", + trailer: trailer, + } + objPart, err := c.uploadPart(ctx, p) if err != nil { uploadedPartsCh <- uploadedPartRes{ Error: err, @@ -221,8 +248,12 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, }) } } @@ -235,6 +266,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + if withChecksum { + // Add hash of hashes. + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + for _, part := range complMultipartUpload.Parts { + cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) + if err == nil { + crc.Write(cs) + } + } + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err @@ -339,8 +382,8 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Update progress reader appropriately to the latest offset // as we read from the source. 
hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, hooked, partNumber, md5Base64, "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -419,6 +462,7 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } + var readSeeker io.Seeker if size > 0 { if isReadAt(reader) && !isObject(reader) { seeker, ok := reader.(io.Seeker) @@ -428,35 +472,49 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument(err.Error()) } reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + readSeeker = reader.(io.Seeker) } } } var md5Base64 string if opts.SendContentMd5 { - // Create a buffer. - buf := make([]byte, size) + // Calculate md5sum. + hash := c.md5Hasher() - length, rErr := readFull(reader, buf) - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr + if readSeeker != nil { + if _, err := io.Copy(hash, reader); err != nil { + return UploadInfo{}, err + } + // Seek back to beginning of io.NewSectionReader's offset. + _, err = readSeeker.Seek(0, io.SeekStart) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } else { + // Create a buffer. + buf := make([]byte, size) + + length, err := readFull(reader, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, err + } + + hash.Write(buf[:length]) + reader = bytes.NewReader(buf[:length]) } - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - reader = bytes.NewReader(buf[:length]) hash.Close() } // Update progress reader appropriately to the latest offset as we // read from the source. - readSeeker := newHook(reader, opts.Progress) + progressReader := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. - return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts) + return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) } // putObjectDo - executes the put object http operation. 
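[Reviewer note, not part of the patch: the api-put-object-multipart.go and api-put-object-streaming.go hunks above wire multipart uploads to the new trailing-header CRC32C checksums. A minimal usage sketch follows, assuming the Options.TrailingHeaders field introduced later in this vendor bump; the endpoint, credentials, bucket and file names are placeholders.]

// Hedged sketch (not part of this patch): opting in to trailing-header
// CRC32C checksums for multipart uploads with the vendored minio-go update.
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
		// TrailingHeaders is the option added in this update; it is only
		// honored for V4 signatures.
		TrailingHeaders: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	f, err := os.Open("/tmp/large-object.bin")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	st, _ := f.Stat()

	// With trailing headers enabled, the multipart stream path attaches an
	// x-amz-checksum-crc32c trailer to each part and UploadInfo carries the
	// aggregated checksum-of-checksums value.
	info, err := client.PutObject(context.Background(), "my-bucket", "large-object.bin",
		f, st.Size(), minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded, CRC32C checksum:", info.ChecksumCRC32C)
}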
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 321ad00aae4..5735bee5eb2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -159,7 +159,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) } if opts.Internal.ReplicationRequest { - header.Set(minIOBucketReplicationRequest, "") + header.Set(minIOBucketReplicationRequest, "true") } if !opts.Internal.LegalholdTimestamp.IsZero() { header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) @@ -269,6 +269,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str } if size < 0 { + if opts.DisableMultipart { + return UploadInfo{}, errors.New("no length provided and multipart disabled") + } return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } @@ -366,7 +369,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, "", int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 0fee90226b6..c0ff31a5bd2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -82,8 +82,8 @@ func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, // RemoveBucket deletes the bucket name. // -// All objects (including all object versions and delete markers). -// in the bucket must be deleted before successfully attempting this request. +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -166,7 +166,7 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) } if opts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if opts.ForceDelete { headers.Set(minIOForceDelete, "true") diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index b1b848f4a65..827395ee125 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -316,17 +316,15 @@ type completeMultipartUploadResult struct { // CompletePart sub container lists individual part numbers and their // md5sum, part of completeMultipartUpload. 
type CompletePart struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` - // Part number identifies the part. PartNumber int ETag string // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` } // completeMultipartUpload container for completing multipart upload. diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index 6deb5f5dc94..418d6cb2547 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -70,6 +70,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if opts.Internal.ReplicationDeleteMarker { headers.Set(minIOBucketReplicationDeleteMarker, "true") } + if opts.Internal.IsReplicationReadyForDeleteMarker { + headers.Set(isMinioTgtReplicationReady, "true") + } urlValues := make(url.Values) if opts.VersionID != "" { @@ -90,6 +93,7 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if resp != nil { deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { errResp := ErrorResponse{ @@ -105,8 +109,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, }, errResp } return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationReady: replicationReady, // whether delete marker can be replicated }, httpRespToErrorResponse(resp, bucketName, objectName) } } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 6598e9d92eb..a93a6c6207a 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -20,8 +20,10 @@ package minio import ( "bytes" "context" + "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "math/rand" @@ -93,6 +95,8 @@ type Client struct { sha256Hasher func() md5simd.Hasher healthStatus int32 + + trailingHeaderSupport bool } // Options for New method @@ -103,6 +107,10 @@ type Options struct { Region string BucketLookup BucketLookupType + // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. + TrailingHeaders bool + // Custom hash routines. Leave nil to use standard. CustomMD5 func() md5simd.Hasher CustomSHA256 func() md5simd.Hasher @@ -111,13 +119,13 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.37" + libraryVersion = "v7.0.45" ) // User Agent should always following the below style. // Please open an issue to discuss any new changes here. 
// -// MinIO (OS; ARCH) LIB/VER APP/VER +// MinIO (OS; ARCH) LIB/VER APP/VER const ( libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion @@ -246,6 +254,9 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { if clnt.sha256Hasher == nil { clnt.sha256Hasher = newSHA256Hasher } + + clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. clnt.lookup = opts.BucketLookup @@ -312,9 +323,9 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { // Hash materials provides relevant initialized hash algo writers // based on the expected signature type. // -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { hashSums = make(map[string][]byte) hashAlgos = make(map[string]md5simd.Hasher) @@ -419,6 +430,8 @@ type requestMetadata struct { contentMD5Base64 string // carries base64 encoded md5sum contentSHA256Hex string // carries hex encoded sha256sum streamSha256 bool + addCrc bool + trailer http.Header // (http.Request).Trailer. Requires v4 signature. } // dumpHTTP - dump HTTP request and response. @@ -581,6 +594,17 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } } + if metadata.addCrc { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + } // Instantiate a new request. var req *http.Request req, err = c.newRequest(ctx, method, metadata) @@ -592,6 +616,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } + // Initiate the request. res, err = c.do(req) if err != nil { @@ -632,7 +657,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // code dictates invalid region, we can retry the request // with the new region. // - // Additionally we should only retry if bucketLocation and custom + // Additionally, we should only retry if bucketLocation and custom // region is empty. if c.region == "" { switch errResponse.Code { @@ -814,9 +839,12 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request // Add signature version '2' authorization header. req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.streamSha256 && !c.secure: - // Streaming signature is used by default for a PUT object request. 
Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. req = signer.StreamingSignV4(req, accessKeyID, secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: @@ -824,11 +852,17 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request shaHeader := unsignedPayload if metadata.contentSHA256Hex != "" { shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer } req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) } // Return request. diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index dee83b870cc..1c3e8e6f646 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -46,6 +46,10 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + // Total number of parallel workers used for multipart operation. const totalWorkers = 4 @@ -96,6 +100,9 @@ const ( minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" // Header indicates last legalhold update time on source minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" - - minIOForceDelete = "x-minio-force-delete" + minIOForceDelete = "x-minio-force-delete" + // Header indicates delete marker replication request can be sent by source now. + minioTgtReplicationReady = "X-Minio-Replication-Ready" + // Header asks if delete marker replication request can be sent by source now. + isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" ) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index f6132e79a03..207a3870207 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -88,8 +88,19 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke // PutObjectPart - Upload an object part. 
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - streamSha256 := true - return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256, nil) + p := uploadPartParams{ + bucketName: bucket, + objectName: object, + uploadID: uploadID, + reader: data, + partNumber: partID, + md5Base64: md5Base64, + sha256Hex: sha256Hex, + size: size, + sse: sse, + streamSha256: true, + } + return c.uploadPart(ctx, p) } // ListObjectParts - List uploaded parts of an incomplete upload.x diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 483b5cb115e..e86e142e552 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -2054,10 +2054,10 @@ func testPutObjectWithChecksums() { ChecksumSHA1 string ChecksumSHA256 string }{ - {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE(), ChecksumCRC32: "yXTVFQ=="}, - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "zXqj7Q=="}, - {header: "x-amz-checksum-sha1", hasher: sha1.New(), ChecksumSHA1: "SwmAs3F75Sw/sE4dHehkvYtn9UE="}, - {header: "x-amz-checksum-sha256", hasher: sha256.New(), ChecksumSHA256: "8Tlu9msuw/cpmWNEnQx97axliBjiE6gK1doiY0N9WuA="}, + {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, + {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, + {header: "x-amz-checksum-sha1", hasher: sha1.New()}, + {header: "x-amz-checksum-sha256", hasher: sha256.New()}, } for i, test := range tests { @@ -2113,10 +2113,10 @@ func testPutObjectWithChecksums() { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2132,10 +2132,10 @@ func testPutObjectWithChecksums() { return } - cmpChecksum(st.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(st.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(st.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(st.ChecksumCRC32C, test.ChecksumCRC32C) + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) if st.Size != int64(bufSize) { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) @@ -4204,10 +4204,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) return } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - 
} if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) return @@ -4216,10 +4212,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) return } - if err := policy.SetContentTypeStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err) - return - } if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) return diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index 12ed0842770..e964b521737 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -19,6 +19,7 @@ package credentials import ( "bytes" + "crypto/sha256" "encoding/hex" "encoding/xml" "errors" @@ -31,7 +32,6 @@ import ( "time" "github.com/minio/minio-go/v7/pkg/signer" - sha256 "github.com/minio/sha256-simd" ) // AssumeRoleResponse contains the result of successful AssumeRole request. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go index 6dc8e9d0525..ddccfb173fe 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -31,18 +31,17 @@ package credentials // will cache that Provider for all calls to IsExpired(), until Retrieve is // called again after IsExpired() is true. // -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. -// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") -// if err != nil { -// log.Fatalln(err) -// } +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) // +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } type Chain struct { Providers []Provider curr Provider diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index 6b93a27fbd4..af6104967c7 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -65,10 +65,11 @@ type Provider interface { // provider's struct. // // Example: -// type IAMCredentialProvider struct { -// Expiry -// ... -// } +// +// type IAMCredentialProvider struct { +// Expiry +// ... 
+// } type Expiry struct { // The date/time when to expire on expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 00000000000..afbfad559ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json @@ -0,0 +1,7 @@ +{ + "Version": 1, + "SessionToken": "token", + "AccessKeyId": "accessKey", + "SecretAccessKey": "secret", + "Expiration": "9999-04-27T16:02:25.000Z" +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample index 7fc91d9d204..e2dc1bfecb1 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -10,3 +10,6 @@ aws_secret_access_key = secret [with_colon] aws_access_key_id: accessKey aws_secret_access_key: secret + +[with_process] +credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go index 0c94477b752..fbfb105491d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -28,35 +28,33 @@ // // Example of using the environment variable credentials. // -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } // // Example of forcing credentials to expire and be refreshed on the next Get(). // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. // -// -// Custom Provider +// # Custom Provider // // Each Provider built into this package also provides a helper method to generate // a Credentials pointer setup with the provider. To use a custom Provider just // create a type which satisfies the Provider interface and pass it to the // NewCredentials method. 
// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} // +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() package credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index cbdcfe25653..da09707e339 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -18,17 +18,33 @@ package credentials import ( + "encoding/json" + "errors" "os" + "os/exec" "path/filepath" + "strings" + "time" ini "gopkg.in/ini.v1" ) +// A externalProcessCredentials stores the output of a credential_process +type externalProcessCredentials struct { + Version int + SessionToken string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Expiration time.Time +} + // A FileAWSCredentials retrieves credentials from the current user's home // directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type FileAWSCredentials struct { + Expiry + // Path to the shared credentials file. // // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the @@ -89,6 +105,33 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { // Default to empty string if not found. token := iniProfile.Key("aws_session_token") + // If credential_process is defined, obtain credentials by executing + // the external process + credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) + if credentialProcess != "" { + args := strings.Fields(credentialProcess) + if len(args) <= 1 { + return Value{}, errors.New("invalid credential process args") + } + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.Output() + if err != nil { + return Value{}, err + } + var externalProcessCredentials externalProcessCredentials + err = json.Unmarshal([]byte(out), &externalProcessCredentials) + if err != nil { + return Value{}, err + } + p.retrieved = true + p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: externalProcessCredentials.AccessKeyID, + SecretAccessKey: externalProcessCredentials.SecretAccessKey, + SessionToken: externalProcessCredentials.SessionToken, + SignerType: SignatureV4, + }, nil + } p.retrieved = true return Value{ AccessKeyID: id.String(), @@ -98,11 +141,6 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { }, nil } -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - // loadProfiles loads from the file pointed to by shared credentials filename for profile. // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 00000000000..6db26c036f5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go @@ -0,0 +1,24 @@ +//go:build !fips +// +build !fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 00000000000..640258242f9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go @@ -0,0 +1,24 @@ +//go:build fips +// +build fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go index 06e68e7367e..163fa62b427 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -67,9 +67,9 @@ var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { // Type is the server-side-encryption method. It represents one of // the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption type Type string const ( diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go index d0a47163838..126661a9e68 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -22,14 +22,14 @@ type identity struct { PrincipalID string `json:"principalId"` } -// event bucket metadata. +// event bucket metadata. 
type bucketMeta struct { Name string `json:"name"` OwnerIdentity identity `json:"ownerIdentity"` ARN string `json:"arn"` } -// event object metadata. +// event object metadata. type objectMeta struct { Key string `json:"key"` Size int64 `json:"size,omitempty"` @@ -40,7 +40,7 @@ type objectMeta struct { Sequencer string `json:"sequencer"` } -// event server specific metadata. +// event server specific metadata. type eventMeta struct { SchemaVersion string `json:"s3SchemaVersion"` ConfigurationID string `json:"configurationId"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index 75a1f609621..931ca5bc284 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -21,6 +21,7 @@ import ( "encoding/xml" "errors" "fmt" + "strings" "github.com/minio/minio-go/v7/pkg/set" ) @@ -29,7 +30,8 @@ import ( type EventType string // The role of all event types are described in : -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +// +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations const ( ObjectCreatedAll EventType = "s3:ObjectCreated:*" ObjectCreatedPut = "s3:ObjectCreated:Put" @@ -87,6 +89,27 @@ func NewArn(partition, service, region, accountID, resource string) Arn { } } +var ( + // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' + ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") + // ErrInvalidArnFormat is returned when ARN string format is not valid + ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:::::'") +) + +// NewArnFromString parses string representation of ARN into Arn object. +// Returns an error if the string format is incorrect. +func NewArnFromString(arn string) (Arn, error) { + parts := strings.Split(arn, ":") + if len(parts) != 6 { + return Arn{}, ErrInvalidArnFormat + } + if parts[0] != "arn" { + return Arn{}, ErrInvalidArnPrefix + } + + return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil +} + // String returns the string format of the ARN func (arn Arn) String() string { return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 00000000000..5838b9de995 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,225 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// StreamingUSReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides chunked upload +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for streaming signature. 
+ prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index b1296d2b176..49e999b01a3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -32,13 +32,17 @@ import ( // Reference for constants used below - // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. 
"f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" ) // Request headers to be ignored while calculating seed signature for @@ -60,7 +64,7 @@ func getSignedChunkLength(chunkDataSize int64) int64 { } // getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { if dataLen <= 0 { return 0 } @@ -73,6 +77,15 @@ func getStreamLength(dataLen, chunkSize int64) int64 { streamLen += getSignedChunkLength(remainingBytes) } streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + return streamLen } @@ -91,18 +104,41 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ return strings.Join(stringToSignParts, "\n") } +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + // prepareStreamingRequest - prepares a request with appropriate // headers before computing the seed signature. func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + req.TransferEncoding = []string{"aws-chunked"} + } + if sessionToken != "" { req.Header.Set("X-Amz-Security-Token", sessionToken) } req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) } @@ -122,6 +158,16 @@ func buildChunkSignature(chunkData []byte, reqTime time.Time, region, return getSignature(signingKey, chunkStringToSign) } +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
+func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + // getSeedSignature - returns the seed signature for a given request. func (s *StreamingReader) setSeedSignature(req *http.Request) { // Get canonical request @@ -156,10 +202,11 @@ type StreamingReader struct { chunkNum int totalChunks int lastChunkSize int + trailer http.Header } // signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { // Compute chunk signature for next header signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, s.region, s.prevSignature, s.secretAccessKey) @@ -175,13 +222,40 @@ func (s *StreamingReader) signChunk(chunkLen int) { s.buf.Write(s.chunkBuf[:chunkLen]) // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) + if addCrLf { + s.buf.Write([]byte("\r\n")) + } // Reset chunkBufLen for next chunk read. s.chunkBufLen = 0 s.chunkNum++ } +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + // Compute chunk signature + signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + // setStreamingAuthHeader - builds and sets authorization header value // for streaming signature. func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { @@ -222,6 +296,11 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, lastChunkSize: int(dataLen % payloadChunkSize), } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } // Add the request headers required for chunk upload signing. @@ -272,7 +351,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { (s.chunkNum == s.totalChunks-1 && s.chunkBufLen == s.lastChunkSize) { // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) + s.signChunk(s.chunkBufLen, true) break } } @@ -289,7 +368,11 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { } // Sign the chunk and write it to s.buf. - s.signChunk(0) + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. 
+ s.addSignedTrailer(s.trailer) + } break } return 0, err diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index cf7921d1f64..fa4f8c91e6c 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -162,11 +162,12 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func preStringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -189,11 +190,12 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func stringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -281,8 +283,9 @@ var resourceList = []string{ // From the Amazon docs: // // CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { // Save request URL. requestURL := req.URL diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index a12608ebb0b..34914490c0e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -42,7 +42,6 @@ const ( ServiceTypeSTS = "sts" ) -// // Excerpts from @lsegal - // https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. // @@ -57,7 +56,6 @@ const ( // * Accept-Encoding // Some S3 servers like Hitachi Content Platform do not honor this header for signature // calculation. -// var v4IgnoredHeaders = map[string]bool{ "Accept-Encoding": true, "Authorization": true, @@ -177,12 +175,13 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // getCanonicalRequest generate a canonical request of style. // // canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// +// +// \n +// \n +// \n +// \n +// \n +// func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ @@ -264,11 +263,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4STS - signature v4 for STS request. 
func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) } // Internal function called for different service types. -func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -285,6 +284,15 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati req.Header.Set("X-Amz-Security-Token", sessionToken) } + if len(trailer) > 0 { + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.TransferEncoding = []string{"aws-chunked"} + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + } + hashedPayload := getHashedPayload(req) if serviceType == ServiceTypeSTS { // Content sha256 header is not sent with the request @@ -322,11 +330,22 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati auth := strings.Join(parts, ", ") req.Header.Set("Authorization", auth) + if len(trailer) > 0 { + // Use custom chunked encoding. + req.Trailer = trailer + return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC()) + } return &req } // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) +} + +// SignV4Trailer sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go index b54fa4c77cf..333f1aa250f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -19,10 +19,9 @@ package signer import ( "crypto/hmac" + "crypto/sha256" "net/http" "strings" - - "github.com/minio/sha256-simd" ) // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go index d7c65af5a0f..98ae17efad3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go @@ -1,5 +1,6 @@ /* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,6 +21,8 @@ import ( "encoding/xml" "io" "net/url" + "regexp" + "sort" "strings" "unicode/utf8" ) @@ -63,8 +66,17 @@ const ( maxTagCount = 50 ) +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +// borrowed from this article and also testing various ASCII characters following regex +// is supported by AWS S3 for both tags and values. +var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`) + func checkKey(key string) error { - if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { + if len(key) == 0 { + return errInvalidTagKey + } + + if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) { return errInvalidTagKey } @@ -72,8 +84,10 @@ func checkKey(key string) error { } func checkValue(value string) error { - if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") { - return errInvalidTagValue + if value != "" { + if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) { + return errInvalidTagValue + } } return nil @@ -136,11 +150,26 @@ type tagSet struct { } func (tags tagSet) String() string { - vals := make(url.Values) - for key, value := range tags.tagMap { - vals.Set(key, value) + if len(tags.tagMap) == 0 { + return "" } - return vals.Encode() + var buf strings.Builder + keys := make([]string, 0, len(tags.tagMap)) + for k := range tags.tagMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + keyEscaped := url.QueryEscape(k) + valueEscaped := url.QueryEscape(tags.tagMap[k]) + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(keyEscaped) + buf.WriteByte('=') + buf.WriteString(valueEscaped) + } + return buf.String() } func (tags *tagSet) remove(key string) { @@ -175,7 +204,7 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error { } func (tags tagSet) toMap() map[string]string { - m := make(map[string]string) + m := make(map[string]string, len(tags.tagMap)) for key, value := range tags.tagMap { m[key] = value } @@ -188,6 +217,7 @@ func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { Tags []Tag `xml:"Tag"` }{} + tagList.Tags = make([]Tag, 0, len(tags.tagMap)) for key, value := range tags.tagMap { tagList.Tags = append(tagList.Tags, Tag{key, value}) } @@ -213,7 +243,7 @@ func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { return errTooManyTags } - m := map[string]string{} + m := make(map[string]string, len(tagList.Tags)) for _, tag := range tagList.Tags { if _, found := m[tag.Key]; found { return errDuplicateTagKey @@ -311,14 +341,49 @@ func ParseObjectXML(reader io.Reader) (*Tags, error) { return unmarshalXML(reader, true) } +// stringsCut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. 
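+// This mirrors strings.Cut from the Go standard library (added in Go 1.18); it is
+// kept as a local copy here, presumably so the package still builds on older Go versions.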
+func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (tags *tagSet) parseTags(tgs string) (err error) { + for tgs != "" { + var key string + key, tgs, _ = stringsCut(tgs, "&") + if key == "" { + continue + } + key, value, _ := stringsCut(key, "=") + key, err1 := url.QueryUnescape(key) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + value, err1 = url.QueryUnescape(value) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + if err = tags.set(key, value, true); err != nil { + return err + } + } + return err +} + // Parse decodes HTTP query formatted string into tags which is limited by isObject. // A query formatted string is like "key1=value1&key2=value2". func Parse(s string, isObject bool) (*Tags, error) { - values, err := url.ParseQuery(s) - if err != nil { - return nil, err - } - tagging := &Tags{ TagSet: &tagSet{ tagMap: make(map[string]string), @@ -326,10 +391,8 @@ func Parse(s string, isObject bool) (*Tags, error) { }, } - for key := range values { - if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { - return nil, err - } + if err := tagging.TagSet.parseTags(s); err != nil { + return nil, err } return tagging, nil diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 7bf560304a2..4b3df19127f 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -32,12 +32,11 @@ const expirationDateFormat = "2006-01-02T15:04:05.999Z" // // Example: // -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } type policyCondition struct { matchType string condition string @@ -98,10 +97,8 @@ func (p *PostPolicy) SetKey(key string) error { // SetKeyStartsWith - Sets an object name that an policy based upload // can start with. +// Can use an empty value ("") to allow any key. func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return errInvalidArgument("Object prefix is empty.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$key", @@ -172,10 +169,8 @@ func (p *PostPolicy) SetContentType(contentType string) error { // SetContentTypeStartsWith - Sets what content-type of the object for this policy // based upload can start with. +// Can use an empty value ("") to allow any content-type. func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { - if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" { - return errInvalidArgument("No content type specified.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$Content-Type", @@ -286,10 +281,14 @@ func (p *PostPolicy) SetUserData(key string, value string) error { } // addNewPolicy - internal helper to validate adding new policies. +// Can use starts-with with an empty value ("") to allow any content within a form field. 
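+// For example, p.SetKeyStartsWith("") now records a starts-with condition on $key
+// that matches any object name instead of returning an error.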
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + if policyCond.matchType == "" || policyCond.condition == "" { return errInvalidArgument("Policy fields are empty.") } + if policyCond.matchType != "starts-with" && policyCond.value == "" { + return errInvalidArgument("Policy value is empty.") + } p.conditions = append(p.conditions, policyCond) return nil } diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index 3a4cacfe811..589c0e5498f 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -28,8 +28,10 @@ var awsS3EndpointMap = map[string]string{ "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", @@ -38,6 +40,7 @@ var awsS3EndpointMap = map[string]string{ "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index f32f84ab0ab..a8a45b1a8ce 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -20,6 +20,7 @@ package minio import ( "context" "crypto/md5" + fipssha256 "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/xml" @@ -39,6 +40,7 @@ import ( "time" md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/sha256-simd" ) @@ -520,6 +522,9 @@ func newMd5Hasher() md5simd.Hasher { } func newSHA256Hasher() md5simd.Hasher { + if encrypt.FIPS { + return &hashWrapper{Hash: fipssha256.New(), isSHA256: true} + } return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} } @@ -627,3 +632,38 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { } return false } + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. 
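+// Every byte read from the wrapped reader is also written into the configured hash;
+// when the underlying reader returns io.EOF, the done callback receives the final sum.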
+func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go index ae3ebc03b97..c607a163a32 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go @@ -19,6 +19,7 @@ import ( "encoding/hex" "fmt" "net/http" + "strings" "sync" "github.com/go-kit/log" @@ -113,7 +114,12 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { hashedPassword = "$2y$10$QOauhQNbBCuQDKes6eFzPeMqBSjb7Mr5DUmpZ/VcEd00UAV/LDeSi" } - cacheKey := hex.EncodeToString(append(append([]byte(user), []byte(hashedPassword)...), []byte(pass)...)) + cacheKey := strings.Join( + []string{ + hex.EncodeToString([]byte(user)), + hex.EncodeToString([]byte(hashedPassword)), + hex.EncodeToString([]byte(pass)), + }, ":") authOk, ok := u.cache.get(cacheKey) if !ok { @@ -122,7 +128,7 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(pass)) u.bcryptMtx.Unlock() - authOk = err == nil + authOk = validUser && err == nil u.cache.set(cacheKey, authOk) } diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index d44ef3382c7..c415422357a 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -11,11 +11,15 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ## Unreleased ### Fixed +- [#33](https://github.com/thanos-io/objstore/pull/33) Tracing: Add `ContextWithTracer()` to inject the tracer into the context. +- [#34](https://github.com/thanos-io/objstore/pull/34) Fix ignored options when creating shared credential Azure client. ### Added - [#15](https://github.com/thanos-io/objstore/pull/15) Add Oracle Cloud Infrastructure Object Storage Bucket support. - [#25](https://github.com/thanos-io/objstore/pull/25) S3: Support specifying S3 storage class. +- [#32](https://github.com/thanos-io/objstore/pull/32) Swift: Support authentication using application credentials. ### Changed +- [#38](https://github.com/thanos-io/objstore/pull/38) *: Upgrade minio-go version to `v7.0.45`. ### Removed diff --git a/vendor/github.com/thanos-io/objstore/README.md b/vendor/github.com/thanos-io/objstore/README.md index 642dd18df77..e914cf0678b 100644 --- a/vendor/github.com/thanos-io/objstore/README.md +++ b/vendor/github.com/thanos-io/objstore/README.md @@ -125,7 +125,7 @@ Current object storage client implementations: | [OpenStack Swift](#openstack-swift) | Beta (working PoC) | Production Usage | yes | @FUSAKLA | | [Tencent COS](#tencent-cos) | Beta | Production Usage | no | @jojohappy,@hanjm | | [AliYun OSS](#aliyun-oss) | Beta | Production Usage | no | @shaulboozhiao,@wujinhu | -| [Baidu BOS](#baidu-bos) | Beta | Production Usage | no | ?? 
| +| [Baidu BOS](#baidu-bos) | Beta | Production Usage | no | @yahaa | | [Local Filesystem](#filesystem) | Stable | Testing and Demo only | yes | @bwplotka | | [Oracle Cloud Infrastructure Object Storage](#oracle-cloud-infrastructure-object-storage) | Beta | Production Usage | yes | @aarontams,@gaurav-05,@ericrrath | @@ -473,6 +473,9 @@ config: password: "" domain_id: "" domain_name: "" + application_credential_id: "" + application_credential_name: "" + application_credential_secret: "" project_id: "" project_name: "" project_domain_id: "" diff --git a/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go b/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go index d1315ad2859..26b58d24f78 100644 --- a/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go +++ b/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go @@ -7,7 +7,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" ) // TLSConfig configures the options for TLS connections. @@ -60,7 +60,7 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } diff --git a/vendor/github.com/thanos-io/objstore/inmem.go b/vendor/github.com/thanos-io/objstore/inmem.go index f90ed6d90c6..1b0a6439869 100644 --- a/vendor/github.com/thanos-io/objstore/inmem.go +++ b/vendor/github.com/thanos-io/objstore/inmem.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "io" - "io/ioutil" "sort" "strings" "sync" @@ -112,7 +111,7 @@ func (b *InMemBucket) Get(_ context.Context, name string) (io.ReadCloser, error) return nil, errNotFound } - return ioutil.NopCloser(bytes.NewReader(file)), nil + return io.NopCloser(bytes.NewReader(file)), nil } // GetRange returns a new range reader for the given object name and range. @@ -129,15 +128,15 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64 } if int64(len(file)) < off { - return ioutil.NopCloser(bytes.NewReader(nil)), nil + return io.NopCloser(bytes.NewReader(nil)), nil } if length == -1 { - return ioutil.NopCloser(bytes.NewReader(file[off:])), nil + return io.NopCloser(bytes.NewReader(file[off:])), nil } if length <= 0 { - return ioutil.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0") + return io.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0") } if int64(len(file)) <= off+length { @@ -145,7 +144,7 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64 length = int64(len(file)) - off } - return ioutil.NopCloser(bytes.NewReader(file[off : off+length])), nil + return io.NopCloser(bytes.NewReader(file[off : off+length])), nil } // Exists checks if the given directory exists in memory. 
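A minimal sketch of the range-read semantics implemented above, reusing the "@test-data@" fixture from this library's acceptance test (object and variable names are illustrative only):

    package main

    import (
        "context"
        "strings"

        "github.com/thanos-io/objstore"
    )

    func main() {
        ctx := context.Background()
        b := objstore.NewInMemBucket()
        _ = b.Upload(ctx, "obj", strings.NewReader("@test-data@"))
        rc, _ := b.GetRange(ctx, "obj", 1, 3)    // reads "tes"
        rest, _ := b.GetRange(ctx, "obj", 1, -1) // length -1 reads to the end: "test-data@"
        _, err := b.GetRange(ctx, "obj", 1, 0)   // rejected: length cannot be smaller or equal 0
        _, _, _ = rc, rest, err
    }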
@@ -171,7 +170,7 @@ func (b *InMemBucket) Attributes(_ context.Context, name string) (ObjectAttribut func (b *InMemBucket) Upload(_ context.Context, name string, r io.Reader) error { b.mtx.Lock() defer b.mtx.Unlock() - body, err := ioutil.ReadAll(r) + body, err := io.ReadAll(r) if err != nil { return err } diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go index 9ba2ea2d6ae..9a07f2fdf06 100644 --- a/vendor/github.com/thanos-io/objstore/objstore.go +++ b/vendor/github.com/thanos-io/objstore/objstore.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/efficientgo/tools/core/pkg/logerrcapture" + "github.com/efficientgo/core/logerrcapture" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go index c2ac65864b5..958068f7ca3 100644 --- a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go +++ b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go @@ -43,7 +43,7 @@ func getContainerClient(conf Config) (*azblob.ContainerClient, error) { if err != nil { return nil, err } - containerClient, err := azblob.NewContainerClientWithSharedKey(containerURL, cred, nil) + containerClient, err := azblob.NewContainerClientWithSharedKey(containerURL, cred, opt) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go index 8189168e807..0c75745d0b3 100644 --- a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go +++ b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go @@ -7,11 +7,10 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" - "github.com/efficientgo/tools/core/pkg/errcapture" + "github.com/efficientgo/core/errcapture" "github.com/pkg/errors" "gopkg.in/yaml.v2" @@ -67,7 +66,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt return nil } - files, err := ioutil.ReadDir(absDir) + files, err := os.ReadDir(absDir) if err != nil { return err } diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index 3ce8af950cc..1208239a45b 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "os" "runtime" @@ -17,7 +16,7 @@ import ( "testing" "time" - "github.com/efficientgo/tools/core/pkg/logerrcapture" + "github.com/efficientgo/core/logerrcapture" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/minio/minio-go/v7" @@ -286,7 +285,7 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B } case SSEC: - key, err := ioutil.ReadFile(config.SSEConfig.EncryptionKey) + key, err := os.ReadFile(config.SSEConfig.EncryptionKey) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go index 03f75012b31..f30c655dc74 100644 --- a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go +++ b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/efficientgo/tools/core/pkg/errcapture" + 
"github.com/efficientgo/core/errcapture" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/ncw/swift" @@ -40,27 +40,30 @@ var DefaultConfig = Config{ } type Config struct { - AuthVersion int `yaml:"auth_version"` - AuthUrl string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserId string `yaml:"user_id"` - Password string `yaml:"password"` - DomainId string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` - ChunkSize int64 `yaml:"large_object_chunk_size"` - SegmentContainerName string `yaml:"large_object_segments_container_name"` - Retries int `yaml:"retries"` - ConnectTimeout model.Duration `yaml:"connect_timeout"` - Timeout model.Duration `yaml:"timeout"` - UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` + AuthVersion int `yaml:"auth_version"` + AuthUrl string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserId string `yaml:"user_id"` + Password string `yaml:"password"` + DomainId string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ApplicationCredentialID string `yaml:"application_credential_id"` + ApplicationCredentialName string `yaml:"application_credential_name"` + ApplicationCredentialSecret string `yaml:"application_credential_secret"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` + ChunkSize int64 `yaml:"large_object_chunk_size"` + SegmentContainerName string `yaml:"large_object_segments_container_name"` + Retries int `yaml:"retries"` + ConnectTimeout model.Duration `yaml:"connect_timeout"` + Timeout model.Duration `yaml:"timeout"` + UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` } func parseConfig(conf []byte) (*Config, error) { @@ -76,25 +79,28 @@ func configFromEnv() (*Config, error) { } config := Config{ - AuthVersion: c.AuthVersion, - AuthUrl: c.AuthUrl, - Password: c.ApiKey, - Username: c.UserName, - UserId: c.UserId, - DomainId: c.DomainId, - DomainName: c.Domain, - ProjectID: c.TenantId, - ProjectName: c.Tenant, - ProjectDomainID: c.TenantDomainId, - ProjectDomainName: c.TenantDomain, - RegionName: c.Region, - ContainerName: os.Getenv("OS_CONTAINER_NAME"), - ChunkSize: DefaultConfig.ChunkSize, - SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), - Retries: c.Retries, - ConnectTimeout: model.Duration(c.ConnectTimeout), - Timeout: model.Duration(c.Timeout), - UseDynamicLargeObjects: false, + AuthVersion: c.AuthVersion, + AuthUrl: c.AuthUrl, + Username: c.UserName, + UserId: c.UserId, + Password: c.ApiKey, + DomainId: c.DomainId, + DomainName: c.Domain, + ApplicationCredentialID: c.ApplicationCredentialId, + ApplicationCredentialName: c.ApplicationCredentialName, + ApplicationCredentialSecret: c.ApplicationCredentialSecret, + ProjectID: c.TenantId, + ProjectName: c.Tenant, + ProjectDomainID: c.TenantDomainId, + ProjectDomainName: c.TenantDomain, + RegionName: c.Region, + 
ContainerName: os.Getenv("OS_CONTAINER_NAME"), + ChunkSize: DefaultConfig.ChunkSize, + SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), + Retries: c.Retries, + ConnectTimeout: model.Duration(c.ConnectTimeout), + Timeout: model.Duration(c.Timeout), + UseDynamicLargeObjects: false, } if os.Getenv("SWIFT_CHUNK_SIZE") != "" { var err error @@ -111,21 +117,24 @@ func configFromEnv() (*Config, error) { func connectionFromConfig(sc *Config) *swift.Connection { connection := swift.Connection{ - Domain: sc.DomainName, - DomainId: sc.DomainId, - UserName: sc.Username, - UserId: sc.UserId, - ApiKey: sc.Password, - AuthUrl: sc.AuthUrl, - Retries: sc.Retries, - Region: sc.RegionName, - AuthVersion: sc.AuthVersion, - Tenant: sc.ProjectName, - TenantId: sc.ProjectID, - TenantDomain: sc.ProjectDomainName, - TenantDomainId: sc.ProjectDomainID, - ConnectTimeout: time.Duration(sc.ConnectTimeout), - Timeout: time.Duration(sc.Timeout), + AuthVersion: sc.AuthVersion, + AuthUrl: sc.AuthUrl, + UserName: sc.Username, + UserId: sc.UserId, + ApiKey: sc.Password, + DomainId: sc.DomainId, + Domain: sc.DomainName, + ApplicationCredentialId: sc.ApplicationCredentialID, + ApplicationCredentialName: sc.ApplicationCredentialName, + ApplicationCredentialSecret: sc.ApplicationCredentialSecret, + TenantId: sc.ProjectID, + Tenant: sc.ProjectName, + TenantDomain: sc.ProjectDomainName, + TenantDomainId: sc.ProjectDomainID, + Region: sc.RegionName, + Retries: sc.Retries, + ConnectTimeout: time.Duration(sc.ConnectTimeout), + Timeout: time.Duration(sc.Timeout), } return &connection } diff --git a/vendor/github.com/thanos-io/objstore/testing.go b/vendor/github.com/thanos-io/objstore/testing.go index 3a38bcef229..d750142af1d 100644 --- a/vendor/github.com/thanos-io/objstore/testing.go +++ b/vendor/github.com/thanos-io/objstore/testing.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math/rand" "sort" "strings" @@ -16,7 +15,7 @@ import ( "testing" "time" - "github.com/efficientgo/tools/core/pkg/testutil" + "github.com/efficientgo/core/testutil" ) func CreateTemporaryTestBucketName(t testing.TB) string { @@ -107,7 +106,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rc1, err := bkt.Get(ctx, "id1/obj_1.some") testutil.Ok(t, err) defer func() { testutil.Ok(t, rc1.Close()) }() - content, err := ioutil.ReadAll(rc1) + content, err := io.ReadAll(rc1) testutil.Ok(t, err) testutil.Equals(t, "@test-data@", string(content)) @@ -119,7 +118,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rc2, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, 3) testutil.Ok(t, err) defer func() { testutil.Ok(t, rc2.Close()) }() - content, err = ioutil.ReadAll(rc2) + content, err = io.ReadAll(rc2) testutil.Ok(t, err) testutil.Equals(t, "tes", string(content)) @@ -127,7 +126,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rcUnspecifiedLen, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, -1) testutil.Ok(t, err) defer func() { testutil.Ok(t, rcUnspecifiedLen.Close()) }() - content, err = ioutil.ReadAll(rcUnspecifiedLen) + content, err = io.ReadAll(rcUnspecifiedLen) testutil.Ok(t, err) testutil.Equals(t, "test-data@", string(content)) @@ -142,7 +141,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rcLength, err := bkt.GetRange(ctx, "id1/obj_1.some", 3, 9999) testutil.Ok(t, err) defer func() { testutil.Ok(t, rcLength.Close()) }() - content, err = ioutil.ReadAll(rcLength) + content, err = io.ReadAll(rcLength) testutil.Ok(t, err) testutil.Equals(t, "st-data@", string(content)) diff --git 
a/vendor/github.com/thanos-io/objstore/tlsconfig.go b/vendor/github.com/thanos-io/objstore/tlsconfig.go index f705fa31b6b..81c4f08736a 100644 --- a/vendor/github.com/thanos-io/objstore/tlsconfig.go +++ b/vendor/github.com/thanos-io/objstore/tlsconfig.go @@ -7,7 +7,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" ) // NewTLSConfig creates a new tls.Config from the given TLSConfig. @@ -46,7 +46,7 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } diff --git a/vendor/github.com/thanos-io/objstore/tracing/tracing.go b/vendor/github.com/thanos-io/objstore/tracing/tracing.go index f0214206958..9beb2bd8133 100644 --- a/vendor/github.com/thanos-io/objstore/tracing/tracing.go +++ b/vendor/github.com/thanos-io/objstore/tracing/tracing.go @@ -29,6 +29,11 @@ type Tracer interface { GetTraceIDFromSpanContext(ctx opentracing.SpanContext) (string, bool) } +// ContextWithTracer returns a new `context.Context` that holds a reference to given opentracing.Tracer. +func ContextWithTracer(ctx context.Context, tracer opentracing.Tracer) context.Context { + return context.WithValue(ctx, tracerKey, tracer) +} + // tracerFromContext extracts opentracing.Tracer from the given context. func tracerFromContext(ctx context.Context) opentracing.Tracer { val := ctx.Value(tracerKey) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 7886e5d8693..5edf27f2423 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -402,3 +402,34 @@ func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucke level.Info(logger).Log("msg", "block has been marked for no compaction", "block", id) return nil } + +// MarkForNoDownsample creates a file which marks block to be not downsampled. 
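+// If the marker already exists the call logs a warning and is a no-op; otherwise it
+// uploads a no-downsample-mark.json file into the block directory and increments the
+// provided markedForNoDownsample counter.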
+func MarkForNoDownsample(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason metadata.NoDownsampleReason, details string, markedForNoDownsample prometheus.Counter) error { + m := path.Join(id.String(), metadata.NoDownsampleMarkFilename) + noDownsampleMarkExists, err := bkt.Exists(ctx, m) + if err != nil { + return errors.Wrapf(err, "check exists %s in bucket", m) + } + if noDownsampleMarkExists { + level.Warn(logger).Log("msg", "requested to mark for no deletion, but file already exists; this should not happen; investigate", "err", errors.Errorf("file %s already exists in bucket", m)) + return nil + } + noDownsampleMark, err := json.Marshal(metadata.NoDownsampleMark{ + ID: id, + Version: metadata.NoDownsampleMarkVersion1, + + NoDownsampleTime: time.Now().Unix(), + Reason: reason, + Details: details, + }) + if err != nil { + return errors.Wrap(err, "json encode no downsample mark") + } + + if err := bkt.Upload(ctx, m, bytes.NewBuffer(noDownsampleMark)); err != nil { + return errors.Wrapf(err, "upload file %s to bucket", m) + } + markedForNoDownsample.Inc() + level.Info(logger).Log("msg", "block has been marked for no downsample", "block", id) + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 1a9e213847d..16435bfd8fb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -80,6 +80,9 @@ const ( // MarkedForNoCompactionMeta is label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric. MarkedForNoCompactionMeta = "marked-for-no-compact" + // MarkedForNoDownsampleMeta is label for blocks which are loaded but also marked for no downsample. This label is also counted in `loaded` label metric. + MarkedForNoDownsampleMeta = "marked-for-no-downsample" + // Modified label values. replicaRemovedMeta = "replica-label-removed" ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go index 8af9a5a1f87..83273eb3430 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go @@ -24,11 +24,15 @@ const ( // NoCompactMarkFilename is the known json filename for optional file storing details about why block has to be excluded from compaction. // If such file is present in block dir, it means the block has to excluded from compaction (both vertical and horizontal) or rewrite (e.g deletions). NoCompactMarkFilename = "no-compact-mark.json" - + // NoDownsampleMarkFilename is the known json filenanme for optional file storing details about why block has to be excluded from downsampling. + // If such file is present in block dir, it means the block has to be excluded from downsampling. + NoDownsampleMarkFilename = "no-downsample-mark.json" // DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos. DeletionMarkVersion1 = 1 // NoCompactMarkVersion1 is the version of no-compact-mark file supported by Thanos. NoCompactMarkVersion1 = 1 + // NoDownsampleVersion1 is the version of no-downsample-mark file supported by Thanos. + NoDownsampleMarkVersion1 = 1 ) var ( @@ -62,9 +66,14 @@ func (m *DeletionMark) markerFilename() string { return DeletionMarkFilename } // NoCompactReason is a reason for a block to be excluded from compaction. 
type NoCompactReason string +// NoDownsampleReason is a reason for a block to be excluded from downsample. +type NoDownsampleReason string + const ( // ManualNoCompactReason is a custom reason of excluding from compaction that should be added when no-compact mark is added for unknown/user specified reason. ManualNoCompactReason NoCompactReason = "manual" + // ManualNoDownsampleReason is a custom reason of excluding from downsample that should be added when no-downsample mark is added for unknown/user specified reason. + ManualNoDownsampleReason NoDownsampleReason = "manual" // IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424) // This reason can be ignored when vertical block sharding will be implemented. IndexSizeExceedingNoCompactReason = "index-size-exceeding" @@ -88,6 +97,22 @@ type NoCompactMark struct { func (n *NoCompactMark) markerFilename() string { return NoCompactMarkFilename } +// NoDownsampleMark marker stores reason of block being excluded from downsample if needed. +type NoDownsampleMark struct { + // ID of the tsdb block. + ID ulid.ULID `json:"id"` + // Version of the file. + Version int `json:"version"` + // Details is a human readable string giving details of reason. + Details string `json:"details,omitempty"` + + // NoDownsampleTime is a unix timestamp of when the block was marked for no downsample. + NoDownsampleTime int64 `json:"no_downsample_time"` + Reason NoDownsampleReason `json:"reason"` +} + +func (n *NoDownsampleMark) markerFilename() string { return NoDownsampleMarkFilename } + // ReadMarker reads the given mark file from
<dir>/<marker filename>
/.json in bucket. func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.InstrumentedBucketReader, dir string, marker Marker) error { markerFile := path.Join(dir, marker.markerFilename()) @@ -113,6 +138,10 @@ func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.Instrumente if version := marker.(*NoCompactMark).Version; version != NoCompactMarkVersion1 { return errors.Errorf("unexpected no-compact-mark file version %d, expected %d", version, NoCompactMarkVersion1) } + case NoDownsampleMarkFilename: + if version := marker.(*NoDownsampleMark).Version; version != NoDownsampleMarkVersion1 { + return errors.Errorf("unexpected no-downsample-mark file version %d, expected %d", version, NoDownsampleMarkVersion1) + } case DeletionMarkFilename: if version := marker.(*DeletionMark).Version; version != DeletionMarkVersion1 { return errors.Errorf("unexpected deletion-mark file version %d, expected %d", version, DeletionMarkVersion1) diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 7731eab877d..4aa551ae3e9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -4,11 +4,13 @@ package downsample import ( + "context" "fmt" "math" "math/rand" "os" "path/filepath" + "sync" "time" "github.com/go-kit/log" @@ -22,7 +24,10 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/tsdbutil" + "golang.org/x/sync/errgroup" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/runutil" @@ -734,3 +739,106 @@ func SamplesFromTSDBSamples(samples []tsdbutil.Sample) []sample { } return res } + +// GatherNoDownsampleMarkFilter is a block.Fetcher filter that passes all metas. +// While doing it, it gathers all no-downsample-mark.json markers. +type GatherNoDownsampleMarkFilter struct { + logger log.Logger + bkt objstore.InstrumentedBucketReader + noDownsampleMarkedMap map[ulid.ULID]*metadata.NoDownsampleMark + concurrency int + mtx sync.Mutex +} + +// NewGatherNoDownsampleMarkFilter creates GatherNoDownsampleMarkFilter. +func NewGatherNoDownsampleMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader) *GatherNoDownsampleMarkFilter { + return &GatherNoDownsampleMarkFilter{ + logger: logger, + bkt: bkt, + concurrency: 1, + } +} + +// NoDownsampleMarkedBlocks returns block ids that were marked for no downsample. +func (f *GatherNoDownsampleMarkFilter) NoDownsampleMarkedBlocks() map[ulid.ULID]*metadata.NoDownsampleMark { + f.mtx.Lock() + copiedNoDownsampleMarked := make(map[ulid.ULID]*metadata.NoDownsampleMark, len(f.noDownsampleMarkedMap)) + for k, v := range f.noDownsampleMarkedMap { + copiedNoDownsampleMarked[k] = v + } + f.mtx.Unlock() + + return copiedNoDownsampleMarked +} + +// TODO (@rohitkochhar): reduce code duplication here by combining +// this code with that of GatherNoCompactionMarkFilter +// Filter passes all metas, while gathering no downsample markers. 
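+// Markers are read by f.concurrency workers: blocks without a marker are skipped,
+// partial (unparsable) markers are logged and skipped, and any other read error fails
+// the whole filter pass.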
+func (f *GatherNoDownsampleMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced block.GaugeVec, modified block.GaugeVec) error { + f.mtx.Lock() + f.noDownsampleMarkedMap = make(map[ulid.ULID]*metadata.NoDownsampleMark) + f.mtx.Unlock() + + // Make a copy of block IDs to check, in order to avoid concurrency issues + // between the scheduler and workers. + blockIDs := make([]ulid.ULID, 0, len(metas)) + for id := range metas { + blockIDs = append(blockIDs, id) + } + + var ( + eg errgroup.Group + ch = make(chan ulid.ULID, f.concurrency) + ) + + for i := 0; i < f.concurrency; i++ { + eg.Go(func() error { + var lastErr error + for id := range ch { + m := &metadata.NoDownsampleMark{} + + if err := metadata.ReadMarker(ctx, f.logger, f.bkt, id.String(), m); err != nil { + if errors.Cause(err) == metadata.ErrorMarkerNotFound { + continue + } + if errors.Cause(err) == metadata.ErrorUnmarshalMarker { + level.Warn(f.logger).Log("msg", "found partial no-downsample-mark.json; if we will see it happening often for the same block, consider manually deleting no-downsample-mark.json from the object storage", "block", id, "err", err) + continue + } + // Remember the last error and continue draining the channel. + lastErr = err + continue + } + + f.mtx.Lock() + f.noDownsampleMarkedMap[id] = m + f.mtx.Unlock() + synced.WithLabelValues(block.MarkedForNoDownsampleMeta).Inc() + } + + return lastErr + }) + } + + // Workers scheduled, distribute blocks. + eg.Go(func() error { + defer close(ch) + + for _, id := range blockIDs { + select { + case ch <- id: + // Nothing to do. + case <-ctx.Done(): + return ctx.Err() + } + } + + return nil + }) + + if err := eg.Wait(); err != nil { + return errors.Wrap(err, "filter blocks marked for no downsample") + } + + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index a7a5d3abd96..b1fbd898a25 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -37,6 +37,8 @@ import ( "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/index" "golang.org/x/sync/errgroup" + + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -101,6 +103,9 @@ const ( minBlockSyncConcurrency = 1 enableChunkHashCalculation = true + + // SeriesBatchSize is the default batch size when fetching series from object storage. 
+ SeriesBatchSize = 10000 ) var ( @@ -137,6 +142,7 @@ type bucketStoreMetrics struct { seriesFetchDuration prometheus.Histogram postingsFetchDuration prometheus.Histogram + chunkFetchDuration prometheus.Histogram } func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { @@ -168,21 +174,25 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { }) m.seriesDataTouched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_touched", - Help: "How many items of a data type in a block were touched for a single series request.", + Name: "thanos_bucket_store_series_data_touched", + Help: "Number of items of a data type touched to fulfill a single Store API series request.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesDataFetched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_fetched", - Help: "How many items of a data type in a block were fetched for a single series request.", + Name: "thanos_bucket_store_series_data_fetched", + Help: "Number of items of a data type retrieved to fulfill a single Store API series request.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesDataSizeTouched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_size_touched_bytes", - Help: "Size of all items of a data type in a block were touched for a single series request.", + Name: "thanos_bucket_store_series_data_size_touched_bytes", + Help: "Total size of items of a data type touched to fulfill a single Store API series request in Bytes.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesDataSizeFetched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_size_fetched_bytes", - Help: "Size of all items of a data type in a block were fetched for a single series request.", + Name: "thanos_bucket_store_series_data_size_fetched_bytes", + Help: "Total size of items of a data type fetched to fulfill a single Store API series request in Bytes.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesBlocksQueried = promauto.With(reg).NewSummary(prometheus.SummaryOpts{ @@ -271,6 +281,12 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.chunkFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_chunks_fetch_duration_seconds", + Help: "The total time spent fetching chunks within a single request a store gateway.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) + m.emptyPostingCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_empty_postings_total", Help: "Total number of empty postings when fetching block series.", @@ -301,6 +317,7 @@ type BucketStore struct { indexReaderPool *indexheader.ReaderPool buffers sync.Pool chunkPool pool.Bytes + seriesBatchSize int // Sets of blocks that have the same labels. They are indexed by a hash over their label set. 
mtx sync.RWMutex @@ -415,6 +432,12 @@ func WithChunkHashCalculation(enableChunkHashCalculation bool) BucketStoreOption } } +func WithSeriesBatchSize(seriesBatchSize int) BucketStoreOption { + return func(s *BucketStore) { + s.seriesBatchSize = seriesBatchSize + } +} + // NewBucketStore creates a new bucket backed store that implements the store API against // an object store bucket. It is optimized to work against high latency backends. func NewBucketStore( @@ -456,6 +479,7 @@ func NewBucketStore( postingOffsetsInMemSampling: postingOffsetsInMemSampling, enableSeriesResponseHints: enableSeriesResponseHints, enableChunkHashCalculation: enableChunkHashCalculation, + seriesBatchSize: SeriesBatchSize, } for _, option := range options { @@ -782,140 +806,218 @@ type seriesEntry struct { chks []storepb.AggrChunk } -type bucketSeriesSet struct { - set []seriesEntry - i int - err error -} +// blockSeriesClient is a storepb.Store_SeriesClient for a +// single TSDB block in object storage. +type blockSeriesClient struct { + grpc.ClientStream + ctx context.Context + logger log.Logger + extLset labels.Labels + + mint int64 + maxt int64 + indexr *bucketIndexReader + chunkr *bucketChunkReader + loadAggregates []storepb.Aggr + chunksLimiter ChunksLimiter + bytesLimiter BytesLimiter + + skipChunks bool + shardMatcher *storepb.ShardMatcher + calculateChunkHash bool + chunkFetchDuration prometheus.Histogram + + // Internal state. + i uint64 + postings []storage.SeriesRef + chkMetas []chunks.Meta + lset labels.Labels + symbolizedLset []symbolizedLabel + entries []seriesEntry + hasMorePostings bool + batchSize int +} + +func newBlockSeriesClient( + ctx context.Context, + logger log.Logger, + b *bucketBlock, + req *storepb.SeriesRequest, + limiter ChunksLimiter, + bytesLimiter BytesLimiter, + shardMatcher *storepb.ShardMatcher, + calculateChunkHash bool, + batchSize int, + chunkFetchDuration prometheus.Histogram, +) *blockSeriesClient { + var chunkr *bucketChunkReader + if !req.SkipChunks { + chunkr = b.chunkReader() + } -func newBucketSeriesSet(set []seriesEntry) *bucketSeriesSet { - return &bucketSeriesSet{ - set: set, - i: -1, + return &blockSeriesClient{ + ctx: ctx, + logger: logger, + extLset: b.extLset, + mint: req.MinTime, + maxt: req.MaxTime, + indexr: b.indexReader(), + chunkr: chunkr, + chunksLimiter: limiter, + bytesLimiter: bytesLimiter, + skipChunks: req.SkipChunks, + chunkFetchDuration: chunkFetchDuration, + + loadAggregates: req.Aggregates, + shardMatcher: shardMatcher, + calculateChunkHash: calculateChunkHash, + hasMorePostings: true, + batchSize: batchSize, } } -func (s *bucketSeriesSet) Next() bool { - if s.i >= len(s.set)-1 { - return false +func (b *blockSeriesClient) Close() { + if !b.skipChunks { + runutil.CloseWithLogOnErr(b.logger, b.chunkr, "series block") } - s.i++ - return true -} -func (s *bucketSeriesSet) At() (labels.Labels, []storepb.AggrChunk) { - return s.set[s.i].lset, s.set[s.i].chks + runutil.CloseWithLogOnErr(b.logger, b.indexr, "series block") } -func (s *bucketSeriesSet) Err() error { - return s.err +func (b *blockSeriesClient) MergeStats(stats *queryStats) *queryStats { + stats = stats.merge(b.indexr.stats) + if !b.skipChunks { + stats = stats.merge(b.chunkr.stats) + } + return stats } -// blockSeries returns series matching given matchers, that have some data in given time range. 
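Taken together, the client above replaces the one-shot blockSeries helper removed just below: callers resolve postings once, then page through batched responses until io.EOF. A rough sketch of that calling pattern, written as if it sat in the same store package (drainBlockSeries is a made-up helper name and error handling is trimmed):

func drainBlockSeries(client *blockSeriesClient, matchers []*labels.Matcher, seriesLimiter SeriesLimiter) ([]*storepb.Series, error) {
	defer client.Close()

	// Resolve postings up front; this also fixes the batch size used by nextBatch().
	if err := client.ExpandPostings(matchers, seriesLimiter); err != nil {
		return nil, err
	}

	var out []*storepb.Series
	for {
		resp, err := client.Recv() // each Recv may lazily load the next batch
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		if s := resp.GetSeries(); s != nil {
			out = append(out, s)
		}
	}
}

The Series, LabelNames and LabelValues changes further down follow this same ExpandPostings-then-Recv shape.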
-func blockSeries( - ctx context.Context, - extLset labels.Labels, - indexr *bucketIndexReader, - chunkr *bucketChunkReader, +func (b *blockSeriesClient) ExpandPostings( matchers []*labels.Matcher, - chunksLimiter ChunksLimiter, seriesLimiter SeriesLimiter, - bytesLimiter BytesLimiter, // Rate limiter for used bytes. - skipChunks bool, - minTime, maxTime int64, - loadAggregates []storepb.Aggr, - shardMatcher *storepb.ShardMatcher, - emptyPostingsCount prometheus.Counter, - calculateChunkHash bool, -) (storepb.SeriesSet, *queryStats, error) { - ps, err := indexr.ExpandedPostings(ctx, matchers, bytesLimiter) +) error { + ps, err := b.indexr.ExpandedPostings(b.ctx, matchers, b.bytesLimiter) if err != nil { - return nil, nil, errors.Wrap(err, "expanded matching posting") + return errors.Wrap(err, "expanded matching posting") } if len(ps) == 0 { - emptyPostingsCount.Inc() - return storepb.EmptySeriesSet(), indexr.stats, nil + return nil } - // Reserve series seriesLimiter if err := seriesLimiter.Reserve(uint64(len(ps))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded series limit") + return errors.Wrap(err, "exceeded series limit") } - // Preload all series index data. - // TODO(bwplotka): Consider not keeping all series in memory all the time. - // TODO(bwplotka): Do lazy loading in one step as `ExpandingPostings` method. - if err := indexr.PreloadSeries(ctx, ps, bytesLimiter); err != nil { - return nil, nil, errors.Wrap(err, "preload series") + b.postings = ps + if b.batchSize > len(ps) { + b.batchSize = len(ps) } + b.entries = make([]seriesEntry, 0, b.batchSize) + return nil +} - // Transform all series into the response types and mark their relevant chunks - // for preloading. - var ( - res []seriesEntry - symbolizedLset []symbolizedLabel - lset labels.Labels - chks []chunks.Meta - ) +func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { + for len(b.entries) == 0 && b.hasMorePostings { + if err := b.nextBatch(); err != nil { + return nil, err + } + } + + if len(b.entries) == 0 { + if b.chunkr != nil { + b.chunkFetchDuration.Observe(b.chunkr.stats.ChunksFetchDurationSum.Seconds()) + } + return nil, io.EOF + } + + next := b.entries[0] + b.entries = b.entries[1:] + + return storepb.NewSeriesResponse(&storepb.Series{ + Labels: labelpb.ZLabelsFromPromLabels(next.lset), + Chunks: next.chks, + }), nil +} + +func (b *blockSeriesClient) nextBatch() error { + start := b.i + end := start + SeriesBatchSize + if end > uint64(len(b.postings)) { + end = uint64(len(b.postings)) + } + b.i = end + + postingsBatch := b.postings[start:end] + if len(postingsBatch) == 0 { + b.hasMorePostings = false + return nil + } + + b.indexr.reset() + if !b.skipChunks { + b.chunkr.reset() + } - for _, id := range ps { - ok, err := indexr.LoadSeriesForTime(id, &symbolizedLset, &chks, skipChunks, minTime, maxTime) + if err := b.indexr.PreloadSeries(b.ctx, postingsBatch, b.bytesLimiter); err != nil { + return errors.Wrap(err, "preload series") + } + + b.entries = b.entries[:0] + for i := 0; i < len(postingsBatch); i++ { + ok, err := b.indexr.LoadSeriesForTime(postingsBatch[i], &b.symbolizedLset, &b.chkMetas, b.skipChunks, b.mint, b.maxt) if err != nil { - return nil, nil, errors.Wrap(err, "read series") + return errors.Wrap(err, "read series") } if !ok { - // No matching chunks for this time duration, skip series. 
continue } - if err := indexr.LookupLabelsSymbols(symbolizedLset, &lset); err != nil { - return nil, nil, errors.Wrap(err, "Lookup labels symbols") + if err := b.indexr.LookupLabelsSymbols(b.symbolizedLset, &b.lset); err != nil { + return errors.Wrap(err, "Lookup labels symbols") } - completeLabelset := labelpb.ExtendSortedLabels(lset, extLset) - if !shardMatcher.MatchesLabels(completeLabelset) { + completeLabelset := labelpb.ExtendSortedLabels(b.lset, b.extLset) + if !b.shardMatcher.MatchesLabels(completeLabelset) { continue } - s := seriesEntry{} - s.lset = completeLabelset + s := seriesEntry{lset: completeLabelset} + if b.skipChunks { + b.entries = append(b.entries, s) + continue + } - if !skipChunks { - // Schedule loading chunks. - s.refs = make([]chunks.ChunkRef, 0, len(chks)) - s.chks = make([]storepb.AggrChunk, 0, len(chks)) - for j, meta := range chks { - // seriesEntry s is appended to res, but not at every outer loop iteration, - // therefore len(res) is the index we need here, not outer loop iteration number. - if err := chunkr.addLoad(meta.Ref, len(res), j); err != nil { - return nil, nil, errors.Wrap(err, "add chunk load") - } - s.chks = append(s.chks, storepb.AggrChunk{ - MinTime: meta.MinTime, - MaxTime: meta.MaxTime, - }) - s.refs = append(s.refs, meta.Ref) - } + // Schedule loading chunks. + s.refs = make([]chunks.ChunkRef, 0, len(b.chkMetas)) + s.chks = make([]storepb.AggrChunk, 0, len(b.chkMetas)) - // Ensure sample limit through chunksLimiter if we return chunks. - if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded chunks limit") + for j, meta := range b.chkMetas { + if err := b.chunkr.addLoad(meta.Ref, len(b.entries), j); err != nil { + return errors.Wrap(err, "add chunk load") } + s.chks = append(s.chks, storepb.AggrChunk{ + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + }) + s.refs = append(s.refs, meta.Ref) } - res = append(res, s) - } + // Ensure sample limit through chunksLimiter if we return chunks. + if err := b.chunksLimiter.Reserve(uint64(len(b.chkMetas))); err != nil { + return errors.Wrap(err, "exceeded chunks limit") + } - if skipChunks { - return newBucketSeriesSet(res), indexr.stats, nil + b.entries = append(b.entries, s) } - if err := chunkr.load(ctx, res, loadAggregates, calculateChunkHash, bytesLimiter); err != nil { - return nil, nil, errors.Wrap(err, "load chunks") + if !b.skipChunks { + if err := b.chunkr.load(b.ctx, b.entries, b.loadAggregates, b.calculateChunkHash, b.bytesLimiter); err != nil { + return errors.Wrap(err, "load chunks") + } } - return newBucketSeriesSet(res), indexr.stats.merge(chunkr.stats), nil + return nil } func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Aggr, save func([]byte) ([]byte, error), calculateChecksum bool) error { @@ -1062,7 +1164,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes")) ctx = srv.Context() stats = &queryStats{} - res []storepb.SeriesSet + respSets []respSet mtx sync.Mutex g, gctx = errgroup.WithContext(ctx) resHints = &hintspb.SeriesResponseHints{} @@ -1097,64 +1199,63 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } for _, b := range blocks { - b := b + blk := b gctx := gctx if s.enableSeriesResponseHints { // Keep track of queried blocks. 
- resHints.AddQueriedBlock(b.meta.ULID) - } - - var chunkr *bucketChunkReader - // We must keep the readers open until all their data has been sent. - indexr := b.indexReader() - if !req.SkipChunks { - chunkr = b.chunkReader() - defer runutil.CloseWithLogOnErr(s.logger, chunkr, "series block") + resHints.AddQueriedBlock(blk.meta.ULID) } - // Defer all closes to the end of Series method. - defer runutil.CloseWithLogOnErr(s.logger, indexr, "series block") + shardMatcher := req.ShardInfo.Matcher(&s.buffers) + blockClient := newBlockSeriesClient( + srv.Context(), + s.logger, + blk, + req, + chunksLimiter, + bytesLimiter, + shardMatcher, + s.enableChunkHashCalculation, + s.seriesBatchSize, + s.metrics.chunkFetchDuration, + ) + defer blockClient.Close() g.Go(func() error { - span, newCtx := tracing.StartSpan(gctx, "bucket_store_block_series", tracing.Tags{ - "block.id": b.meta.ULID, - "block.mint": b.meta.MinTime, - "block.maxt": b.meta.MaxTime, - "block.resolution": b.meta.Thanos.Downsample.Resolution, + span, _ := tracing.StartSpan(gctx, "bucket_store_block_series", tracing.Tags{ + "block.id": blk.meta.ULID, + "block.mint": blk.meta.MinTime, + "block.maxt": blk.meta.MaxTime, + "block.resolution": blk.meta.Thanos.Downsample.Resolution, }) - defer span.Finish() - - shardMatcher := req.ShardInfo.Matcher(&s.buffers) - defer shardMatcher.Close() - part, pstats, err := blockSeries( - newCtx, - b.extLset, - indexr, - chunkr, - blockMatchers, - chunksLimiter, - seriesLimiter, - bytesLimiter, - req.SkipChunks, - req.MinTime, req.MaxTime, - req.Aggregates, + + if err := blockClient.ExpandPostings(blockMatchers, seriesLimiter); err != nil { + span.Finish() + return errors.Wrapf(err, "fetch series for block %s", blk.meta.ULID) + } + onClose := func() { + mtx.Lock() + stats = blockClient.MergeStats(stats) + mtx.Unlock() + } + part := newLazyRespSet( + srv.Context(), + span, + 10*time.Minute, + blk.meta.ULID.String(), + []labels.Labels{blk.extLset}, + onClose, + blockClient, shardMatcher, + false, s.metrics.emptyPostingCount, - s.enableChunkHashCalculation, ) - if err != nil { - return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) - } mtx.Lock() - res = append(res, part) - stats = stats.merge(pstats) + respSets = append(respSets, part) mtx.Unlock() - // No info about samples exactly, so pass at least chunks. - span.SetTag("processed.series", len(indexr.loadedSeries)) - span.SetTag("processed.chunks", pstats.chunksFetched) return nil }) } @@ -1204,47 +1305,53 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } return status.Error(code, err.Error()) } - stats.blocksQueried = len(res) + stats.blocksQueried = len(respSets) stats.GetAllDuration = time.Since(begin) s.metrics.seriesGetAllDuration.Observe(stats.GetAllDuration.Seconds()) s.metrics.seriesBlocksQueried.Observe(float64(stats.blocksQueried)) } + // Merge the sub-results from each selected block. tracing.DoInSpan(ctx, "bucket_store_merge_all", func(ctx context.Context) { + defer func() { + for _, resp := range respSets { + resp.Close() + } + }() begin := time.Now() - - // NOTE: We "carefully" assume series and chunks are sorted within each SeriesSet. This should be guaranteed by - // blockSeries method. In worst case deduplication logic won't deduplicate correctly, which will be accounted later. - set := storepb.MergeSeriesSets(res...) 
+ set := NewDedupResponseHeap(NewProxyResponseHeap(respSets...)) for set.Next() { - var series storepb.Series - - stats.mergedSeriesCount++ - - var lset labels.Labels - if req.SkipChunks { - lset, _ = set.At() - } else { - lset, series.Chunks = set.At() + at := set.At() + warn := at.GetWarning() + if warn != "" { + // TODO(fpetkovski): Consider deprecating string based warnings in favor of a + // separate protobuf message containing the grpc code and + // a human readable error message. + err = status.Error(storepb.GRPCCodeFromWarn(warn), at.GetWarning()) + return + } - stats.mergedChunksCount += len(series.Chunks) - s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + series := at.GetSeries() + if series != nil { + stats.mergedSeriesCount++ + if !req.SkipChunks { + stats.mergedChunksCount += len(series.Chunks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + } } - series.Labels = labelpb.ZLabelsFromPromLabels(lset) - if err = srv.Send(storepb.NewSeriesResponse(&series)); err != nil { + if err = srv.Send(at); err != nil { err = status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) return } } - if set.Err() != nil { - err = status.Error(codes.Unknown, errors.Wrap(set.Err(), "expand series set").Error()) - return - } stats.MergeDuration = time.Since(begin) s.metrics.seriesMergeDuration.Observe(stats.MergeDuration.Seconds()) err = nil }) + if err != nil { + return err + } if s.enableSeriesResponseHints { var anyHints *types.Any @@ -1342,7 +1449,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq } // Add a set for the external labels as well. - // We're not adding them directly to res because there could be duplicates. + // We're not adding them directly to refs because there could be duplicates. // b.extLset is already sorted by label name, no need to sort it again. extRes := make([]string, 0, len(b.extLset)) for _, l := range b.extLset { @@ -1351,40 +1458,43 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq result = strutil.MergeSlices(res, extRes) } else { - seriesSet, _, err := blockSeries( - newCtx, - b.extLset, - indexr, - nil, + seriesReq := &storepb.SeriesRequest{ + MinTime: req.Start, + MaxTime: req.End, + SkipChunks: true, + } + blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + + if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, - nil, seriesLimiter, - bytesLimiter, - true, - req.Start, - req.End, - nil, - nil, - s.metrics.emptyPostingCount, - false, - ) - if err != nil { - return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) + ); err != nil { + return err } // Extract label names from all series. Many label names will be the same, so we need to deduplicate them. // Note that label names will already include external labels (passed to blockSeries), so we don't need // to add them again. 
labelNames := map[string]struct{}{} - for seriesSet.Next() { - ls, _ := seriesSet.At() - for _, l := range ls { + for { + ls, err := blockClient.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrapf(err, "iterate series for block %s", b.meta.ULID) + } + + if ls.GetWarning() != "" { + return errors.Wrapf(errors.New(ls.GetWarning()), "iterate series for block %s", b.meta.ULID) + } + if ls.GetSeries() == nil { + continue + } + for _, l := range ls.GetSeries().Labels { labelNames[l.Name] = struct{}{} } } - if seriesSet.Err() != nil { - return errors.Wrapf(seriesSet.Err(), "iterate series for block %s", b.meta.ULID) - } result = make([]string, 0, len(labelNames)) for n := range labelNames { @@ -1522,40 +1632,44 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR } result = res } else { - seriesSet, _, err := blockSeries( - newCtx, - b.extLset, - indexr, - nil, + seriesReq := &storepb.SeriesRequest{ + MinTime: req.Start, + MaxTime: req.End, + SkipChunks: true, + } + blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + + if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, - nil, seriesLimiter, - bytesLimiter, - true, - req.Start, - req.End, - nil, - nil, - s.metrics.emptyPostingCount, - false, - ) - if err != nil { - return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) + ); err != nil { + return err } // Extract given label's value from all series and deduplicate them. // We don't need to deal with external labels, since they are already added by blockSeries. values := map[string]struct{}{} - for seriesSet.Next() { - ls, _ := seriesSet.At() - val := ls.Get(req.Label) + for { + ls, err := blockClient.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrapf(err, "iterate series for block %s", b.meta.ULID) + } + + if ls.GetWarning() != "" { + return errors.Wrapf(errors.New(ls.GetWarning()), "iterate series for block %s", b.meta.ULID) + } + if ls.GetSeries() == nil { + continue + } + + val := labelpb.ZLabelsToPromLabels(ls.GetSeries().Labels).Get(req.Label) if val != "" { // Should never be empty since we added labelName!="" matcher to the list of matchers. values[val] = struct{}{} } } - if seriesSet.Err() != nil { - return errors.Wrapf(seriesSet.Err(), "iterate series for block %s", b.meta.ULID) - } result = make([]string, 0, len(values)) for n := range values { @@ -1915,6 +2029,9 @@ func newBucketIndexReader(block *bucketBlock) *bucketIndexReader { } return r } +func (r *bucketIndexReader) reset() { + r.loadedSeries = map[storage.SeriesRef][]byte{} +} // ExpandedPostings returns postings in expanded list instead of index.Postings. // This is because we need to have them buffered anyway to perform efficient lookup @@ -2610,6 +2727,12 @@ func newBucketChunkReader(block *bucketBlock) *bucketChunkReader { } } +func (r *bucketChunkReader) reset() { + for i := range r.toLoad { + r.toLoad[i] = r.toLoad[i][:0] + } +} + func (r *bucketChunkReader) Close() error { r.block.pendingReaders.Done() @@ -2620,7 +2743,7 @@ func (r *bucketChunkReader) Close() error { } // addLoad adds the chunk with id to the data set to be fetched. -// Chunk will be fetched and saved to res[seriesEntry][chunk] upon r.load(res, <...>) call. +// Chunk will be fetched and saved to refs[seriesEntry][chunk] upon r.load(refs, <...>) call. 
func (r *bucketChunkReader) addLoad(id chunks.ChunkRef, seriesEntry, chunk int) error { var ( seq = int(id >> 32) @@ -2633,7 +2756,7 @@ func (r *bucketChunkReader) addLoad(id chunks.ChunkRef, seriesEntry, chunk int) return nil } -// load loads all added chunks and saves resulting aggrs to res. +// load loads all added chunks and saves resulting aggrs to refs. func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, calculateChunkChecksum bool, bytesLimiter BytesLimiter) error { g, ctx := errgroup.WithContext(ctx) @@ -2667,6 +2790,9 @@ func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs [ // This data range covers chunks starting at supplied offsets. func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, seq int, part Part, pIdxs []loadIdx, calculateChunkChecksum bool, bytesLimiter BytesLimiter) error { fetchBegin := time.Now() + defer func() { + r.stats.ChunksFetchDurationSum += time.Since(fetchBegin) + }() // Get a reader for the required range. reader, err := r.block.chunkRangeReader(ctx, seq, int64(part.Start), int64(part.End-part.Start)) @@ -2687,7 +2813,6 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a r.stats.chunksFetchCount++ r.stats.chunksFetched += len(pIdxs) - r.stats.ChunksFetchDurationSum += time.Since(fetchBegin) r.stats.ChunksFetchedSizeSum += units.Base2Bytes(int(part.End - part.Start)) var ( @@ -2776,7 +2901,6 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a locked = true r.stats.chunksFetchCount++ - r.stats.ChunksFetchDurationSum += time.Since(fetchBegin) r.stats.ChunksFetchedSizeSum += units.Base2Bytes(len(*nb)) err = populateChunk(&(res[pIdx.seriesEntry].chks[pIdx.chunk]), rawChunk((*nb)[n:]), aggrs, r.save, calculateChunkChecksum) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go index d354d4db066..5cdb5a0b781 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go @@ -260,11 +260,11 @@ func (h *ProxyResponseHeap) At() *storepb.SeriesResponse { } func (l *lazyRespSet) StoreID() string { - return l.st.String() + return l.storeName } func (l *lazyRespSet) Labelset() string { - return labelpb.PromLabelSetsToString(l.st.LabelSets()) + return labelpb.PromLabelSetsToString(l.storeLabelSets) } // lazyRespSet is a lazy storepb.SeriesSet that buffers @@ -273,12 +273,13 @@ func (l *lazyRespSet) Labelset() string { // in Next(). type lazyRespSet struct { // Generic parameters. - span opentracing.Span - cl storepb.Store_SeriesClient - closeSeries context.CancelFunc - st Client - frameTimeout time.Duration - ctx context.Context + span opentracing.Span + cl storepb.Store_SeriesClient + closeSeries context.CancelFunc + storeName string + storeLabelSets []labels.Labels + frameTimeout time.Duration + ctx context.Context // Internal bookkeeping. 
dataOrFinishEvent *sync.Cond @@ -358,13 +359,13 @@ func newLazyRespSet( ctx context.Context, span opentracing.Span, frameTimeout time.Duration, - st Client, + storeName string, + storeLabelSets []labels.Labels, closeSeries context.CancelFunc, cl storepb.Store_SeriesClient, shardMatcher *storepb.ShardMatcher, applySharding bool, emptyStreamResponses prometheus.Counter, - ) respSet { bufferedResponses := []*storepb.SeriesResponse{} bufferedResponsesMtx := &sync.Mutex{} @@ -373,7 +374,8 @@ func newLazyRespSet( respSet := &lazyRespSet{ frameTimeout: frameTimeout, cl: cl, - st: st, + storeName: storeName, + storeLabelSets: storeLabelSets, closeSeries: closeSeries, span: span, ctx: ctx, @@ -383,7 +385,7 @@ func newLazyRespSet( shardMatcher: shardMatcher, } - go func(st Client, l *lazyRespSet) { + go func(st string, l *lazyRespSet) { bytesProcessed := 0 seriesStats := &storepb.SeriesStatsCounter{} @@ -409,7 +411,7 @@ func newLazyRespSet( select { case <-l.ctx.Done(): - err := errors.Wrapf(l.ctx.Err(), "failed to receive any data from %s", st.String()) + err := errors.Wrapf(l.ctx.Err(), "failed to receive any data from %s", st) l.span.SetTag("err", err.Error()) l.bufferedResponsesMtx.Lock() @@ -434,9 +436,9 @@ func newLazyRespSet( // Most likely the per-Recv timeout has been reached. // There's a small race between canceling and the Recv() // but this is most likely true. - rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, st.String()) + rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, st) } else { - rerr = errors.Wrapf(err, "receive series from %s", st.String()) + rerr = errors.Wrapf(err, "receive series from %s", st) } l.span.SetTag("err", rerr.Error()) @@ -478,7 +480,7 @@ func newLazyRespSet( return } } - }(st, respSet) + }(storeName, respSet) return respSet } @@ -552,7 +554,8 @@ func newAsyncRespSet(ctx context.Context, seriesCtx, span, frameTimeout, - st, + st.String(), + st.LabelSets(), closeSeries, cl, shardMatcher, diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index eaa96f1ede6..c1f4b9b8bfe 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -14,6 +14,7 @@ import ( "github.com/gogo/protobuf/types" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" + "google.golang.org/grpc/codes" "github.com/thanos-io/thanos/pkg/store/labelpb" ) @@ -51,6 +52,16 @@ func NewHintsSeriesResponse(hints *types.Any) *SeriesResponse { } } +func GRPCCodeFromWarn(warn string) codes.Code { + if strings.Contains(warn, "rpc error: code = ResourceExhausted") { + return codes.ResourceExhausted + } + if strings.Contains(warn, "rpc error: code = Code(422)") { + return 422 + } + return codes.Unknown +} + type emptySeriesSet struct{} func (emptySeriesSet) Next() bool { return false } diff --git a/vendor/modules.txt b/vendor/modules.txt index 33174cb5a8c..eb936cc0252 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -259,12 +259,14 @@ github.com/dustin/go-humanize # github.com/edsrzf/mmap-go v1.1.0 ## explicit; go 1.17 github.com/edsrzf/mmap-go -# github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd -## explicit; go 1.15 -github.com/efficientgo/tools/core/pkg/errcapture -github.com/efficientgo/tools/core/pkg/logerrcapture -github.com/efficientgo/tools/core/pkg/merrors -github.com/efficientgo/tools/core/pkg/testutil +# 
github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 +## explicit; go 1.17 +github.com/efficientgo/core/errcapture +github.com/efficientgo/core/errors +github.com/efficientgo/core/logerrcapture +github.com/efficientgo/core/merrors +github.com/efficientgo/core/testutil +github.com/efficientgo/core/testutil/internal # github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb ## explicit github.com/facette/natsort @@ -535,7 +537,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.37 +# github.com/minio/minio-go/v7 v7.0.45 ## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials @@ -667,7 +669,7 @@ github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.7.1 +# github.com/prometheus/exporter-toolkit v0.7.3 ## explicit; go 1.14 github.com/prometheus/exporter-toolkit/web # github.com/prometheus/procfs v0.8.0 @@ -766,7 +768,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 +# github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 ## explicit; go 1.18 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp @@ -776,7 +778,7 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing -# github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d +# github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader From 06556f78fe6e0afe9579b93a868958ea9e2f95f0 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Thu, 15 Dec 2022 15:16:35 -0800 Subject: [PATCH 019/133] Fix tracing.NewRandomRatioBased panic (#5044) * fix NewRandomRatioBased panic Signed-off-by: Alan Protasio * changelog Signed-off-by: Alan Protasio * make lint happy Signed-off-by: Alan Protasio * adding back xray propagator Signed-off-by: Alan Protasio * rename the sampler Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/tracing/sampler/sampling.go | 32 ++++++++------ pkg/tracing/sampler/sampling_test.go | 64 +++++++++++++--------------- pkg/tracing/tracing.go | 17 ++++---- 4 files changed, 56 insertions(+), 58 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7ce27ee2ce..e413dc7ac0b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ * [FEATURE] Query Frontend/Scheduler: Add a new counter metric `cortex_request_queue_requests_total` for total requests going to queue. #5030 * [FEATURE] Build ARM docker images. #5041 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 +* [BUGFIX] Fix panic when otel and xray tracing is enabled. 
#5044 ## 1.14.0 2022-12-02 diff --git a/pkg/tracing/sampler/sampling.go b/pkg/tracing/sampler/sampling.go index 44bc60c612d..068f48a1e83 100644 --- a/pkg/tracing/sampler/sampling.go +++ b/pkg/tracing/sampler/sampling.go @@ -1,39 +1,43 @@ package sampler import ( + "encoding/binary" "fmt" + "math" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) -type randGenerator interface { - Float64() float64 -} - -type RandomRatioBased struct { +type xrayTraceIDRatioBased struct { sdktrace.Sampler - rnd randGenerator - fraction float64 + max uint64 } -// NewRandomRatioBased creates a sampler based on random number. +// NewXrayTraceIDRatioBased creates a sampler based on random number. // fraction parameter should be between 0 and 1 where: // fraction >= 1 it will always sample // fraction <= 0 it will never sample -func NewRandomRatioBased(fraction float64, rnd randGenerator) sdktrace.Sampler { +func NewXrayTraceIDRatioBased(fraction float64) sdktrace.Sampler { if fraction >= 1 { return sdktrace.AlwaysSample() } else if fraction <= 0 { return sdktrace.NeverSample() } - return &RandomRatioBased{rnd: rnd, fraction: fraction} + return &xrayTraceIDRatioBased{max: uint64(fraction * math.MaxUint64)} } -func (s *RandomRatioBased) ShouldSample(p sdktrace.SamplingParameters) sdktrace.SamplingResult { +func (s *xrayTraceIDRatioBased) ShouldSample(p sdktrace.SamplingParameters) sdktrace.SamplingResult { + // The default otel sampler pick the first 8 bytes to make the sampling decision and this is a problem to + // xray case as the first 4 bytes on the xray traceId is the time of the original request and the random part are + // the 12 last bytes, and so, this sampler pick the last 8 bytes to make the sampling decision. + // Xray Trace format: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html + // Xray Id Generator: https://github.com/open-telemetry/opentelemetry-go-contrib/blob/54f0bc5c0fd347cd6db9b7bc14c9f0c00dfcb36b/propagators/aws/xray/idgenerator.go#L58-L63 + // Ref: https://github.com/open-telemetry/opentelemetry-go/blob/7a60bc785d669fa6ad26ba70e88151d4df631d90/sdk/trace/sampling.go#L82-L95 + val := binary.BigEndian.Uint64(p.TraceID[8:16]) psc := trace.SpanContextFromContext(p.ParentContext) - shouldSample := s.rnd.Float64() < s.fraction + shouldSample := val < s.max if shouldSample { return sdktrace.SamplingResult{ Decision: sdktrace.RecordAndSample, @@ -46,6 +50,6 @@ func (s *RandomRatioBased) ShouldSample(p sdktrace.SamplingParameters) sdktrace. 
} } -func (s *RandomRatioBased) Description() string { - return fmt.Sprintf("RandomRatioBased{%g}", s.fraction) +func (s *xrayTraceIDRatioBased) Description() string { + return fmt.Sprintf("xrayTraceIDRatioBased{%v}", s.max) } diff --git a/pkg/tracing/sampler/sampling_test.go b/pkg/tracing/sampler/sampling_test.go index ff283f55cbe..15e27704a0f 100644 --- a/pkg/tracing/sampler/sampling_test.go +++ b/pkg/tracing/sampler/sampling_test.go @@ -2,24 +2,15 @@ package sampler import ( "context" - "math/rand" "testing" "github.com/stretchr/testify/require" + "go.opentelemetry.io/contrib/propagators/aws/xray" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) -type mockGenerator struct { - mockedValue float64 -} - -func (g *mockGenerator) Float64() float64 { - return g.mockedValue -} - func Test_ShouldSample(t *testing.T) { - traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") parentCtx := trace.ContextWithSpanContext( context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ @@ -27,43 +18,41 @@ func Test_ShouldSample(t *testing.T) { }), ) + generator := xray.NewIDGenerator() + tests := []struct { - name string - samplingDecision sdktrace.SamplingDecision - fraction float64 - generator randGenerator + name string + fraction float64 }{ { - name: "should always sample", - samplingDecision: sdktrace.RecordAndSample, - fraction: 1, - generator: rand.New(rand.NewSource(rand.Int63())), + name: "should always sample", + fraction: 1, }, { - name: "should nerver sample", - samplingDecision: sdktrace.Drop, - fraction: 0, - generator: rand.New(rand.NewSource(rand.Int63())), + name: "should nerver sample", + fraction: 0, }, { - name: "should sample when fraction is above generated", - samplingDecision: sdktrace.RecordAndSample, - fraction: 0.5, - generator: &mockGenerator{0.2}, + name: "should sample 50%", + fraction: 0.5, }, { - name: "should not sample when fraction is not above generated", - samplingDecision: sdktrace.Drop, - fraction: 0.5, - generator: &mockGenerator{0.8}, + name: "should sample 10%", + fraction: 0.1, + }, + { + name: "should sample 1%", + fraction: 0.01, }, } + totalIterations := 10000 for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := NewRandomRatioBased(tt.fraction, tt.generator) - for i := 0; i < 100; i++ { - + totalSampled := 0 + s := NewXrayTraceIDRatioBased(tt.fraction) + for i := 0; i < totalIterations; i++ { + traceID, _ := generator.NewIDs(context.Background()) r := s.ShouldSample( sdktrace.SamplingParameters{ ParentContext: parentCtx, @@ -71,9 +60,14 @@ func Test_ShouldSample(t *testing.T) { Name: "test", Kind: trace.SpanKindServer, }) - - require.Equal(t, tt.samplingDecision, r.Decision) + if r.Decision == sdktrace.RecordAndSample { + totalSampled++ + } } + + tolerance := 0.1 + expected := tt.fraction * float64(totalIterations) + require.InDelta(t, expected, totalSampled, expected*tolerance) }) } } diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index bdd34d45ea8..c562d771cf1 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -4,17 +4,15 @@ import ( "context" "flag" "fmt" - "math/rand" "strings" - "time" "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/weaveworks/common/tracing" + "go.opentelemetry.io/contrib/propagators/aws/xray" "google.golang.org/grpc/credentials" "github.com/opentracing/opentracing-go" - "go.opentelemetry.io/contrib/propagators/aws/xray" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" 
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" @@ -110,10 +108,10 @@ func SetupTracing(ctx context.Context, name string, c Config) (func(context.Cont return nil, fmt.Errorf("creating tracing resource: %w", err) } - tracerProvider := newTraceProvider(r, c, exporter) + propagator, tracerProvider := newTraceProvider(r, c, exporter) bridge, wrappedProvider := migration.NewCortexBridgeTracerWrapper(tracerProvider.Tracer("github.com/cortexproject/cortex/cmd/cortex")) - bridge.SetTextMapPropagator(propagation.TraceContext{}) + bridge.SetTextMapPropagator(propagator) opentracing.SetGlobalTracer(bridge) otel.SetTracerProvider(wrappedProvider) @@ -125,21 +123,22 @@ func SetupTracing(ctx context.Context, name string, c Config) (func(context.Cont }, nil } -func newTraceProvider(r *resource.Resource, c Config, exporter *otlptrace.Exporter) *sdktrace.TracerProvider { +func newTraceProvider(r *resource.Resource, c Config, exporter *otlptrace.Exporter) (propagation.TextMapPropagator, *sdktrace.TracerProvider) { options := []sdktrace.TracerProviderOption{ sdktrace.WithBatcher(exporter), sdktrace.WithResource(r), } - + var propagator propagation.TextMapPropagator = propagation.TraceContext{} switch strings.ToLower(c.Otel.ExporterType) { case "awsxray": options = append(options, sdktrace.WithIDGenerator(xray.NewIDGenerator())) - options = append(options, sdktrace.WithSampler(sdktrace.ParentBased(sampler.NewRandomRatioBased(c.Otel.SampleRatio, rand.New(rand.NewSource(time.Now().Unix())))))) + options = append(options, sdktrace.WithSampler(sdktrace.ParentBased(sampler.NewXrayTraceIDRatioBased(c.Otel.SampleRatio)))) + propagator = xray.Propagator{} default: options = append(options, sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(c.Otel.SampleRatio)))) } - return sdktrace.NewTracerProvider(options...) + return propagator, sdktrace.NewTracerProvider(options...) 
} func newResource(ctx context.Context, target string, detectors []resource.Detector) (*resource.Resource, error) { From 6cb8a34cdb620f4aa8d4c124c118c79d7d659cb1 Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Fri, 16 Dec 2022 18:12:02 +0100 Subject: [PATCH 020/133] Remove flapping test in tracing sampling (#5047) Signed-off-by: Friedrich Gonzalez Signed-off-by: Friedrich Gonzalez Signed-off-by: Alex Le --- pkg/tracing/sampler/sampling_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/tracing/sampler/sampling_test.go b/pkg/tracing/sampler/sampling_test.go index 15e27704a0f..892392ab2f6 100644 --- a/pkg/tracing/sampler/sampling_test.go +++ b/pkg/tracing/sampler/sampling_test.go @@ -40,10 +40,6 @@ func Test_ShouldSample(t *testing.T) { name: "should sample 10%", fraction: 0.1, }, - { - name: "should sample 1%", - fraction: 0.01, - }, } totalIterations := 10000 From 1f060d631de81ba0c6a6e879b4011e381f548055 Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Fri, 16 Dec 2022 22:41:27 +0100 Subject: [PATCH 021/133] Fix opsgenie validation (#5045) * Validate OpsGenie alertmanager configuration Signed-off-by: Friedrich Gonzalez * Update changelog Signed-off-by: Friedrich Gonzalez * Validate global config too Signed-off-by: Friedrich Gonzalez * fix pr number in changelog Signed-off-by: Friedrich Gonzalez Signed-off-by: Friedrich Gonzalez Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/alertmanager/api.go | 18 ++++++++++++++++++ pkg/alertmanager/api_test.go | 31 +++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e413dc7ac0b..4e50a507f75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # Changelog ## master / unreleased +* [CHANGE] Alertmanager: Local file disclosure vulnerability in OpsGenie configuration has been fixed. #5045 * [ENHANCEMENT] Update Go version to 1.19.3. #4988 * [ENHANCEMENT] Querier: limit series query to only ingesters if `start` param is not specified. #4976 * [ENHANCEMENT] Query-frontend/scheduler: add a new limit `frontend.max-outstanding-requests-per-tenant` for configuring queue size per tenant. Started deprecating two flags `-query-scheduler.max-outstanding-requests-per-tenant` and `-querier.max-outstanding-requests-per-tenant`, and change their value default to 0. Now if both the old flag and new flag are specified, the old flag's queue size will be picked. 
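Both tracing commits above come down to the same arithmetic: an X-Ray trace ID only carries randomness in its last 12 bytes (the first 4 encode the request time), so the sampler compares the big-endian value of bytes 8..16 against fraction * MaxUint64, and the rate observed over many IDs spreads binomially around that fraction. A small self-contained sketch of both points, with a simplified ID layout (real IDs come from the xray ID generator):

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	fraction := 0.01
	threshold := uint64(fraction * math.MaxUint64) // same formula as xrayTraceIDRatioBased

	const iterations = 10000
	sampled := 0
	for i := 0; i < iterations; i++ {
		var traceID [16]byte
		binary.BigEndian.PutUint32(traceID[0:4], 0x639b1a2f) // fixed "timestamp" half: useless for sampling
		_, _ = rand.Read(traceID[4:])                        // random remainder, as the xray ID generator produces

		// Decide on the last 8 bytes only, mirroring ShouldSample above.
		if binary.BigEndian.Uint64(traceID[8:16]) < threshold {
			sampled++
		}
	}

	// Expect 100 +/- roughly 10: one binomial stddev at n=10000, p=0.01 is about 9.9,
	// which is why the 1% case could not reliably stay inside a 10% InDelta tolerance.
	fmt.Println("sampled:", sampled)
}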
#5005 diff --git a/pkg/alertmanager/api.go b/pkg/alertmanager/api.go index 3ed63a6e4db..2389e8f2aad 100644 --- a/pkg/alertmanager/api.go +++ b/pkg/alertmanager/api.go @@ -45,6 +45,7 @@ var ( errTLSFileNotAllowed = errors.New("setting TLS ca_file, cert_file and key_file is not allowed") errSlackAPIURLFileNotAllowed = errors.New("setting Slack api_url_file and global slack_api_url_file is not allowed") errVictorOpsAPIKeyFileNotAllowed = errors.New("setting VictorOps api_key_file is not allowed") + errOpsGenieAPIKeyFileNotAllowed = errors.New("setting OpsGenie api_key_file is not allowed") ) // UserConfig is used to communicate a users alertmanager configs @@ -336,6 +337,11 @@ func validateAlertmanagerConfig(cfg interface{}) error { return err } + case reflect.TypeOf(config.OpsGenieConfig{}): + if err := validateOpsGenieConfig(v.Interface().(config.OpsGenieConfig)); err != nil { + return err + } + case reflect.TypeOf(commoncfg.TLSConfig{}): if err := validateReceiverTLSConfig(v.Interface().(commoncfg.TLSConfig)); err != nil { return err @@ -426,12 +432,24 @@ func validateReceiverTLSConfig(cfg commoncfg.TLSConfig) error { // validateGlobalConfig validates the Global config and returns an error if it contains // settings now allowed by Cortex. func validateGlobalConfig(cfg config.GlobalConfig) error { + if cfg.OpsGenieAPIKeyFile != "" { + return errOpsGenieAPIKeyFileNotAllowed + } if cfg.SlackAPIURLFile != "" { return errSlackAPIURLFileNotAllowed } return nil } +// validateOpsGenieConfig validates the OpsGenie config and returns an error if it contains +// settings now allowed by Cortex. +func validateOpsGenieConfig(cfg config.OpsGenieConfig) error { + if cfg.APIKeyFile != "" { + return errOpsGenieAPIKeyFileNotAllowed + } + return nil +} + // validateSlackConfig validates the Slack config and returns an error if it contains // settings now allowed by Cortex. 
func validateSlackConfig(cfg config.SlackConfig) error { diff --git a/pkg/alertmanager/api_test.go b/pkg/alertmanager/api_test.go index 31367918fd4..981f81b3101 100644 --- a/pkg/alertmanager/api_test.go +++ b/pkg/alertmanager/api_test.go @@ -371,6 +371,23 @@ alertmanager_config: | `, err: errors.Wrap(errOAuth2SecretFileNotAllowed, "error validating Alertmanager config"), }, + { + name: "Should return error if global opsgenie_api_key_file is set", + cfg: ` +alertmanager_config: | + global: + opsgenie_api_key_file: /secrets + + receivers: + - name: default-receiver + webhook_configs: + - url: http://localhost + + route: + receiver: 'default-receiver' +`, + err: errors.Wrap(errOpsGenieAPIKeyFileNotAllowed, "error validating Alertmanager config"), + }, { name: "Should return error if global slack_api_url_file is set", cfg: ` @@ -402,6 +419,20 @@ alertmanager_config: | `, err: errors.Wrap(errSlackAPIURLFileNotAllowed, "error validating Alertmanager config"), }, + { + name: "Should return error if OpsGenie api_key_file is set", + cfg: ` +alertmanager_config: | + receivers: + - name: default-receiver + opsgenie_configs: + - api_key_file: /secrets + + route: + receiver: 'default-receiver' +`, + err: errors.Wrap(errOpsGenieAPIKeyFileNotAllowed, "error validating Alertmanager config"), + }, { name: "Should return error if VictorOps api_key_file is set", cfg: ` From bcdfd6c697d3a038f2d0e760a5f4773a3c737f69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 14:00:16 -0800 Subject: [PATCH 022/133] Bump golang.org/x/net from 0.2.0 to 0.4.0 (#5054) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.2.0 to 0.4.0. - [Release notes](https://github.com/golang/net/releases) - [Commits](https://github.com/golang/net/compare/v0.2.0...v0.4.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 6 +- go.sum | 14 +- vendor/golang.org/x/net/http2/hpack/encode.go | 5 + vendor/golang.org/x/net/http2/server.go | 52 +- vendor/golang.org/x/net/http2/transport.go | 78 +- .../x/net/publicsuffix/data/children | Bin 0 -> 2876 bytes .../golang.org/x/net/publicsuffix/data/nodes | Bin 0 -> 48280 bytes .../golang.org/x/net/publicsuffix/data/text | 1 + vendor/golang.org/x/net/publicsuffix/list.go | 36 +- vendor/golang.org/x/net/publicsuffix/table.go | 10552 +--------------- .../golang.org/x/sys/execabs/execabs_go119.go | 8 +- .../x/sys/windows/syscall_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 7 + .../golang.org/x/text/unicode/bidi/trieval.go | 12 - vendor/modules.txt | 6 +- 15 files changed, 185 insertions(+), 10593 deletions(-) create mode 100644 vendor/golang.org/x/net/publicsuffix/data/children create mode 100644 vendor/golang.org/x/net/publicsuffix/data/nodes create mode 100644 vendor/golang.org/x/net/publicsuffix/data/text diff --git a/go.mod b/go.mod index 63b2f0e7b94..bf512659505 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ require ( go.opentelemetry.io/otel/sdk v1.11.2 go.opentelemetry.io/otel/trace v1.11.2 go.uber.org/atomic v1.10.0 - golang.org/x/net v0.2.0 + golang.org/x/net v0.4.0 golang.org/x/sync v0.0.0-20220907140024-f12130a52804 golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 google.golang.org/grpc v1.51.0 @@ -201,8 +201,8 @@ require ( golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect golang.org/x/tools v0.1.12 // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect google.golang.org/api v0.97.0 // indirect diff --git a/go.sum b/go.sum index 7a6933a402c..36e9dd3e744 100644 --- a/go.sum +++ b/go.sum @@ -1181,8 +1181,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1319,12 +1319,12 @@ golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1334,8 +1334,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 6886dc163cb..46219da2b01 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) { e.dynTab.setMaxSize(v) } +// MaxDynamicTableSize returns the current dynamic header table size. +func (e *Encoder) MaxDynamicTableSize() (v uint32) { + return e.dynTab.maxSize +} + // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index d8a17aa9b02..4eb7617fa0d 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -98,6 +98,19 @@ type Server struct { // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. 
+ MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a @@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +func (s *Server) maxDecoderHeaderTableSize() uint32 { + if v := s.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (s *Server) maxEncoderHeaderTableSize() uint32 { + if v := s.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + // maxQueuedControlFrames is the maximum number of control frames like // SETTINGS, PING and RST_STREAM that will be queued for writing before // the connection is closed to prevent memory exhaustion attacks. @@ -394,7 +421,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { advMaxStreams: s.maxConcurrentStreams(), initialStreamSendWindowSize: initialWindowSize, maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -424,12 +450,13 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) fr := NewFramer(sc.bw, c) if s.CountError != nil { fr.countError = s.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr @@ -559,9 +586,9 @@ type serverConn struct { streams map[uint32]*stream initialStreamSendWindowSize int32 maxFrameSize int32 - headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + canonHeaderKeysSize int // canonHeader keys size in bytes writingFrame bool // started writing a frame (on serve goroutine or separate) writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush @@ -740,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } +// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size +// of the entries in the canonHeader cache. +// This should be larger than the size of unique, uncommon header keys likely to +// be sent by the peer, while not so high as to permit unreasonable memory usage +// if the peer sends an unbounded number of unique header keys. +const maxCachedCanonicalHeadersKeysSize = 2048 + func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() buildCommonHeaderMapsOnce() @@ -755,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) - // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of - // entries in the canonHeader cache. 
This should be larger than the number - // of unique, uncommon header keys likely to be sent by the peer, while not - // so high as to permit unreasonable memory usage if the peer sends an unbounded - // number of unique header keys. - const maxCachedCanonicalHeaders = 32 - if len(sc.canonHeader) < maxCachedCanonicalHeaders { + size := 100 + len(v)*2 // 100 bytes of map overhead + key + value + if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize { sc.canonHeader[v] = cv + sc.canonHeaderKeysSize += size } return cv } @@ -864,6 +894,7 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) @@ -1661,7 +1692,6 @@ func (sc *serverConn) processSetting(s Setting) error { } switch s.ID { case SettingHeaderTableSize: - sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 46dda4dc316..30f706e6cb8 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -118,6 +118,28 @@ type Transport struct { // to mean no limit. MaxHeaderListSize uint32 + // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the + // initial settings frame. It is the size in bytes of the largest frame + // payload that the sender is willing to receive. If 0, no setting is + // sent, and the value is provided by the peer, which should be 16384 + // according to the spec: + // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. + // Values are bounded in the range 16k to 16M. + MaxReadFrameSize uint32 + + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // StrictMaxConcurrentStreams controls whether the server's // SETTINGS_MAX_CONCURRENT_STREAMS should be respected // globally. 
If false, new TCP connections are created to the @@ -171,6 +193,19 @@ func (t *Transport) maxHeaderListSize() uint32 { return t.MaxHeaderListSize } +func (t *Transport) maxFrameReadSize() uint32 { + if t.MaxReadFrameSize == 0 { + return 0 // use the default provided by the peer + } + if t.MaxReadFrameSize < minMaxFrameSize { + return minMaxFrameSize + } + if t.MaxReadFrameSize > maxFrameSize { + return maxFrameSize + } + return t.MaxReadFrameSize +} + func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } @@ -293,10 +328,11 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -681,6 +717,20 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } +func (t *Transport) maxDecoderHeaderTableSize() uint32 { + if v := t.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (t *Transport) maxEncoderHeaderTableSize() uint32 { + if v := t.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } @@ -721,15 +771,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.maxFrameReadSize() != 0 { + cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) + } if t.CountError != nil { cc.fr.countError = t.CountError } - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + maxHeaderTableSize := t.maxDecoderHeaderTableSize() + cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.peerMaxHeaderTableSize = initialHeaderTableSize if t.AllowHTTP { cc.nextStreamID = 3 @@ -744,9 +798,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro {ID: SettingEnablePush, Val: 0}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } + if max := t.maxFrameReadSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + if maxHeaderTableSize != initialHeaderTableSize { + initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize}) + } cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) @@ -2773,8 +2833,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc.cond.Broadcast() cc.initialWindowSize = s.Val + case SettingHeaderTableSize: + cc.henc.SetMaxDynamicTableSize(s.Val) + cc.peerMaxHeaderTableSize = s.Val default: - // TODO(bradfitz): handle more settings? 
SETTINGS_HEADER_TABLE_SIZE probably. cc.vlogf("Unhandled Setting: %v", s) } return nil diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children new file mode 100644 index 0000000000000000000000000000000000000000..1038c561ade4683e91b37e8298fc5ead7c306779 GIT binary patch literal 2876 zcmWO8`CAib0svrUzRb+`nSk6zK|{VtNTRiJd7yyG8JU16mRliLD^_b=t;%||>na8l zP!yF%fdB@t+Ui#Avu#}!)CeZrS5%ZMplz*Iv9&5~*B{>h;dOCwadCeq;GIS9q`Z^& z4zVS!huE^`0kP%QL!#hTKe0dV5pkd}k|?!C6Nl&oqRgryj?$?_d0`G=rZt2)emhZX z-9en7jfADpL|Ci`i8}faai*}0IAc9YoTX2R&&HotpS7M5e;NNJao+kBaiQ><_=2^8 zxJch1F2>u5ONGtEC2I%qt+kW*&U&Bt!TN}}690_2YJE;zx4sqEGeBIQz$5DSQiP46 z346kOMDyNYqMeyTbTF|*XM&RGx}8MyGpQt*u$=@DVW1RXU~nZTDBVa8s31KJv7}dH zBIym6lHSS`(z|gP>8ng7eGQqUP?<$eHK@s{jhpc_xP=T*Zp8tHe~|%=yGSwoHz`r> z)<_JcSPBnfsj`ez7!GR`jVH+&@Dv%`cn*ia+c-qoh(iobI27K&p-MXrH8hiA?+&y|}@M@D2V1e1j9<8#Y&blbeWd+C1_t-LV zFPDvbjVnn9e-(BZ^RUCF!FO$1e2@DG-!tapd$u+BKKC)cZ(N7__@9t{+^1xpZ3BK_ z+^BiTZN?961>`V)8y?}C@Ca9iM~sK@DE|l^HJ0O1+cErze;hwDR^UgrD*Tvl#*evb z^0Bc7|IF3mpN(JPpKV{`C;ao|6Yc_jV*C$&V*3XF!oP@r;V$8){LA<$_h0g<@jLPv z|9kS8?F#uTca40(uP4WhjpT3q7V>v~7x}x*LB6)#C*N?7@EhZgCI~j&0$~Cx8>VVw!|d%~wxAQtHFbe- z!9%dvKG>A@uAl4OuxMFt@*X#=tTqgl#u|G&m!hma509ElUken0(Qe4A9O41^* zym>KL(aeFg;#82@KJFwSYKQPHo9H~8wqhMdMH`rIA02L+E*-E#6u$9T1*vgX6*vgj8Y?a#< zwkmlmTUAoPR<-;SnBBGkbMki9T(X0$F4@V}xb0$VN_Mj~Ero1t@?N&Kq=>C;*#|7i zMsTvE6r3(O#&d6}m3X+vNIX(vB_0RjBpz+?JkPcSo_8C^Qyoait;Ej8= z@c!=nmEv{&O$k=uMdO=r+-qkyl^6me1$6X8Kq1|gj8iuh`!2qi@qvt zD`oL5pw9FhpuXujMSXw7MqTasiE8$JOSPqkQ9bF4sRu_hsnJQBsh=j5Q_m-z);~|b zNsS%7MUC~gP`~%KQhyx1PmT8uQGfQ1QGchuqUqj0X(q#uM#8D|1fhf$2<3r-j3C<0 z5ll}ME}$otN6_w$DB80;hV~LB(q0Y~?JZnNPjaNtLSZgFIU|qubLeURjP$^7w=>?-ckWN6n~%?+Tm9zH?b#7@ zXLcOjdpwDD_^k?bWakAsj;rarej55OKV9Ho*?$E7b^JBslKn>JQb8~-eI!HV02%2| z$$&qUfeL|)m*d9pDm-MoK2I5)<0Yf}Cd-%{KN(XoRR;a1$zVk!2=9l1)T!@NY+X-;H1`;(b2(Nd->H-+gk zFOzlkFK4<%sZ4k73Z~oq0n^=|ChN&fXL`(;OizCn(<{oB_2%X<3z;Ev5i^{-j~O->Ll;qr{M`oRE(3&|2mo>-j|YhX z3QnwM$KsKCQt%ZKoA40!@ zPoRImdMK*?6sq!e!VGaR%uwgSj6pTb5^G_WdNo{GlMmJ6&2qJRH`I#vKz)q~>IaX& zj|Pvz{2DX-c<>}#J9r+h6JLbu)R*7}@nyJS@Fv`(zAfKW(++pmbjWuOzJZ^M-@-4% zT<`gC~Y|LJQsx>of=8Da~Pc23Nu}8Vfv&>)<(j8lJ}&;Q65|@XF9N z_&+=buWDYxtF^D;b^Hci*Sf%ZmVoucJlMc8u;B!R4Z{=Q4VDjYX$7!}^?^-V3AAaY zuw{5AY#&}A?_d*PhgJzYhL=ExmV=HHYUmi&z`Lv#KG5pmL+xj9h%JCaS_2%>7Qr{{ zVK}aRj7au5;yIy$(s?N;i;seG`Xbsn2|=A7nqm5d_}q!P)U)ktAE zfu`$Dq8XAiXom9~de3qm&D4E^f+Uwwkn=hUw%kA=Ix7m5G@($Z9fj#y(QHXOn(gdD zb1c1RuI?V1CwYM8IR{X2h9V^|P^9xEiqgG83nj17LgzSI zWceE{){`h&N}=cxh+^vaC|2)=V(UCmoZb_~kNBVjsUK2G{ZXPc043_>XsKR-mexh0 zl#wX3LK=h8q$wy(pMlawGEurT8)fQqP?lbgvPbgKO8t7YYUDGd)^9_ax;;oI-G_Ag Z1B%rnr6^x|0)0GUL2F0Oqfew4{}2BS2s{7) literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes new file mode 100644 index 0000000000000000000000000000000000000000..34751cd5b9d1b14c3a0977e4fa4b04db058b241d GIT binary patch literal 48280 zcmaH!2Xs``*2m9GGBc?V0-=T?iYSVxfMUZtbEnr#dJh5?RIDhrr;tup#4a`z6&1yT zVja>GniRXJ*q+b+?A`CTPk2UM?|qB4_Bv&k|K4Yxa?iaJlqxv8RVnAhWTmPq>`ImF zN)ke;Jk+67WgWf#E9&VT(xg;@k)o1}309S~=N5-jWrG7s#s9Tf$P%SoyXurOzi3d& z&dgd$%Mr_KpsoB-+7jo`R#MffZ0^%UpSwZXT8e4&-eoFP(_>e5BdtZL;zFNNZg+>W zC+5183OeXD9`FJ(+N@M~JE+WfvT_(F&|9z}@18xvrhIXFN((+_ea?XCXxs@vUeMvff|nREqPdT1qMS z;lD4aQWBrFf_@KT@vrEpXirtCiE&_Y9)w#3Uc`ch4Pd#wUZriGhz(;LF1T)Yt8^n5 z7{%vM+IvZxQogf$REA--!jgj1CHHly{>Euk12<4D8il^1QBB0}Hygy+Np(U8FfK)D zX&IQkA)m@NhNIMc*;Lt^Z*wVCz06btjp?=U9ow!3Cd$y~DM3|W8<<@^=yOl%MwSWOzy zs5uBgmG9KiTVyKho`_ZD7^i|D_#S~UP9-FjO<1eC9tHkYR70cGQ@ZG>dF@I6n- 
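
The server.go and transport.go hunks above introduce user-facing knobs for the HPACK table sizes and, on the transport, the maximum read frame size (MaxDecoderHeaderTableSize, MaxEncoderHeaderTableSize, MaxReadFrameSize). A minimal sketch of wiring them up against the vendored package; the handler, sizes and certificate paths are placeholders, not anything Cortex configures in this patch:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Server side: advertise a larger decoder table via SETTINGS_HEADER_TABLE_SIZE
	// and cap the encoder table regardless of what the peer advertises.
	h2 := &http2.Server{
		MaxDecoderHeaderTableSize: 64 << 10,
		MaxEncoderHeaderTableSize: 16 << 10,
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}

	// Client side: the same two knobs plus SETTINGS_MAX_FRAME_SIZE; the transport
	// clamps MaxReadFrameSize into the 16 KiB .. 16 MiB range allowed by the spec.
	tr := &http2.Transport{
		TLSClientConfig:           &tls.Config{},
		MaxReadFrameSize:          1 << 20,
		MaxDecoderHeaderTableSize: 64 << 10,
		MaxEncoderHeaderTableSize: 16 << 10,
	}
	_ = &http.Client{Transport: tr}

	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
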
zcm=!0D{kU)l!MZb@Hvwh0OvR_&z52%2?QHz64m3k%a&$dNJtuS#Y*&*IW~g}?Hfpwj zCH9m}s>pixg3r4bmhk3eHFj6Ho`AXOaHqRrE)jjBlE8Njs@y?H{ALWfZZH{??_eo@ zqE#K3nBYtNdSSb6tW&`AhIs79|UzS~V9y zRdfC75F_AHha?U`!e5D@!tDSr{?J3#UE0W^HJ@tv%oKH~aY(zG;iAND=Gy2boDTul^^#8=zxgJdDSVr%?~`VAy6cy2oH>cA=>$+w z0+8lefHW_{xOvT1$UQOu>xJcWa>H$<3G|v z&h+%JRcE%uq~nqX$enHKY}eCNi_dANn$b!~uB2p~0U-Yz07YB9jODWs;}ZO>sc_@l zg>~v2!vIV8&3eNAJ-y*K(H9=jNMhcbg2ZC@%AO(yl|43A$D=4kciYLFH}TCs!L82k zn&4FzxK^6naywF5;zTY8)m{xamVLYE+qpgkDdti3eyVN}6Gf3jG zT9Vyycxh;umR|={|c^IwLrYwS#iX+1JoEIVzMeC3? z4mZ_BovUiq#pbu|bc9;fd?Tj?%{L)de6mf=-;A|OTDLAzmpXH;>N4XPdV^QtrLzt# z!HMWAm0T;`&hQj367mmvW9i+>nm7>V_>yOrToQ)vB)Wmv*Qt5~(ga&h1kF zFzj^r9_>+A#>!jORb{!2czLFu%(kJvygy@JFdpBWx#U4!7Qrr&kts_~Rad<^0pJlG zgZt`mbzrhu;9{he>&{M$cOx766lQm318q+d7te0mJg;=B1)GaNP%x_=eZ!^WYxulJ zBI})kQtvSiRU67=O1eK!rg~N?mALX|O!s1^L&MGVfwkZ%~qeaHE*TSTdR9Zvh^ zn5r%@A2H87VR@BQ@gJBZ1oi%Z0kZf8KOM5(2cE;4^KO&jIRf=X(*Yhl)sF1KI;>p? zNY793MP|U~`N@Xt*pYp*y9Q7)E(f!BT)nDy52#i3Z3F65gOP5f zBONWqKx~9h0fuu4HhfRFsD{K?dfij%Rb$V>TGeC>K|$H^053ZpYi2)eRBgAbro3uVe?951p^l!%fHT1dybcCk3k?`Sy_i_f$ zcQH094l~u##1vfclSs-~)uNV#c7Uq#8Q~j@AK~Hfx%V!jG?KW4CD>tqH>9!wa;}j2 zF14)nuw=E|dmf6H6fxNzo;dbPqCw5k<}<@i=`J@`uQ z^T7NJV#UKyP*n(U=ZT`~HB((}>k?M9c+h{DzRD&P1t2>N-lq%+c*?87eYQ|{4x+9U}A_eYA zChW(d%9GiFA5Tj4h9tGdC~m<)c@Bv9Y>>((a_wG$ab_!%{e1LA&aZ=9RHxP?R!}Wi zfoyRA1{PcY6<`!+RLx8(Gwb@ni73>_g)0`;1cQ>B%aQu)|`y(@}r^au8gz z0?b=m)#mMFyR|i1t6Gc$T#U^3C@or!4RfxEjWKqRx*)O39mIZKn`+tIU{$ToVK5hY zz#O}hj`BfX)!Na9jj|g+5P6|awHYO~s_oUc&=PsWP3~4NQti=>TGier0V`hEt~#PU zF4bvxQB^VpEoDPxl{%=MF!>k&qtmWBTb@CmnMH=>kH#Z!Im9?W2^X>&X&Z`|zd~$S zI#gHU?poDt?y!P6mo{%j1JEvQqH5xT=SQj)vJ-n6D8*TeyAyk0E?x;f@B5lWh)^Xd zP-tT!y01;f>2F~!lEt!cS3SLwlde;2swX@iiEzxLdRh;lBd^A*ws_>R!2LDGi|(kS zR0WH>$^$vYiwzH@{OjFnOY8Bi`0+~zs=jE)nS>3})5ZW~AQsA4s@9EqIqI1!}Cgt zdNFahRqZsc#E|=pI<@P?ATCtMEQ^1(2rUseyp%N-wjlq^(kt1_v|KRsTMW`}ZZmx5`wnIm1@<+74M{BCptpp@?6{bGXsvrT>Kx)@or z)Ks4)4xr?E9c%tCP4!tK$;wjj2__33N1|%Ahm^k)%+9Nbf%Enb^||qCt@=DM*o8^i zor9}a5|`!ln#Y>t+FD)mC~M0NE$VmUKJ-;S06ybBY?Q1>QonCL8(H(@B#ScMYN2BQ zy;XORNa1OX*!Y#$*YxWU!W1vxiyzLH>n#?evKB07wOcHSwHR`5MxSAAhrDW9tch2> zV3C^>W1X8U-fp)fRbAd_NlM6iUsZro{|7#ht^{-53t}i?T2fy;tr z){@b=fvW#5Jc`SfAC6)?e6x+wzd)08OJ?3IJJop*&%R``WQy04`Qq6~__C02|D=f^ zD0V}3qagpA21~!vsa8wBgx6~6pYV#RS@cF`gLI1r zh|9L^T*+n_pFv#WGg%qpXC(bL5(c5#_#kx$CyIU;v9om-h=cOK8lv;gXF#UEco)~_E-idT(lKl)Jd?1 z!xzd$B9t3o8Z7gmsss0ptR%}2=XywDY%K`J+bu(5-_jeEqlWixH0S^9whT?YPH*)Y z$(CW8FKMBa(*U45lP$Tf*{zn`#CnZmoj8@<%y!0I%hD;S}KQuD%H#;n&Hec3c8C`yMn`MkqWwne+9D;1PgWf8h zk5tr8$y@2itz(mk>8%|YnrO2e&=pKU%Vc2q=e0wwuVZ9bhaiag?yj?p^=xuk#wN0B zLC{WM{oN}*%gQ2#CtmbBU|2X4NFs{2PgFIns*1+2-W%BLxogN%MH0BUuk z<*RynZ4S#pi5z-e6;{hQV;^vheI`rz3G}+<2Cd}zcFVX{Ih^Dl2sHC$6a?l|ihkZ; zIoLImj=+n_mV*-+lxoJ|pmUlXYe`+O%m+cib*PHG=dp};zbjMYsWzNmipkKVR;;zu zS;i+?a3L@QrNJILoB=D=2AGyZjKi#!LlPD=mp=7a_5pFcn|z zr{nNcWcPMj4vlZIu_Ic+QZ~B-@&K9xRW{3^i3plYex{nY5`6LM4$B0;JlVv%>KX2H znjp*UmI<3*hRV;onEsNS@RK$n~!)cN489IS&lM|76xjz$8uE5beH95V?eEC zvVSh3SS|FvU`_^TJw7Wul;k@s%~({7TX`36l3(2C8P&S*rZ0 zwU(+xKNLh|t;=_|(>ugu`Yp3ll>=^IQ5~g1+VV@K?VMz+_4A_OQA+ujVJ&}Q&{7=< zqs4obhw3ehAvdEc{{iWJuo1EzX6ItNrMhLbmGM;_tQPGyMm3vo27Iu@-hn0djswlr z4MeTQ2j*L;nzwQTUL_lS)u?t$jj@o9s*jo=9q<`uo291Z&RUG;(B^%+-ZH}*bEEXF zEtvca^pVFpr39A9MoO+*TUiP|1lcME?E{MEsDR?&x^1ihFGo4jn0_qEo*$H`-R4IHNvBD3e z<8z`>_$$>SM=@kL){4rJEvlBbO|TSg>xO)f(qbfO96Ds7iimq=^@OW7bWmpRc=wghWsD>EQ# z5G#9x{j%(FOqM;H2T8;r9jGeXvJ|qNu$S%H1=&Ze%0B7``S%tf!t$*!|0r$0T+Ilo zYazBcBo*26jOCC47%CrH067x%`(~4nLb@In|gBUo6KCIXD?r zi`__k*lIZ~+EZ&eEwPri(1<3>>8?9nD3CMtaGp(-lgnUzm)84as2_t2Iqr{e5M5)D 
z37KG3Dy^Fc=@$xe@%DFS<44*G?-@*re#+2BrV|qPX&@~=6X5x)K~+-yBJ0(lfw=Gx z9f5~@0D1*4J+(dLWMwx~=(bccC!c`n_k#t$(BbkD67y_e#9ksU`V~&>7e=AR!yvnE z>m*F0GAY^W;ad@e+<{o{tBN70cRzw5Pd2^&30^>|HgOu?ToFL#zebzB?e>Hi54q#+ zy{Fc<(*>t3Cc4Wzuq%tX@dc{fho+&=AeHrfVsII7-B+SYUt$I1NL+O@EEOG;0vDjq zWhHc#Pi(<+d2laGrZ@gz0KU_E(6W>cL*A#9#jaybJdCAOKS;Ra@D-g5i|KEI<(WFL zbf7AhpiQ5Az4EB#m!qUU@CBMts*hmNT_7!dnb7GyIl2%9o)qF|j3MQFS9D2%KaIXT z|LTF)gRB#zHyLyaNJjLvxi(VO%%iPI`*TD0{z;$QGxTLsU;1c1*N+6pD(~(zi{)^M zuGp|lZJ<;@iW-kW@*W}e47t)XhEOs&CtB+tfJMC$R4V(|b|FL~2^}~iw^lkXRVTSQ$z10kOa7Sz^9KNTK7^{k#jt37{U!BsmZXF4)nrPO>nNSo zt}+rbIqlgQanZ;v3Lazt;jFBle z^nkjnlPC?6anqUe93bO*rIF`1*C><3bO_4`I>c^P@E+oe8wa|O zXsp{GKW(i~R;NH8vyx}sq=T`c-ujQVzWLTC))z?~KmB;)lEZOM1IhZwf}GR#&Pp{n zdV;?qGZwf zpS=Z}Fkba*GI72gyZXb9sJv&7Z9u&ibB|_rSL_V3RXzzm^HV=1d1%$Xd1k<@*B9qp zR5{OQiuWa^_XU`HmzyPwzFu?krghRLv8sLykK(r?rn9Ny+(s6Kw<5GTQXoYvmhqoq zDZ3vmMk|3ZatN6FFa|zQ(*tHnJww(%{rFJW1GE05D%h38F8&@-^RY6hs+kbO#wZkA zL`=gc0n+%amA$7QM1Mcz+$SZ;tN2DdsyG?GYIhO}TI=YLS86&q#lxu17=>G<$5Sdf zCzFHHD_x)pgFat2pbB{nShK!Wjo7SLr01n~G$D3BNry)knZhj~^%Uc!?+B`{G0XB~X!O7%`iMVpNn$hD?-3+VN$U+=GU z_}rlKonk^3K+0NatCYKI=MqTKiFJhOxF$7H&qniUdP^Rs%{UoiPLt|RNcBNPqs)^8 z=FbAySU_9;#5y&yU1H#zM29h$Hs@gC5AZO@`oEbQp5M z9(e`XXgBC}Mil3EDr-XT>`$8@7=nb;Y(-Yz2kV5|GfnkSZlc!I{wv`Orh#2jr>gCC$tm$v^WW zdP5JP%5^;?{xxEe#dW0eEF{Vvrpj#!TI4px$f3hu)=FExiOGLKf<09AmfD-FZFO|a zg5+PB1XFu$j!E~SBTsayXB72h*L7g(&n6Nr6jvS?`@-8 z8yYQA9RN|XsjCxcQ!<&9z)%8135*nh(YuWa3?(p91%?tBDf^^NU?d3)B`}o0Py!=a zU?_p11cpUm=uJr-CLIDp2@EAL{^x3@%$EPBH(4!0fh>o5Ym+tS0If;e8v#YC3UZoYkE^EJoZ1b+L}9AOkM`j=LnZvm+O}_ z^0uXSfj3%&Mb0Hod4XbBTObl4Pk*WIgM_}XRR>mi+??{Rr9i059xMrqP?_EjW^B#M($PERaG$S6xBPEj&5&Y9|O2v!z@JvR%d7LZnY+f z>fu(1Yy?UIAxFR7ji6vWb_+h`*?I3g#CrF58MnT3tPIqlZ!PmF>yppyIG6?*adNks z;45xY6ZU@QCmK8&9Tqt@NQiA+>hL=bXJPCoN0N;3eom1CL(017#>Hx4io8YcKfhZ| zyvEl>b|(4M5&lX@$JqGTc?h^4Q8blU_&in<+2SVYC}I| zWCkw>sXj?3AC;tz7NvjZ6PC$;>(F9qSqsbtAazpfgp-)|&Yr%B(>oVrWiY0=Abc-6 zb+c@I?x#?oN8c;Zy{_*ddcqpzv}qX5nY6iYwNjP6BK}0JI(C=ZN>!E--h@E1VbO03xJ;)s zf-1MVAl*n+-{I-2tBT`^o+kt9n7u`dm38@34t1S!wqVhjz;q90Fr1Nmvu0AP~HoZ(JWK4@3b`XC9)|;PI%g_ ztg~SmnT=R)i1oIJTh<17G?HEIxT_C^CG$p-L7#&3Oi}X4BgA^k4xo*^kXM(tzS2f% z56Lj3qlcV)z^YCdIh1u{G~b1*)KvgaT_wv1pEasM9Rt(}?YOG?JxMk913!Mq1OA{~ zR)O5awW?&vmrMvX4ns+4T%QaYF0=6SQp+-WE%$pNZ#O|cCD_SV0VAiIxn;?7kmHviJ6J}4@JIbo|HcYr2g4^TYw6Y* zxIgsr6jhe}cm@l=-D#@q^;41P|6>mxWl~DLy%~H#rley{7Y-`?$T^-;(il2)dMCZs zUs+dv5@Atzf%o0RUP}k_SY#~^QnK9R2bBZ2a@Ge?HJvuw*UZMW6KEUS%xDb?plZ~q zOJP}#AN}PHASr;2n@bGz(emfLtwYE1Mv9NcABpJtpKSAEZ-CJyP20fb0R>6!%cj&(osh9oJyNPun;`OBk$Sw8B0fl=kZK__h0Jz9 z&TfLp7fe}}G=}z2oX%atxFtVGTkdwUZs@oHbc|?Z**uR?$f#aSaAY6sKbokiv6Awa z!ZJucMe4T%l6DITtYp1%$PI4B#6^(f8J-M-fzK#PQ5Abe@t&#w(Os(2x1p1^rIN+V z$>Q9-sj6~wvZ!)v!l0z(Z>M)p7?9VaW#mJWK6~g$k(>O~19w6`Xa?W&C>?PmNt4z< z9;A0M$m#FoKz@ZUBfST*SW4^2oQ#d2?>}z`38LW39FjqT45TA-Jq~8Rv<(DlQOH2V z`i)!)xv~(_%-wLmmB{vcAclf(Ff{Z!@b%k?f_}e%s{i@)X0-!yz(-`*pyeUR@5`9` z(~uZ?1geH^L-VlDu3>e&7KsrT5Q7nqqblPCv}Ame2l+P=8Q;;7nM5@+)ep%)bLIpv z=dMkGJPk`$8kmRvt6No7edbqH+b$#anTuHFl4}=X@*mw)A7vO7ADyX`FBxCEL0j^* zX{tKjYG=7_&E&)HcjKxi?^70!?5|N^9g@bFf4_|j@LSXI=%0w$7A;XThDA_nU5)yo zD`DyXF0t>Aan=_fSOjENza|F$5hRjcM>gwH&}UtVTUiU?lLOz37Z>Ad*6k^fwM@T} zlbcy6t|IJ76VSo|P@NK)zeJs~dqoj0d`U-6S1QAE9O?(2f)>licrPIb*q` z1sBdE*tTn4#Kv0M?3E0>zO6{Vf`QLC4;YRkftK622+ftGZT1{OGGcfa3)wRPB;KX! 
z7=x>h>LrkQFgxzWuHz|K9D87KybSu3abd)MB0?FPJ0V}BF)}_)_0Q^YEG5Kz}0W2lk+T*xF?>S}WA- zx4N3~P0nj+{b13Tah6YTHM@}DTz^bhomzEd4=aB(s7~!Z)sNHX3W66Uew%#Q-GRRJ zWHAo<)3RSBm9yWJs%&@JUoKat-ElNSoAZbmn%<(UwJS|^Vo`ICIw{b#NKG$$E2>W3 zd+v7S3_jLRxr*-UQRYjv%ay0-$f)wZG;eF~L4?ytYVSeBU$t|o3M9VkQNhiYpbEWp zctAy}uSr$WSK7K&-i#x`r_YHaQ<;q;&mDmAyNFfJLNYsN5^Xv2NUo7Df^<{~NJm|b z3q#s6h=T!N<`!NB^xs5vU>zNUJ|=28JzI&|3A7D+<0hOA4kN@#uOWNrD#&rXdm1H2 zgHd}}P}G7|DjwU^t@3YsW|1l^s83ZzFKt<*iW7AKb>g0GMr)W{xTK!8T%GjS>U61| z92iEH{K2A~F$(E0x}~G<4E$FX=U-L1q-tta|4YIxO5IxNUsZWT*85*z_(a0@|HAN# zY~cU38x$>l4u<|(4NEorUrbqm4T;4Zuz z+yxc^X}x^`n~3FoZnqY_!c0*kWZC?Tbytfij&I5)YiIW3DXdMOLy~W)hh=CZ_~d-m zU)~gEtwE`!nI%ZcO|<1r?CKl!%rD>@9V8@LmDZf%LcNktsx0yrz*-MccO?;s-=x~e z9*`mrQ#muNOxT}Ug0d#EXsHWfk%wTV56ALQz5n`2p1}3nFK3aIjzz5ZnO&BD{XD22 z_Vz#2w9+fj_WD6gAN>1R^h*n0j(dM^Uq_#WN-^n`=cA-)c&Tp}inz{Perv1IW zpF_*FO`oiOP1$=VXta_W?$pdab`~Q|GiTW5{wIYE zznK~!@7@P&@RNUOoh)-tr!gM#y2~mDX6wEcn7rA?$P_Z5a%8tIYNC{}nvRs+3`O?W zZR*_m<=fSHW^S0aYiS#tLRvTu3@{C(G<=3gTuKt!zGv$lWMISgElP*~TdYlmFY9G` zt&_lFlRZ@5@#v$F?K`T22hou>wg;BU!aSJV=>N8t(#a7BXBsukw;jIU*y;Lpder%O zx6&q8LuxR;*H!%$&Sd1V@{drkSk|D=aSkMdjnukw7cAN3NMxT*?+`a%EDVWqaJ4=p z@e`uoj!5aeL=FR`lT}ZR!5fpnTt-G%jAS8bk(aE>{v{8_ix0%^pm}Umw(l9sl`~l= zZp&*{7wq^MllmpAzUI^V^zo?QbxT-UxEp~m3i3kLdfnPils?2g*TNs%>cS;oEMf$I z>0pw4!}1{SH2;)Cc6pB1AxV-O>jCn?-=FeX61Xohd*3ujmgt%C*Xyi5@kN)xqMzus zguYe#-%59jzG+F^B#`YFFZr^874tz8%yS4+pO{mw;QCDJYlmX_(zEYgUL)=Oz(TH1 zWC~qkd`rpNATf|R(Rfd*Ebz~*I60)A@z^e>zOY7 z4@Dvib=y0aEU|Cr8k4&|2 z@5yOMd}f7D1|`V>p9VhU{TLmxcIthrcjecMs$8D+z2WV<75c9clEu7lRQr-kK2gva zt8(rELBVDg8J#&*Elk~N`65{d*0~e(;a37&kG;mE8}Qob{rXbQCGT^thg8`(qYqov z!IB~F_gxcICG{s(`y9OAl9!Gyd1B5#j7J{#8BP1W>>Zw`F+T8&M(*o;OnbjvTgQ#o z-V{}N{~~I+4dRD>_b!tpcM=2eC%5#(`RlSCm(5FGE?Z+Ju|FA!KiqH3l^@Fu;j-($ zYti}qN80KT>#f82I>z-Lr6+dZ-E}Xt(f{d9+Q7K!UFh#1|6tKOaY}wbMj^!L%l7`a zsjGja@(1Sqn& z!YfnxNKt=5r|(%SP9h_!ekLcYKZLLPQ>r!hBQ|3oRW*Uw_l3Q84f~tZhrq1Au^FgH zV8;{WpZz_Y);HE^cXyGUqd=APhL^J>)#(E7|aq`y*|sVHGYRU<%{?#*)=M6(W{MDb-MAl1bq5e z_If;Aikl*=PO3+lZAm#gBk zp?1dOkpCFh-?tan!Q#0JyRvI2?;_$-IR^y#gW^AqTURG!fht}O^YG(gc6e=Q)<-`E z;2PItdV2?c3|iv3Xwl!?{c#;_UG~1Y-%CB|dY5n(yp>wBZY#PEXnHHs=div+(!oiV z4;|FJG&Cc2trpYC(8s67*3uidGGzLT07=KZmr+`f2dc8)>Che-b6u3=)urA8YHr0D zz3ys5zGIr{mA5kbU56pp%!atHt@Vz1K`m)3-;_It(A&F49N2;ueVsqB6$zO=6GQ5rj`oHX1^O3rf10%;L8?c~_cXoyf(HqmFY;-fzTMZ~t@rwt?1I^0g~-Q2 zYN5%RJ&SJ}bDmkX-z|-Pz@v-kJ)H3v_TZBJ2P#LpgHjQLocnL)WzxMpjLaO4bhZbV z((&&g2hs_|K!0;G;89Xa-{K@qLyP|K#IUzu=&58Btl++B`8Pb?jGW%cRBPR;t`B|A ztkr=XvMt0~{PYra{jOm?bwmEJpt|Ah6Y2rv$L^>`Y>b*pW?KunaTxptb|u6&23{sY zshf!IfE|>q&mxg)Fh=RmHc_nyc-ppfbz@>Rv$p@y@C}k*S>=>M(ppFmNB34Vy9tR* z`J)jAFn^%r#T2+-e)`Z0uL z!0aw{bCTgvH#hW0>41Fzvc&PtZVRbfLNytPh3LrKhevXhROxb^m1C`Aec6q^+QF!j z@AYnZeWuto9O|~%8b4U%sWR;v#v|j*9(CI*0ot->ap~0|%YHl= z^m2Xow^CnqgK@~^9H?_`px(c3F;ocd;?UI6+&*Z$M;Dp&G-TfUwZ zePX%$2yD)LLYy;&;Ea?t%ykfvtddi`kw)tILIFzh{efrI7T(E~Gv*Pw^EJ|IVNMWi z@4vXA@?6is#y5*^dOf#Xi^-?xbv{M>s*Vf8vLl_2%67yySXs2bqBnmOvWC3SieAzI z^Vgj8iw&5oJ|}916+UBX3(PGkl;o3#!bwOJUBZ#BXzJDIOSsYeNfCBuW73dqv9y@X zu9VkAm5%_j{B!(}zrv#og@WpH?UXL2sw1TTYL4-I3s~fSyz+Mu`#lbpXF=+Gmgg;J zE7tTM!g2l#i%agG<&WMd*OBNeOrUSZm=yq@$hDkn9c_9wb1fm8^k1~`K0(QIZzdgw z5m;j)&iMN8fDBv?IZNPHG9@cd$->nI#2{YBjH-5Ht+WJE)tV|UxR4!+UElZ&$OOV^ z$jjoGoW6`sO2#2V{#Zaw3uEcNSTIY(J*iEQ@jg(=dg@#av!{S8)_*dka5t`c7f0xj z)1mJz7MW-W%ri2RP&x<&W-EgdzuAREvB>r?w4R-!^wV0zisH!S9fwUn;3qPbrTl1-RA13PJ(i9TbKVA z8q0{U>FFT|0F`ipESS{vJtR1Q0l&3 zG#?0aVt#5(PfvWf+ zWJ&22$QoI+1?1OafxE02{{a$T!Jx!f;79xhsXhTQEG-y65s+q!h#gK<|AN;0xFqYF zL@iiMc18oBGA?FwSUl|zY5bokmR?K*p=&4=gX9G zONB2L9%U*dMW?dv17%wo`UfTFpcZ&ICqu25oT|{&^5TQHzuy 
z5xJb+${XR!zk#;+;%o+QH_Xuwt|j#{cR{AEfLu*GjEGQ#Q~`5bxPfxadb z^oJ$6l~J&ro5HmE7|o6|VM(6DoE}my!}A!iA2G+S*1r9-2hAs8Xh3%uiN|5Kufv-C z5j$dONH}&ZR`&+pZ&3Gkf8VL@3+66T_jUY$KHGV0MJj5;%cDimkqUsp_F!pEqOe$27G*Dh~*18mh9Qg6~1M zTEAsLntIrqSFaxKxv^b661u`W72?O>FI>X zaX)i;z1rYCx=wA_n$)Ns^>egWS+~G4^bE+5c>#{jN3^O(ce^{(WBKz^)MKrwjXYrt z^r^=^yhV9DF}g)Pv3WNkwx)OBbSkA3!>u-kXSb=1-S;-BC-vXANP54X(6RSCxn*So z#wTLPvc4G`FQCu*nUDexfDZA3;3gaA@exEQYYR#TTuR3Pkg5S!BawTgiI<1^3FqtL z)(WqB%6)2^dTMWJliK8-2LcB{RQ7xAYE#c)jp}K4qfI@%#q_CXO0Gvib{F~v%Ae~> zI;W0D_zzOhcPp-rtVA~Hy9V{lYtuT_v%&iosb_bLY=>oJmwGOCo=-j3Gao=1Z_}m@ zwBdr>Y4tk*^!<)R>CkKG&A!yGo-aQXLn#}n+IhTDIX{EcbsDRgTMT*nZ(%V%0a@Qx zO5sUMSRFH|7G$MEhAe^{C2dorP527pK}azpSWw9tR&dJ}$o*-oC2#hyjlIlTT=cGx z52zLggp|NiT)qS{i&E(=luGYh3|Sw7Y)FGVCe@9U$}gg~Tz+U=k+mH%2$l-@)o;ah zQhFJ_iq}#h@>p6S&&1V9^j3eq1@cV~>yZ2aw&ov8*vm4vsONjqo7D@!if;A77G5SM z^zR@?9z-;XUnES1t(9f?Ei%lRLJ+;**$KqObOa-`g=Brn-%d5;V9ZM#os=HNWb|PS zc_#TF6N#yBC#7JBQvSj1sCpIk?&c=cPiw=O_fZv_H(zFMaGO?-DCdt{-U2k2|ub%gsoJQdEG) z_S)e4kf8{tQLXk#TRlp>uftMeN3%IUjp}?ahMuM){%=Y(x1%q3o`c?Z+JW1FmVjK4 zgiU(OFSoOOJV~&F*;osnOIvXfHljti8k_5%e3*8&!p; zN_DOe&7U@7{2{air@$95Iw&2FaX0g_Hw8*xS`4!lmKdL!smKYiRA*z-lS8%0Y^3e= zWMKTjgblpgN$6M|RkQ))#d8|4cCa0#d%6L1F$e;WdvWGgX^YTS6d)~%+@wWiJE#Kp z&=J}Jmgp?1)%T#U`WE=g{{gDV0#!vJEoz25 z)CgI>h=4^pKp*#^S+djE*hRGgeFb)yi(hO9%V#Ladx9xhTniTSV>g=RU|k?Hs-mKY zNKQt=Jdmnvf&95!#C+{Q(=)|;6q)FYR3hcp#@J+a-N4D!B7X`JO>i0qQhAYsorjfd1E7DyU|zvF8X5YQ}E~#s@_Bg zN>2ozaUp!+C-9^EIgEQo(&qdX2R%b5xi=@HYKMnXA(1Tls0l+dc+MRt2);r{0uOqz zn-7b#jgUmg6R`X}^t$B>$&wWSuei!V=?dENWFhlhZW7(6(Hz)qr@D=j^CFa1cvEp& z-ou8r1qsuQsIU0J2fl5{>c5F9M-mluH$%$3*vM(zAGNTkH=oE6L?*u^ndfMO1k{r5fa}RQc;+32aP8>;@iI%yG2o zWDWe7ferb?7r|$-8$1rp5f6r385j?&L?W<~j_AkObyk8UREo*q+02Tfn=xc;qt|(e zn`*pgzccP>Fjw5%Ot9aEC3=#mf5uLj?uI$Y)}{*QHX`Ar!}kID%H`QFuoe=Ux`Ws+ zp;Ue^AWP*mTs0OF+C`YEWCR17ZD=`^-snD5g^r-A*QRJ8>I>Y!a80Kpcr9Au$D+P? 
zlpT<7kwZok#D7u%@*?_z_crd2ee6;sH0H>vF3E^3^uE?fV*UVgNREyEq zP1fR)!U*bbLX{!uUHqM!7~BoWz??4Jk{KWUm{Kqs5_l56NIUof8zJGB92n|IWz1(n z+!5T0H((<;!A_iCZyO&;<5g5+5loNFPm>1Zjo5{=3w$hx0wK%LW>n4(N6!&ikAl4!1?`ADo$^r z^d<^ineAw9MtxMyj`3+-$R3YuaV;!`mte>_HWkd*!xEei+`^|Q`Dgjb*lzUI+=iI@ z7?f5`LL#(*-U<)#^T<&=(gvSSW|OvGC7CO`%qR0EG;L;qP^pZuwtuw^O4 zLEh`m|ok@^7P+yzp5D@9C>bZxRyY|k^BmBZJch~OAbNuVv;^(+1~wq;GMdr1 zbrBM>pPTck7UeNoC6Btw>pUv#aSNXb2H8=*S`7~%B{MWx&Wd*$#Hy#dVL7OMKd;u?UK zko?URvl+3-(iE9i?S!NgYtb)+&qbSgRUCw0!ldA*c29~to zp`glJMZ#4{U~4uZk$*7?j3>OL&$lRb{|XlORL}lk=bdgxeGeq`5gooIKD2xab7>d_ z#Rp?IuK*WfE}o~OU)gYLCrYd3q~Q4#W|!RaN9QM_FQo^~Ly#yQD*DcBhFOj{RR_Qv z-qaRWuFd?Cre!Y=+#$E(G=SJT^(q-9V~fAY(cXF zLp5^7${5Qi7NUvrPw_%SJz^r@Co2n zUQ1i>Edo~lvklMPkn&$>3oLF0^RILmr%?4wq*PJS0bE)CjPu(ehXBYt5<~g(F;wzU zJs}QIEsbHLbSh%{jO2a>F=Gn~^x;lFOoht=0NN{>+mfuTTCo0N+omjWntAfNQ7pQ z7J18M>P~D#Hf(3e-lt@~jSHcjXpXcZTNJdR^q~k)!Wm=Mi*F~?*@TPFyjhJ<^xTHZa(U( z$h#2Zq%HtSW}Cm%Ltc>TBuqMgh3EkdIWSp$2dZ5CQDx3=NBuQ+lHn2Tnr9}1^y^02 zUP6KY9&A+KMs-HAozar#WdE%w&^uj8CctB7`7u5M=HQ5B$QDyA+>|1~z1Wark6ykC zPeeAfh8V>DEhBRvr<=Tk&>Rr;(H~)s%cDX8H?BM*LWUz zAWKWhOYj+sh`9S1FTJ~bsFK@?q8n`}m;+1oS9T(P4*2rVL5u$zY?R3F>4NV!Q~kCL zlkY9b2@CE=?Uy7kn8>@HN4WGHsgA3KfIeej1Pw_#{ z=mtv|kMtg@2Tvcj3T}frD%l=B0OQdO5V`2)&s_jA_yL)19Ezbl*?P-YqN?b(c1SE0 z%r7uq0`q`tj6@=ujYq}2=Hg9jCw|OEvvDY66nYY9=27@jGQmN$!=auiy0VG;=K)yL zpVb;q!B^nHMtB42L+4;Tn;xdA2R6h8V zcLXYshpLL38j*Oyhf?`SJ0woWFG~i&F0pYK>Z|8@Q1BgqoCjgDSRW?z%5{q$eVM2) zzYmkapqFY7F#Nl*Rv`DvHO)jQaBmaj0c^N`K|&8yNbVU*CtxTz6wM(?QC|lv{*!4d z{RJ(B{je6RLpF9ZRp0aQ#bqbaS688bBH_QG0ezpOfWD@Ssc{Ew)$?d8`34E&0P*PT zZpb~{tw$XgFU^CL-Ik7B*`3W)V3@6t&{`G+=fPMDy@g%9kw(u&m41t2?sO9?AHC6; zSPS3nWq5X@IeHvv<~$g?p*Z@&_aj>|98!D$NIgx>_>qZ8x10ox3q17753u!(Txf1P z=oc>{R!)=#S9YM~>aCF5xN8X|*hoV=QJ|0Zdg;?gwUP};lnlYR??WWaNe&cTgl1Ew zY3VH53d~IOB~jJec2t4DOo-UQ^p?MlT|H1WMk;OVFs?7?{Cn{-w35u$Cw+6BovQ4d z{uzk5U)h4v?*sUjfr9d9#X)(-@tgrEigcm$e0oE*Ot8YosphY5q$JNIt`8{{nc}Zp zy%=YE8Px_@yptPHa4H>^OmIq$C*|ycGn&yXXxj zFcjQ@$>3Ykt8XSwh#_`5w=??nfZltmq8=Rlz8M6AyHS5TC37MY?w&3>21d=2daYGP+wI_hkrHB#2!OJkEL-eeBsfU%$M)TqI)}Fk;8HH zq-L^M{$QeK78BNU1$^c^01DMZsxC%L{=-NVji)2fhWgN*_z`nD{(P`5co9RfWw;eu zj7NbLh((@3Ouq;!ksGe;zuK^?;c7ILbmiLFq;wW^vV-+$skI3`GhItp~INhO2-B5=&K@aBe&3DK8RRpLlf5SplZl03wTqA z^G7z2HU&ZOA)ZIRh3H%2s&C<&kpxSHTm0CEf*Hd*QCf+$ScMO}*Egf@JmC6o6p(tf z!nBo5b2GsTlfnE6eBtLHk?*llVPd!DtHs#3n6|tp9B4idKL4YTSZyPKR@4h2H%flR z1>-~jh2Cg`$T_y?cVHMcO8Wgwi99L<#E?~20XeB1@4{F`wi95 zHdyo#F|rUt#baqR=Gw6A_kEqy0l{EZ)v zF2`DEHxj{lpfa9-#9y}4A+Ja(d)iU;0l?$$!x!I;m)ehl-6?>SQ&r>|AE>sYG;lH= z>EnLMI5q-#_ul)SDf+c|o{Ca^jMo7R$a8#fTmUVVXmM8JrLhs>k~qiC23qvYMyg|2 zfg^H#67AQH#9g$N$c2GB7v|8n*yw#@cNba;laLM0?_k`Lfm>k1ZtvTh1k4`U#%7G9 zE&g6A9kM=0SKx=XTd>wcNzx~@5LEi0S^l~gEiPF6n;|8e>cR2=h9VDu&q!`Y%Nm&V z)1qD(3gy#heWe$Y*Rs{Cv8x|d^~VK;%*dT-oBwMZ@{ z^x-Ld1rqvYSW%d^@FfE1^bXpdZzgo<5OWDB@i8y=5pdT<|e_n72}mFZIFBE2tG-2g;GR)p*SO17e5$7HL}|q^5@M%9bTOF zL-b3oV7b7Ug-O4-75cy8&O1t~Dr@xjR(Dl(qG=jvkSG`t6N8E(>a85QsybJ7ZgfUW z7{=^i0y>e=0dp9Y?=y`f>gbFZrfHfE&<1ow5FK^U5i^L2+4tMKu#2gu^VVDIt?&J_ z*E#p>boSZzoOAa*VMQ45Bv#YNj;sM+n_PM&3x<-LizHmMcaKviOj!hzJb5X(UP`_% z(-vm2M4ojclL7nehmZwaHrdEwWD1ad4yoO7srBRO`coDsOD!cWwNO2+3d%rF7PL%Q>DqX$uO%OB{;x4z*}WpD zg346aVa}#c@B=-sFLsNQl-RH4)*c$_6M9IdCf1tpCwYgcu>s2+G#!m2FRh5Im~2shtaw{HWuWOY$;mGUW~_;^aw7tj6N zGuhxG+uOVQ9Nb*HT*+inWKUH6|f6+!B@sxx~CK$Zqji>Ldp98Z>X8nLb((5G2B*f?no&ob(f zVqld+k!JgJy9i3!jx^mBm0J0zswG`uprv*9yiNKheVO!KFk}OSDx~$$7gqNfsyTJf zkGV68C2p^yt}xwn^A+7JFxwwH%O+bRa|l^I_%_$Tv$YDkPp6-j$ zzC;CC>(Ea+54BZVdw7ngG;Kq23MJJ`rMz7`qg0z`NE|)=4HbV$QK+VBXCw2cCR6*R z1Zmdsgom|$eZhTbx2#RAnH_M3q)&<~r>eh?zwA#{RiVmTv1DWmO=dj^=fHNFwqjj? 
zHy}<6dYWk?Jxbp@n*#6b1p@fGtC>kl>&@U+?tJhha_o`0isx=-zLw0^n_hsT{xu3B zJ$%R4zYIlGcBSan_~43d!|&m(vU46I#IQkUW{dX9hTus(*i zGuogyRP>YCVbukUZx!;bRyLyxF2mth`v{=o3EZdTbezGe$);fK0#aw8T3J7?;u%fd ztU2iH?GMp4pM%J{A4TNvVBaA8^h$2e&0g%X4&<2 zp>D%(22qbOlqO^L-aQ|(x&qzk;qn7X(h!Y-8~;Ou@0LGB%FyK(P=v-@BKx%Xs&yal@iw>(ewI|S*C z&{Yz<@$|yp(~Dg^yM(hR3`pQF-qVogy$Pnl1d2EIppf?k6!OY+rpgN93?bP2Lg*aW z=ENEottWdqt>wz6W&AZzSZBf|a06D0#t@i@=Qo^v4WRTQtx1<&ilR`tCo9*8Ui zuZRX{EaGgXngXFX)Q-sNZulJpD{sCZ85=jCUmwr(YT}X{Y^C=T*G}xCMn6K~&&&c0 zMq2U);%Pmkru715kP7O5ji;!BtHg)g02&0UK~CNdETIxlE!n`5JitR#Kk#%rK)V0n z0W@RyNLcAjzdnyIqh0w;v z7xQj)p`WGSA5*AS^sY8%zDZni5{&mo2eh(TT`vr+ z_)RZLQNZ^?_L$avLG&39*^n)IU9Zh-e3%vpLoo>O`} z`~5201_KsCRzI9=JoR&A-^o^(Hca!O&<;}Wj20Ye6|HY=r9s`Yv^^-lT&l0hCcBwb z`61{WSWC*<1G3(U>5&mT}M4m;q z>Rd|WB?t;htL4|zv7QYIY(C&q>dZnH=axDgU+bNacqe~VAd#Au9=CYH$ z35u9K$m@48`su|l)>F{NuZApe(*kOA2f}sZS=ITFg-bV}Y14XW^FVR!hcwqU0QaTn z+;lC^+z>QOg-;{;%}nT0Tb=@j>For01RYst?I&@HRG?7EE|M; z>v&Uq>u6P<%oHlykx|+H68=1!ROD^g``dD0dKFo{tgv<%s=0G|C|!rIS#R^2)=LC3 zaT4sKp8>4A7`8fY5TG8zTAi4rY#4Y685RS#kxi_CU+r$H;F(1&gEe?lq#{6ez8wvQ zg=x3jK-OykYnJ8VDtCma`5G))e3)c6QW}4v9q@8Dc(=Bq#)?d;^$vnc?}tH9E@^6P zNBhKIeN^xS*z2`>B{w$X=r>}V#t>@g20HI|sG+yvg__W+em5wd5k1(d8rqs_Xyb>Y znp^(3N8W@?udCDlQPv-k7T%0%!T2%?UR!`6V)#|##vtlu!>_oo4XqlnefYO1*_04X zKSAfFeh2!!AnGn56&-`1^6D%=trIX?vU4ddpOXu}^=RO_6ycs8)CkCVRo+HKdV6Tb zutd~dv$9cg6{%1iT-+aEt11b_dcSJNJ71@ztMYh%tA@OT+8~ zpZsJ9jiEGl1+2WYn(>2!5UEq*)KMNft=;DXRi^eOFJg%FDZ4~=^t&hZb@y|u^fOW! zq^9R-p9RU2Oa>;~$}bHiIoTFn($r85^oon*%zWU}GVP;Kw}4xRxzSl-0Lkxw5jL_D z=3)EeyWlJ#R=KHouZ)3Ny`Dm$FI{rqqC{fcRc_;r4)%Wi!*f4|aE|NV3-oJIPyf-A zrHwPNxUC02826{e#+#kXXw~eG`;E75dj`+Rnnu=fKXP+iw8!ATd5IUa|pnVG`W*cueXYv}1$iM{*+2o!3 zDactna@ef&!SlvH^Cm51)77jfT5&|1ZRri5=sD5)mFk#4J@nX#)Pc`K9&JIc7eRJ; zyY+DAs z=yQA7P^RZF?_3P+aczK@fQJJZM`r8B95*$;i>!GJD7o_eJL*;Jl0TJac@xPd4;Ze> z0$Gj_Hl!K#+_${`T##p?!I(EOY>@|kdSWBNNzhzVNDd>`aq?qK5cH`Z; z+n~*?q?Q?1Ib#kfc&IR9(UVJYmImfGCeqC`1eqZ zJOvxr)@B?3DxHOw*e7A8;b)>+!N-&iDPhZ}9xS{6kzq_C$H|(PcN+g%zc|-;FBIN@ zZ~RDs;{>=EhhmcaPH6QslKkN)o+sPB^h{aj1=$#O6wjP@sLc75rPkXzY{abNv&_mxf=K%?@EZ90>AM8)xU82WL6U%otzY53-zrXv*3YU49{_Y$ngw6O<1c z1kbi?9iH)K6~hU-UGbREWzR z#OxC0GxRDJy){ZBXeKf~PWDLBeK{^d7h@=?WUptq$Z8x;`#Z0MS zRG=cQSJv=JvZEd7@RAWejooPHI}pH8ty0mqk!z&pQp=&wVqHUGEskAuUxQBTrWx`3 zuGdLv+loMeZ`4>*u zxSr;5oeaOo8@RH!tr^-XvUM4x!k1x^fCXOS0{FRq?SsKQbn&>`7=+uPjjqNn@h=?= za=(>=+3P`;-|p25(B4z*gY0t{R4gP_sXrLEI#I2X*Tr)TBIAcHKympZ@a3U><5hHw zQ8_53Y8y>0Og9Bbi47{z&x#}5YJ@>@;PXJ?mG42%C^%PsNvUTX>Uze(+1-en zxo>STc6J0inN|6Ej9tYiw^LdGO6}$Ap{)z=ZL*Mjnuq=_6vd*8g}=DYrYmsE@#gLm zpMz`@GNMQNF!2omY*5n&mjlr+$R=f@+Q1sFS3DR(J}$~005lwoAlECTyt0O`Y!2DT z-vNzJ@XR+J?S13hU~owmW;)vsa`s@dUk5PBE(dZSLz+8|Bl%=vT|0C>Rtr$zzY4j2 z`3DsK9F*Wis9XC~JAP1(R{k4l6#s27^~*cx-@4w|_1v%Up+F~Ucus(Q;A}hK6qp7Y z{D2n8-U3xCT!M%9ckLXwFgsHU*4cz4v_%FwP@lG_TxeDa^ zJf79xolU9CJH7IQF}TzV-f!0fZl_W7I*`~ItyuR)K)eC1>L+)j)h-{xE5Hj>^f24( z!A0w4hf#c$gUQ{86ja>{Mbm9Jd7kSh7K~w*-yduS~`ek#j?U z%Mck^A*^nof{}aufcw@XXjeC>$9Pt=9yR>4z>Du6K-~+;)=h3UKCfCyY0LxDXmKwU z97{n-26&AxQsBRqY*RgkXxd3>BtW*zPHFH8o&{cN$M&bRP!I)_Ue-qGD9Fmfw0^}& zP*g1JgUdY>MBl{uqwfmXu@LZK9&#TACH7|lPjms+gaA)306e<^uvW5NJ?L@)$kyw~ z4TL}r&h18_TtLm4aIS6TS>m%rRBJrQ{)=G{m<|Z-4B$hDW4b^Wsj@YGxO@wjguH&P zHIVrq%oO0j8t3GKcQ_)0E5UQ0nhou!Zl1Z@AzRH`5sy(z*9o9l6046Lj?NJY?KV9^ z*8e%F@>OWi_&5y83|h;-8M%SwWJC9L!_Px1x*FA@_ff&(qagGCD+-q)2V7PI8aIIA z+O{5Z(uEsddBH-zqQ}(Pbx8)o>%T^|+D{P_ls~dAS>sjtvfcQ+qqc;`YiI*B1p%j_ zU&)o-#uvUTY@{B8y?z$x0XK;+o<~WUDa4}Pa5+B*yJYp@G#{c>br9nOFQXtjH4DSK z0U_DHP<0ih9&r`x=m>S2hBSYx=-da2zdC4q(XnM7S^2#Z7k8|mv4HHCXli}bZhYw$ 
zv%9xp?D{7ut-N9hmA*8HpdW(9mmS0CT3mCRjXin~R(YV?*wax2zp?`{n0F-#S?`FT z`R!QkUD2b2LMAb*cSorN}g^vzP4gitMcJ43~?;9Rdc99)yu-= zMbwSI1YW2b4eEQ~5?stP_fj1w9k?VcC~9XQBkF3!E(w5-Il2+}zz&5( zw&S~`pOk2Y^|lXlRzg<#UN5OqvMvdHxaUJwlkLO?V|i93qf6~t7&MNj1*3oU<6N&o z7P*dR(Pv;9-R_l_iGo-!6qNytUG~>L-j8>vk?T1WFDVj`pcg1P70xARN_JZoVCQ-? zcsd7)VIkui%cL^prD))K64SZF!`v4F%BLXG6Gxh_A%w_#ppD+c06B9q$R!^l(mxhf z{;{BxhlFf81+^cMbzP07o|Vm%R(4Y$Tepg91h2E*_-1niR&k~VM*L1EH6PLk%0y6n z2L+99%R-{0gi1;u4#H&;Dfj3Fc*Mg#GxZ{ zu<=X=Yil`uY}r%rs~;4mzJEdk?_|tS$u96G^Q+l^476j6s02mjW(`0j`9$QbK=~&G4(diJrCcE}{5i9^ffrv{i4w zs_KIXU>CGipOcC=5$lUzL^gg8Oyd$6i$B;8=#bLq@&K=pN(@GBV(fCj1Obzu155!g zQA0x|Zh=AKc0$yNw}t?A45bBa=v?i9LAA`+s`H?zE_?w{1cT}c=vO@##j6)d_6~$s z-}^k^L8MjxO&Dxi3fNi-c!N+~wVdQ${Vp=9cP;_!7FK%%}JgjC8a%--pMm5a>+R&x8meSg5;ZobY z2+%Ii7RLZLNQ%7_Kh)mAu}ZZM!L;@X;%2oHFs*%$1DomwEdh)e44AMCP$j8q$m$wF zuDcvF)!mDrI@!8a_Yh51w|X()X~Fv&4OO>M7`!2++Xeg!&UGJ1>Bn%X&&dM}=2`vF zT)=+N)=z|U{Xt6slW@ZN<6Z{1_0Zbwk#o*1u71~Qrsp`huo0>C+&02fffri)?FbeW(m zMT4g6p9kCkS<|buOw*f4YuZ5rxNqRKS76Cnob=Q%x+ULdyogPU5VQ}n6lk{iXx_`f z5rBP4pQTg_D&@m2A&RF;Qz_+~%BE6!2&K}do@pv2NK?vLRnt%I)OUO;lA4Nq8V6%U z+ud|Q&O7PD^0)WW(;P-vqeDKg^-|p8ADVfV{QRe<(cgQn6W1|DT}-y@86TyoZ+7|e zckf3Bko+mItbgwp7=tN*soZ8t2U;@DbQxW;RIgt#}%ww9C&^LmD~fbl!!bPvZi{LkQvIPXWHtTj$jy?BIPXO??F7D^sNjr7(s`*QS4 z#u$$PMQ$xKbjgi#B4nv+C5D`YQ)h8GkKyQffz+N5LCWs|BNpM=BU+ci<)R2cW04QQ zCGWd7RFmI!gN7CZMq!OX;{;3;a1csrXj1PY(a@xxL7cxCopXQHjn$SyHtaS)foz1< zTd|XI#|J6RsAhOd#U1l!wu2%u#sUU#BUffiMH%1~`k~F~S@=^Faf%q{Xpr~0g=!=< zmpcHA!l9ztA-QID)e=}4^0%+B2#+Y7Dyd^zIbF!DM&5%rG zDa?`RJmezUt!S<^UU?X4+mMm=S0?=k`T_PSHGX!TmP9mDeOPK`VRDt34-L|~Qcp9& zRa)4&jy`b*AR<+@rey#mAeoA@CZnNqJq%voQ!W3Ulo|(-^}e4$c4#lN(kr~oullK0 zV1n{G-al+*aAlS%oA9&@l( zrs^a3`A4F2UIbmrDxYx><{CcS6zEaHGwr5je(oq&KHE`cmi%H>~v@Dxz2 z5Uj#C0wTxeFb>KBi})WXXc|Oz(BmjH=*1q?_%9!`py}A&V@InoQ{n8J4q5CDCj;p? 
zm|Cmt)VUuDn;j#@olw|j`mxJ5>yh@I&`M;-w+*ZLu7;w{3#&}=KV21(5Mwu92u0$n zR`|&_-@sU6_d)Z$nYKJxDgM53;wiNfq&I$WBt06V--HhrNDs zyXEIgnf#k5yF~CVqaeDZg}PnYiyvfBs&^SwJSG$);F8Egm+a#xb;);B>au}x1q=8w z@<|R=5Ou?+plL|Hn4{$mL@uyn=NM*+#lQ<)(+~SEBWOAqO@m+b!1)~181Wsc8V6iz z#%{#y6Djp)Bh8!LiXU7A=f-c)$~}yfF2Ib*7@)`I*Ni|~`9ahpaWAQ1`S6QexE_jA z5m_l;ZIvT==Gj7{G_3bidI_LhzJSV~f-F=6@QO1Oe$mA<(aL(0XYNUG8F>}74P8Ef zyav^m_fh%?J(A}_Y;TRF)6&D8>sNqWUWCrk4v=HrQt56eN*>Pu zyyWp_DB|E*PB)$?>)TzEVOqA7XEnD9`wJj*wUBj97d$zgSI^`PB|>DI;F9<=OkJ;m z=bIxGPO`z%0G3Iq_i2Fp5ojxZok78k6ojoVgvY%I+K+7c9uF3}jMC`iWGn2=07o~a z7k2LL3TzuGuxVdh;YQlP#49!bGtagoQhMsN9?p^;F4d8RYGIZ7%aiOyTjwLKO61O7 zM?L-@RlGipYo^KXzm=VOff*f5$X|N8Uqk$`g?Z-4{pya@Sp~6#| zR`i2x9SRsbhVCY<=U3$9|D*Bi=yQg9LB4%H?5Ci(s!{trGLpK8HS~~HUE`lxr$1c& zx~T?f>km?cgEIX&;fj)GA^ioWKj2do+KuSoHGC_!(!X=6pQ5C?@O^&P=`bdtzogki z*ucEC3)^p`@$|K7xLE8m8|NSVV=GhLv#_(aDooA&8vo2olAM62TtH?S%FnollzAuY zmFd_7?~x%MG47T&+~;OI){wST{cI~e=BTQ#10YL76_)Up=@e&d6d8sLe0C2`sPmWH zFDcExE{kV18O){L>o!Ntmv!MqLjZ>Kwt5B3;1f`{;Kljo=mIy|7p$bA#;_3F$dDr&ZVlbiptY1b&%>OBExCY&y>NcElN5pJ=n3PF9jzSl; zsajGUB%~vb&bdv zS#M$SUAMOi^3S?5N+Ty7^CKL|l8qtSsu{AU!LkdoV=Z7Dz_)829rsX7?AtY;XL1{b zl_jMzc^dVDw7HL_urlY;OG)6eG{KNx`Z)!b!LzEZl5&`P2UjbDRNY(5ODx-3fEdSe zy37gDn~>pse?3`gX=alhlQN`QdZ}EpZUc5M zI*A5NHkr6oQl$L;Hbw)~l3JScY!;;ht(q~-qsZm~rJWS)KOAJ;k}zyqC$z&d01in> z$E|O&-taa)=-+GIylY4edlC21(s4y>f4PNU0lE=}`@pf4GZTyFZKq$;i;>KuXb-Kz z_)P&^&jxbFtqw#?%fX8~Y0JXwW?cCWgs0Uqy@E2)OhZZTX%LI+O-O;05T52pdXvCD zuJ<*Uo~$U~z0~WP1`VQ(hVDnT(nk$(%rx5MwiX?Lq$xZr`j965kY@?~Rm#qzwxNbL zN?EiW7pTBs+Qa31L(L0XvHg&ZP>jD2YiQq8@a_ie661{@F8?s>xu6XHJv~Fg>7=yh zvdzPOj&CW=`J^4INq=RZSOT~P4>RSzf%aFKkAhcpS2y3L(rM+LLEVPQJZ$)(sB8QP zUalPQHE1gu*=|s> zTAd{M7px_3nUL4Y*n;V- z{b8WPiONtfNe(~*Z)0-k6@N(%+oIn-!{^xfaFH4Xq+7S->nZ8k1CF%MbS71|v@gDC zFwhGYleDLh)h;qzhD|Hqq#3Q<830jy?3_iUM5MkII#V6<9@$^CqBwVrX46`fUGy~)hulk5-RCD#Os!L$XHl@1-LC6=kCNSo)VNNtabWzuXw7FWIC;L5b zqhEd?r`*xCimbjB8M3v^JArY~6N4;r7Jd-vaxz5a0vdKjNL2vhi_tzL2dukVgjNDQ z(a*g!z!tJGxjrE(^K3m&UvG)g^;eNIdf+@$x&ny>c^`pGSi(T1UTUchQZrgpJc}=F zVE~t|H0d-F0V*4e8akm$$cm4KI{_{S1=bEI8iN=oSiAuln>*n=5ry=|kg^|yX$dH? zdr2AjEuSDz6!0pZ2)UE4O(|SwRAra#FDS|B3C|9f=vKz+N7Tjyx3`z z@}#=56H}4t`poI-428B#T(7jnoS`n2*G%=tmvY6$_|y3FGyXgNTdKufpyP7#ApP9^wcD|VBFE?x*($^J9R(snKcTo>CHK+GDg64@a z*>DZQiA?xJpm%K^HsPp|UQi=W4>ZfA6vhGBL3Hgxqe2C9`P zZj%j6wi~F?(B5KFpG3`*Ivj5EWR|3vCwCm=Hczp}begAhoIKC8bn!5sGN#p0;Wk~| z)MUCkk93=E%d&|Np*Xj+neNRp<8eRcF+J9|*pH~Q>FKy=9u+L4;m4G=nO@h6ou;>= zu+#KeSvJ$xQSLVV7RO-uJLCgl)a6uPPe^1Bj2XZk%|O?I^UR>-hhOfNjb^aJVKYM= zhuh3B)-}T&r*&ecRlR00Hw~G^9kp&V!sS6`r1x!`8KvhkqaC@OW{h{$jIBM!ZI)OW zoo30}(~zcHLqjjDGfO?6xy{nGhtD(1+?Lxc>v+*-mNTZ9;X zW=-f^K^-=Z_xx_E@SnA> z-x}JB;(p#k=kYoeiM1^}lW{P`H2SKrlDW6%?+gAZl( zD8*h4w$BtyW+*z#)f)Yq;)uM0Q8y)bpINThK>v8^Rl@ocNon=S1=Ze=`FKi@{*g$| zX=HektZmWuHu^+AKxuU{_LJ2psAXtNF{m#mlwBd`q*mO_2$t9&DOs6RAzwC?TL4v? 
z7&Yn+%@ROXZw%IZf>Rd*>jX`20Dn-{-8we*i&noaj< zJTamnK<5XMEz)_^c+Aro8~3_EaytEtb8pBoMVtK9GgfG4WB>;IK>uEAXiZx`>8AsY robrQ|owa>aoRhN_X(Mf!48lrA?l?kCzkiF4Q8iUAs#=ol_WAz{i*386 literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text new file mode 100644 index 00000000000..124dcd61f40 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -0,0 +1 @@ +billustrationionjukudoyamakeupowiathletajimageandsoundandvision-riopretobishimagentositecnologiabiocelotenkawabipanasonicatfoodnetworkinggroupperbirdartcenterprisecloudaccesscamdvrcampaniabirkenesoddtangenovarahkkeravjuegoshikikiraraholtalenishikatakazakindependent-revieweirbirthplaceu-1bitbucketrzynishikatsuragirlyuzawabitternidiscoverybjarkoybjerkreimdbaltimore-og-romsdalp1bjugnishikawazukamishihoronobeautydalwaysdatabaseballangenkainanaejrietisalatinabenogatabitorderblackfridaybloombergbauernishimerabloxcms3-website-us-west-2blushakotanishinomiyashironocparachutingjovikarateu-2bmoattachmentsalangenishinoomotegovtattoolforgerockartuzybmsalon-1bmwellbeingzoneu-3bnrwesteuropenairbusantiquesaltdalomzaporizhzhedmarkaratsuginamikatagamilanotairesistanceu-4bondigitaloceanspacesaludishangrilanciabonnishinoshimatsusakahoginankokubunjindianapolis-a-bloggerbookonlinewjerseyboomlahppiacenzachpomorskienishiokoppegardiskussionsbereichattanooganordkapparaglidinglassassinationalheritageu-north-1boschaefflerdalondonetskarelianceu-south-1bostik-serveronagasukevje-og-hornnesalvadordalibabalatinord-aurdalipaywhirlondrinaplesknsalzburgleezextraspace-to-rentalstomakomaibarabostonakijinsekikogentappssejnyaarparalleluxembourglitcheltenham-radio-opensocialorenskogliwicebotanicalgardeno-staginglobodoes-itcouldbeworldisrechtranakamurataiwanairforcechireadthedocsxeroxfinitybotanicgardenishitosashimizunaminamiawajikindianmarketinglogowestfalenishiwakindielddanuorrindigenamsskoganeindustriabotanyanagawallonieruchomoscienceandindustrynissandiegoddabouncemerckmsdnipropetrovskjervoyageorgeorgiabounty-fullensakerrypropertiesamegawaboutiquebecommerce-shopselectaxihuanissayokkaichintaifun-dnsaliasamnangerboutireservditchyouriparasiteboyfriendoftheinternetflixjavaldaostathellevangerbozen-sudtirolottokorozawabozen-suedtirolouvreisenissedalovepoparisor-fronisshingucciprianiigataipeidsvollovesickariyakumodumeloyalistoragebplaceducatorprojectcmembersampalermomahaccapooguybrandywinevalleybrasiliadboxosascoli-picenorddalpusercontentcp4bresciaokinawashirosatobamagazineuesamsclubartowestus2brindisibenikitagataikikuchikumagayagawalmartgorybristoloseyouriparliamentjeldsundivtasvuodnakaniikawatanagurabritishcolumbialowiezaganiyodogawabroadcastlebtimnetzlgloomy-routerbroadwaybroke-itvedestrandivttasvuotnakanojohanamakindlefrakkestadiybrokerbrothermesaverdeatnulmemergencyachtsamsungloppennebrowsersafetymarketsandnessjoenl-ams-1brumunddalublindesnesandoybrunelastxn--0trq7p7nnbrusselsandvikcoromantovalle-daostavangerbruxellesanfranciscofreakunekobayashikaoirmemorialucaniabryanskodjedugit-pagespeedmobilizeroticagliaricoharuovatlassian-dev-builderscbglugsjcbnpparibashkiriabrynewmexicoacharterbuzzwfarmerseinebwhalingmbhartiffany-2bzhitomirbzzcodyn-vpndnsantacruzsantafedjeffersoncoffeedbackdropocznordlandrudupontariobranconavstackasaokamikoaniikappudownloadurbanamexhibitioncogretakamatsukawacollectioncolognewyorkshirebungoonordre-landurhamburgrimstadynamisches-dnsantamariakecolonialwilliamsburgripeeweeklylotterycoloradoplateaudnedalncolumbusheycommunexus-3community-prochowicecomobaravendbambleborkapsicilyonagoyauthgea
r-stagingivestbyglandroverhallair-traffic-controlleyombomloabaths-heilbronnoysunddnslivegarsheiheijibigawaustraliaustinnfshostrolekamisatokaizukameyamatotakadaustevollivornowtv-infolldalolipopmcdircompanychipstmncomparemarkerryhotelsantoandrepbodynaliasnesoddenmarkhangelskjakdnepropetrovskiervaapsteigenflfannefrankfurtjxn--12cfi8ixb8lutskashibatakashimarshallstatebankashiharacomsecaaskimitsubatamibuildingriwatarailwaycondoshichinohealth-carereformemsettlersanukindustriesteamfamberlevagangaviikanonjinfinitigotembaixadaconferenceconstructionconsuladogadollsaobernardomniweatherchanneluxuryconsultanthropologyconsultingroks-thisayamanobeokakegawacontactkmaxxn--12co0c3b4evalled-aostamayukinsuregruhostingrondarcontagematsubaravennaharimalborkashiwaracontemporaryarteducationalchikugodonnakaiwamizawashtenawsmppl-wawdev-myqnapcloudcontrolledogawarabikomaezakirunoopschlesischesaogoncartoonartdecologiacontractorskenconventureshinodearthickashiwazakiyosatokamachilloutsystemscloudsitecookingchannelsdvrdnsdojogaszkolancashirecifedexetercoolblogdnsfor-better-thanawassamukawatarikuzentakatairavpagecooperativano-frankivskygearapparochernigovernmentksatxn--1ck2e1bananarepublic-inquiryggeebinatsukigatajimidsundevelopmentatarantours3-external-1copenhagencyclopedichiropracticatholicaxiashorokanaiecoproductionsaotomeinforumzcorporationcorsicahcesuoloanswatch-and-clockercorvettenrissagaeroclubmedecincinnativeamericanantiquest-le-patron-k3sapporomuracosenzamamidorittoeigersundynathomebuiltwithdarkasserverrankoshigayaltakasugaintelligencecosidnshome-webservercellikescandypoppdaluzerncostumedicallynxn--1ctwolominamatargets-itlon-2couchpotatofriesardegnarutomobegetmyiparsardiniacouncilvivanovoldacouponsarlcozoracq-acranbrookuwanalyticsarpsborgrongausdalcrankyowariasahikawatchandclockasukabeauxartsandcraftsarufutsunomiyawakasaikaitabashijonawatecrdyndns-at-homedepotaruinterhostsolutionsasayamatta-varjjatmpartinternationalfirearmsaseboknowsitallcreditcardyndns-at-workshoppingrossetouchigasakitahiroshimansionsaskatchewancreditunioncremonashgabadaddjaguarqcxn--1lqs03ncrewhmessinarashinomutashinaintuitoyosatoyokawacricketnedalcrimeast-kazakhstanangercrotonecrownipartsassarinuyamashinazawacrsaudacruisesauheradyndns-blogsitextilegnicapetownnews-stagingroundhandlingroznycuisinellancasterculturalcentertainmentoyotapartysvardocuneocupcakecuritibabymilk3curvallee-d-aosteinkjerusalempresashibetsurugashimaringatlantajirinvestmentsavannahgacutegirlfriendyndns-freeboxoslocalzonecymrulvikasumigaurawa-mazowszexnetlifyinzairtrafficplexus-1cyonabarumesswithdnsaveincloudyndns-homednsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacyouthruherecipescaracaltanissettaishinomakilovecollegefantasyleaguernseyfembetsukumiyamazonawsglobalacceleratorahimeshimabaridagawatchesciencecentersciencehistoryfermockasuyamegurownproviderferraraferraris-a-catererferrerotikagoshimalopolskanlandyndns-picsaxofetsundyndns-remotewdyndns-ipasadenaroyfgujoinvilleitungsenfhvalerfidontexistmein-iservschulegallocalhostrodawarafieldyndns-serverdalfigueresindevicenzaolkuszczytnoipirangalsaceofilateliafilegear-augustowhoswholdingsmall-webthingscientistordalfilegear-debianfilegear-gbizfilegear-iefilegear-jpmorganfilegear-sg-1filminamiechizenfinalfinancefineartscrapper-sitefinlandyndns-weblikes-piedmonticellocus-4finnoyfirebaseappaviancarrdyndns-wikinkobearalvahkijoetsuldalvdalaskanittedallasalleasecuritytacticschoenbrunnfirenetoystre-slidrettozawafirenzefirestonefirewebpaascrappingulenfirmdaleikangerfishingoldpoint2thisamitsukefitjarvodkafjordyndns-workange
rfitnessettlementozsdellogliastradingunmanxn--1qqw23afjalerfldrvalleeaosteflekkefjordyndns1flesberguovdageaidnunjargaflickragerogerscrysecretrosnubar0flierneflirfloginlinefloppythonanywhereggio-calabriafloraflorencefloridatsunangojomedicinakamagayahabackplaneapplinzis-a-celticsfanfloripadoval-daostavalleyfloristanohatakahamalselvendrellflorokunohealthcareerscwienflowerservehalflifeinsurancefltrani-andria-barletta-trani-andriaflynnhosting-clusterfnchiryukyuragifuchungbukharanzanfndynnschokokekschokoladenfnwkaszubytemarkatowicefoolfor-ourfor-somedio-campidano-mediocampidanomediofor-theaterforexrothachijolsterforgotdnservehttpbin-butterforli-cesena-forlicesenaforlillesandefjordynservebbscholarshipschoolbusinessebyforsaleirfjordynuniversityforsandasuolodingenfortalfortefortmissoulangevagrigentomologyeonggiehtavuoatnagahamaroygardencowayfortworthachinoheavyfosneservehumourfotraniandriabarlettatraniandriafoxfordecampobassociatest-iserveblogsytemp-dnserveirchitachinakagawashingtondchernivtsiciliafozfr-par-1fr-par-2franamizuhobby-sitefrancaiseharafranziskanerimalvikatsushikabedzin-addrammenuorochesterfredrikstadtvserveminecraftranoyfreeddnsfreebox-oservemp3freedesktopfizerfreemasonryfreemyiphosteurovisionfreesitefreetlservep2pgfoggiafreiburgushikamifuranorfolkebibleksvikatsuyamarugame-hostyhostingxn--2m4a15efrenchkisshikirkeneservepicservequakefreseniuscultureggio-emilia-romagnakasatsunairguardiannakadomarinebraskaunicommbankaufentigerfribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesignfrognfrolandynv6from-akrehamnfrom-alfrom-arfrom-azurewebsiteshikagamiishibukawakepnoorfrom-capitalonewportransipharmacienservicesevastopolefrom-coalfrom-ctranslatedynvpnpluscountryestateofdelawareclaimschoolsztynsettsupportoyotomiyazakis-a-candidatefrom-dchitosetodayfrom-dediboxafrom-flandersevenassisienarvikautokeinoticeablewismillerfrom-gaulardalfrom-hichisochikuzenfrom-iafrom-idyroyrvikingruenoharafrom-ilfrom-in-berlindasewiiheyaizuwakamatsubushikusakadogawafrom-ksharpharmacyshawaiijimarcheapartmentshellaspeziafrom-kyfrom-lanshimokawafrom-mamurogawatsonfrom-mdfrom-medizinhistorischeshimokitayamattelekommunikationfrom-mifunefrom-mnfrom-modalenfrom-mshimonitayanagit-reposts-and-telecommunicationshimonosekikawafrom-mtnfrom-nchofunatoriginstantcloudfrontdoorfrom-ndfrom-nefrom-nhktistoryfrom-njshimosuwalkis-a-chefarsundyndns-mailfrom-nminamifuranofrom-nvalleedaostefrom-nynysagamiharafrom-ohdattorelayfrom-oketogolffanshimotsukefrom-orfrom-padualstackazoologicalfrom-pratogurafrom-ris-a-conservativegashimotsumayfirstockholmestrandfrom-schmidtre-gauldalfrom-sdscloudfrom-tnfrom-txn--2scrj9chonanbunkyonanaoshimakanegasakikugawaltervistailscaleforcefrom-utsiracusaikirovogradoyfrom-vald-aostarostwodzislawildlifestylefrom-vtransportefrom-wafrom-wiardwebview-assetshinichinanfrom-wvanylvenneslaskerrylogisticshinjournalismartlabelingfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairkitapps-auction-rancherkasydneyfujinomiyadattowebhoptogakushimotoganefujiokayamandalfujisatoshonairlinedre-eikerfujisawafujishiroishidakabiratoridedyn-berlincolnfujitsuruokazakiryuohkurafujiyoshidavvenjargap-east-1fukayabeardubaiduckdnsncfdfukuchiyamadavvesiidappnodebalancertmgrazimutheworkpccwilliamhillfukudomigawafukuis-a-cpalacefukumitsubishigakisarazure-mobileirvikazteleportlligatransurlfukuokakamigaharafukurois
hikarikaturindalfukusakishiwadazaifudaigokaseljordfukuyamagatakaharunusualpersonfunabashiriuchinadafunagatakahashimamakisofukushimangonnakatombetsumy-gatewayfunahashikamiamakusatsumasendaisenergyfundaciofunkfeuerfuoiskujukuriyamangyshlakasamatsudoomdnstracefuosskoczowinbar1furubirafurudonostiaafurukawajimaniwakuratefusodegaurafussaintlouis-a-anarchistoireggiocalabriafutabayamaguchinomihachimanagementrapaniizafutboldlygoingnowhere-for-morenakatsugawafuttsurutaharafuturecmshinjukumamotoyamashikefuturehostingfuturemailingfvghamurakamigoris-a-designerhandcraftedhandsonyhangglidinghangoutwentehannanmokuizumodenaklodzkochikuseihidorahannorthwesternmutualhanyuzenhapmircloudletshintokushimahappounzenharvestcelebrationhasamap-northeast-3hasaminami-alpshintomikasaharahashbangryhasudahasura-apphiladelphiaareadmyblogspotrdhasvikfh-muensterhatogayahoooshikamaishimofusartshinyoshitomiokamisunagawahatoyamazakitakatakanabeatshiojirishirifujiedahatsukaichikaiseiyoichimkentrendhostinghattfjelldalhayashimamotobusellfylkesbiblackbaudcdn-edgestackhero-networkisboringhazuminobushistoryhelplfinancialhelsinkitakyushuaiahembygdsforbundhemneshioyanaizuerichardlimanowarudahemsedalhepforgeblockshirahamatonbetsurgeonshalloffameiwamasoyheroyhetemlbfanhgtvaohigashiagatsumagoianiahigashichichibuskerudhigashihiroshimanehigashiizumozakitamigrationhigashikagawahigashikagurasoedahigashikawakitaaikitamotosunndalhigashikurumeeresinstaginghigashimatsushimarburghigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshirakokonoehigashinarusells-for-lesshiranukamitondabayashiogamagoriziahigashinehigashiomitamanortonsberghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitanakagusukumodernhigashitsunosegawahigashiurausukitashiobarahigashiyamatokoriyamanashifteditorxn--30rr7yhigashiyodogawahigashiyoshinogaris-a-doctorhippyhiraizumisatohnoshoohirakatashinagawahiranairportland-4-salernogiessennanjobojis-a-financialadvisor-aurdalhirarahiratsukaerusrcfastlylbanzaicloudappspotagerhirayaitakaokalmykiahistorichouseshiraois-a-geekhakassiahitachiomiyagildeskaliszhitachiotagonohejis-a-greenhitraeumtgeradegreehjartdalhjelmelandholeckodairaholidayholyhomegoodshiraokamitsuehomeiphilatelyhomelinkyard-cloudjiffyresdalhomelinuxn--32vp30hachiojiyahikobierzycehomeofficehomesecuritymacaparecidahomesecuritypchoseikarugamvikarlsoyhomesenseeringhomesklepphilipsynology-diskstationhomeunixn--3bst00minamiiserniahondahongooglecodebergentinghonjyoitakarazukaluganskharkivaporcloudhornindalhorsells-for-ustkanmakiwielunnerhortendofinternet-dnshiratakahagitapphoenixn--3ds443ghospitalhoteleshishikuis-a-guruhotelwithflightshisognehotmailhoyangerhoylandetakasagophonefosshisuifuettertdasnetzhumanitieshitaramahungryhurdalhurumajis-a-hard-workershizukuishimogosenhyllestadhyogoris-a-hunterhyugawarahyundaiwafuneis-into-carsiiitesilkharkovaresearchaeologicalvinklein-the-bandairtelebitbridgestoneenebakkeshibechambagricultureadymadealstahaugesunderseaportsinfolionetworkdalaheadjudygarlandis-into-cartoonsimple-urlis-into-gamesserlillyis-leetrentin-suedtirolis-lostre-toteneis-a-lawyeris-not-certifiedis-savedis-slickhersonis-uberleetrentino-a-adigeis-very-badajozis-a-liberalis-very-evillageis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandovre-eikerisleofmanaustdaljellybeanjenv-arubahccavuotnagaragusabaerobaticketsirdaljeonnamerikawauejetztrentino-aadigejevnakershusdecorativeartslupskhmelnytskyivarggatrentino-alto-adigejewelryjewishartgalleryjfkhplaystation-cloudyclusterjgorajlljls-sto1jls-sto2jls-sto3jmphotographysiojnjaw
orznospamproxyjoyentrentino-altoadigejoyokaichibajddarchitecturealtorlandjpnjprslzjurkotohiradomainstitutekotourakouhokutamamurakounosupabasembokukizunokunimilitarykouyamarylhurstjordalshalsenkouzushimasfjordenkozagawakozakis-a-llamarnardalkozowindowskrakowinnersnoasakatakkokamiminersokndalkpnkppspbarcelonagawakkanaibetsubamericanfamilyds3-fips-us-gov-west-1krasnikahokutokashikis-a-musiciankrasnodarkredstonekrelliankristiansandcatsolarssonkristiansundkrodsheradkrokstadelvalle-aostatic-accessolognekryminamiizukaminokawanishiaizubangekumanotteroykumatorinovecoregontrailroadkumejimashikis-a-nascarfankumenantokonamegatakatoris-a-nursells-itrentin-sud-tirolkunisakis-a-painteractivelvetrentin-sudtirolkunitachiaraindropilotsolundbecknx-serversellsyourhomeftphxn--3e0b707ekunitomigusukuleuvenetokigawakunneppuboliviajessheimpertrixcdn77-secureggioemiliaromagnamsosnowiechristiansburgminakamichiharakunstsammlungkunstunddesignkuokgroupimientaketomisatoolsomakurehabmerkurgankurobeeldengeluidkurogimimatakatsukis-a-patsfankuroisoftwarezzoologykuromatsunais-a-personaltrainerkuronkurotakikawasakis-a-photographerokussldkushirogawakustanais-a-playershiftcryptonomichigangwonkusupersalezajskomakiyosemitekutchanelkutnowruzhgorodeokuzumakis-a-republicanonoichinomiyakekvafjordkvalsundkvamscompute-1kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsomnatalkzmisakis-a-soxfanmisasaguris-a-studentalmisawamisconfusedmishimasudamissilemisugitokuyamatsumaebashikshacknetrentino-sued-tirolmitakeharamitourismilemitoyoakemiuramiyazurecontainerdpolicemiyotamatsukuris-a-teacherkassyno-dshowamjondalenmonstermontrealestatefarmequipmentrentino-suedtirolmonza-brianzapposor-odalmonza-e-della-brianzaptokyotangotpantheonsitemonzabrianzaramonzaebrianzamonzaedellabrianzamoonscalebookinghostedpictetrentinoa-adigemordoviamoriyamatsumotofukemoriyoshiminamiashigaramormonmouthachirogatakamoriokakudamatsuemoroyamatsunomortgagemoscowiosor-varangermoseushimodatemosjoenmoskenesorfoldmossorocabalena-devicesorreisahayakawakamiichikawamisatottoris-a-techietis-a-landscaperspectakasakitchenmosvikomatsushimarylandmoteginowaniihamatamakinoharamoviemovimientolgamozilla-iotrentinoaadigemtranbytomaritimekeepingmuginozawaonsensiositemuikaminoyamaxunispacemukoebenhavnmulhouseoullensvanguardmunakatanemuncienciamuosattemupinbarclaycards3-sa-east-1murmanskomforbar2murotorcraftrentinoalto-adigemusashinoharamuseetrentinoaltoadigemuseumverenigingmusicargodaddyn-o-saurlandesortlandmutsuzawamy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoruminamimakis-a-rockstarachowicemydattolocalcertificationmyddnsgeekgalaxymydissentrentinos-tirolmydobissmarterthanyoumydrobofageologymydsoundcastronomy-vigorlicemyeffectrentinostirolmyfastly-terrariuminamiminowamyfirewalledreplittlestargardmyforuminamioguni5myfritzmyftpaccessouthcarolinaturalhistorymuseumcentermyhome-servermyjinomykolaivencloud66mymailermymediapchristmasakillucernemyokohamamatsudamypepinkommunalforbundmypetsouthwest1-uslivinghistorymyphotoshibalashovhadanorth-kazakhstanmypicturestaurantrentinosud-tirolmypsxn--3pxu8kommunemysecuritycamerakermyshopblocksowamyshopifymyspreadshopwarendalenugmythic-beastspectruminamisanrikubetsuppliesoomytis-a-bookkeepermaritimodspeedpartnermytuleap-partnersphinxn--41amyvnchromediatechnologymywirepaircraftingvollohmusashimurayamashikokuchuoplantationplantspjelkavikomorotsukagawaplatformsharis-a-therapistoiaplatter-appinokofuefukihaboromskogplatterpioneerplazaplcube-serversicherungplumbingoplurinacionalpodhalepodlasiellaktyubinskiptveterinairealmpmnpodzonepo
[... remainder of the generated public suffix list table data (vendored golang.org/x/net/publicsuffix, single-line constant) truncated ...]
\ No newline at end of file
diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go
index 7caeeaa696d..d56e9e76244 100644
--- a/vendor/golang.org/x/net/publicsuffix/list.go
+++ b/vendor/golang.org/x/net/publicsuffix/list.go
@@ -101,10 +101,10 @@ loop:
 			break
 		}
 
-		u := uint32(nodeValue(f) >> (nodesBitsTextOffset + nodesBitsTextLength))
+		u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength))
 		icannNode = u&(1<<nodesBitsICANN-1) != 0
 		u >>= nodesBitsICANN
-		u = children[u&(1<<nodesBitsChildren-1)]
[... remainder of this hunk (childrenBitsLo, childrenBitsHi, nodesBitsTextLength and offset lines) truncated ...]

From [...] Mon Sep 17 00:00:00 2001
From: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 19 Dec 2022 16:36:49 -0800
Subject: [PATCH 023/133] Bump github.com/golang-migrate/migrate/v4 from 4.7.0
 to 4.15.2 (#5053)

Bumps [github.com/golang-migrate/migrate/v4](https://github.com/golang-migrate/migrate) from 4.7.0 to 4.15.2.
- [Release notes](https://github.com/golang-migrate/migrate/releases)
- [Changelog](https://github.com/golang-migrate/migrate/blob/master/.goreleaser.yml)
- [Commits](https://github.com/golang-migrate/migrate/compare/v4.7.0...v4.15.2)

---
updated-dependencies:
- dependency-name: github.com/golang-migrate/migrate/v4
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 4 +- go.sum | 921 +++++++++++++++++- .../golang-migrate/migrate/v4/.dockerignore | 1 - .../golang-migrate/migrate/v4/.gitignore | 2 + .../golang-migrate/migrate/v4/.golangci.yml | 7 +- .../golang-migrate/migrate/v4/.goreleaser.yml | 102 ++ .../golang-migrate/migrate/v4/.travis.yml | 25 +- .../golang-migrate/migrate/v4/Dockerfile | 23 +- .../migrate/v4/Dockerfile.circleci | 17 + .../migrate/v4/Dockerfile.github-actions | 11 + .../golang-migrate/migrate/v4/FAQ.md | 9 +- .../migrate/v4/GETTING_STARTED.md | 12 +- .../golang-migrate/migrate/v4/Makefile | 27 +- .../golang-migrate/migrate/v4/README.md | 29 +- .../migrate/v4/database/driver.go | 7 +- .../migrate/v4/database/multistmt/parse.go | 46 + .../migrate/v4/database/postgres/README.md | 11 + .../migrate/v4/database/postgres/TUTORIAL.md | 27 +- .../migrate/v4/database/postgres/postgres.go | 244 +++-- .../migrate/v4/database/util.go | 14 + .../golang-migrate/migrate/v4/migrate.go | 25 +- .../migrate/v4/source/driver.go | 4 +- .../migrate/v4/source/errors.go | 15 + .../migrate/v4/source/file/file.go | 105 +- .../migrate/v4/source/iofs/README.md | 3 + .../migrate/v4/source/iofs/doc.go | 10 + .../migrate/v4/source/iofs/iofs.go | 176 ++++ .../testdata/migrations/1_foobar.down.sql | 1 + .../iofs/testdata/migrations/1_foobar.up.sql | 1 + .../iofs/testdata/migrations/3_foobar.up.sql | 1 + .../testdata/migrations/4_foobar.down.sql | 1 + .../iofs/testdata/migrations/4_foobar.up.sql | 1 + .../testdata/migrations/5_foobar.down.sql | 1 + .../testdata/migrations/7_foobar.down.sql | 1 + .../iofs/testdata/migrations/7_foobar.up.sql | 1 + .../migrate/v4/source/migration.go | 18 +- .../golang-migrate/migrate/v4/util.go | 2 +- vendor/github.com/pkg/browser/browser.go | 10 +- .../github.com/pkg/browser/browser_darwin.go | 4 - .../github.com/pkg/browser/browser_freebsd.go | 2 - .../github.com/pkg/browser/browser_linux.go | 2 - .../github.com/pkg/browser/browser_netbsd.go | 14 + .../github.com/pkg/browser/browser_openbsd.go | 2 - .../pkg/browser/browser_unsupported.go | 5 +- .../github.com/pkg/browser/browser_windows.go | 10 +- .../pkg/browser/zbrowser_windows.go | 76 -- vendor/modules.txt | 8 +- 47 files changed, 1656 insertions(+), 382 deletions(-) create mode 100644 vendor/github.com/golang-migrate/migrate/v4/.goreleaser.yml create mode 100644 vendor/github.com/golang-migrate/migrate/v4/Dockerfile.circleci create mode 100644 vendor/github.com/golang-migrate/migrate/v4/Dockerfile.github-actions create mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/multistmt/parse.go create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/errors.go create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/README.md create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/doc.go create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/iofs.go create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.down.sql create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.up.sql create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/3_foobar.up.sql create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.down.sql create mode 100644 
vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.up.sql create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/5_foobar.down.sql create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.down.sql create mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.up.sql create mode 100644 vendor/github.com/pkg/browser/browser_netbsd.go delete mode 100644 vendor/github.com/pkg/browser/zbrowser_windows.go diff --git a/go.mod b/go.mod index bf512659505..2aff2097d0b 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/go-redis/redis/v8 v8.11.5 github.com/gogo/protobuf v1.3.2 github.com/gogo/status v1.1.1 - github.com/golang-migrate/migrate/v4 v4.7.0 + github.com/golang-migrate/migrate/v4 v4.15.2 github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/gorilla/mux v1.8.0 @@ -167,7 +167,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/ncw/swift v1.0.53 // indirect github.com/oklog/run v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.7.3 // indirect diff --git a/go.sum b/go.sum index 36e9dd3e744..7a9f8f39798 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= @@ -27,6 +28,7 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= @@ -47,12 +49,14 @@ cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9U cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= 
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -63,6 +67,10 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= @@ -72,20 +80,38 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5 github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.28 
h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= @@ -93,12 +119,39 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 h1:PPfYWScYacO3Q6JMCLkyh6Ea2Q/REDTMgmiTAeiV8Jg= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod 
h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -112,15 +165,20 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 
h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.23.1 h1:jR6wZggBxwWygeXcdNyguCOCIjPsZyNUNlAkTx2fu0U= github.com/alicebob/miniredis/v2 v2.23.1/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= @@ -128,10 +186,12 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -139,111 +199,293 @@ github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4 github.com/aws/aws-sdk-go v1.44.109 h1:+Na5JPeS0kiEHoBp5Umcuuf+IDqXqD0lXnM920E31YI= github.com/aws/aws-sdk-go v1.44.109/go.mod 
h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/config v1.15.1 h1:hTIZFepYESYyowQUBo47lu69WSxsYqGUILY9Nu8+7pY= github.com/aws/aws-sdk-go-v2/config v1.15.1/go.mod h1:MZHGbuW2WnqIOQQBKu2ZkhTjuutZSTnn56TDq4QyydE= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/credentials v1.11.0 h1:gc4Uhs80s60nmLon5Z4JXWinX2BkAGT0YROoUT8h8U4= github.com/aws/aws-sdk-go-v2/credentials v1.11.0/go.mod h1:EdV1ZFgtZ4XM5RDHWcRWK8H+xW5duNVBqWj2oLu7tRo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1 h1:F9Je1nq5YXfMOv6451NHvMf6U0iTWeMnsG0MMIQoUmk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1/go.mod h1:Yph0XsTbQ5GGZ2+mO1a03P/SO9fdX3t1nejIp2tq79g= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7 h1:KUErSJgdqmqAPBWAp6Zx9CjL0YXfytXJeXcsWnuCM1c= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7/go.mod h1:oB9nZcxH1cGq7NPGurVJwxrO2vmJ9mmEBayCwcAlmT8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1 h1:feVfa9eJonhJiss7g51ikjNB2DrUzbNZNvPL8pw/54k= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1/go.mod h1:K4vz7lRYCyLYpYAMCLObODahFgARdD3YVa0MvQte9Co= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 h1:adr3PfiggFtqgFofAMUFCtdvwzpf3QxPES4ezK4M3iI= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8/go.mod h1:wLbQYt36AJqaRZUQiCNXzbtkNigyPfKHrotHuIDiCy8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1 h1:B/SPX7J+Y0Yrcjv60Nhbh1gC2uBN47SfN8JYre6Mp4M= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1/go.mod h1:2Hhr9Eh1gJzDatwACX/ozAZ/ljq5vzvPRu5cdu25tzc= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 h1:DyHctRsJIAWIvom1Itb4T84D2jwpIu+KIi3d0SFaswg= github.com/aws/aws-sdk-go-v2/service/sso v1.11.1/go.mod h1:CvFTucADIx7U/M44vjLs/ZttpQHdpxwK+62+dUGhDeY= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1LceH2lZKMUGZJKiZiM= github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/baidubce/bce-sdk-go v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW87MDV4= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= 
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= 
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod 
h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod 
h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl 
v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 
h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= -github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= -github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= -github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= -github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= -github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= -github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= +github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= github.com/digitalocean/godo v1.84.1 h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod 
h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -257,11 +499,19 @@ github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 h1:rydBwnBoywKQMjWF0z8SriYtQ+uUcaFsxuijMjJr5PI= github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI= github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -274,17 +524,35 @@ github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRe github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go 
v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= +github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= +github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -293,14 +561,21 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= @@ -309,9 +584,13 @@ github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= @@ -325,7 +604,9 @@ github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrC github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 
h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= @@ -340,9 +621,10 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -360,6 +642,7 @@ github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= @@ -369,17 +652,26 @@ github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGt github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis 
v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -389,14 +681,18 @@ github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= -github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnbAaq2zgrPrs= -github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= +github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc= +github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -410,6 +706,7 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod 
h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -437,7 +734,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -454,11 +753,15 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -487,6 +790,7 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy 
v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= @@ -506,18 +810,25 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b h1:UlCBLaqvS4wVYNrMKSfqTBVsed/EOY9dnenhYZMUnrA= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -525,6 +836,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 h1:guQyUpELu4I0wKgdsRBZDA5blfGiUleuppRSVy9Qbi0= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= @@ -532,13 +844,16 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQu github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.17.0 h1:aqytbw31uCPNn37ST+717IyGod+P1eTgSGu3yjRo4bs= github.com/hashicorp/consul/api v1.17.0/go.mod h1:ZNwemOPAdgtV4cCx9fqxNmw+PI3vliW6gYin2WD+F2g= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -555,6 +870,7 @@ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -580,6 +896,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= @@ -590,24 +907,82 @@ github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfE github.com/hetznercloud/hcloud-go v1.35.3 
h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/ionos-cloud/sdk-go/v6 v6.0.4 h1:4LoWeM7WtcDqYDjlntqQ3fD6XaENlCw2YqiVWkHQbNA= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= 
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= +github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= +github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= +github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= +github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -626,14 +1001,23 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= @@ -652,10 +1036,11 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= +github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= @@ -665,50 +1050,77 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 
h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs= github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -721,13 +1133,26 @@ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTS github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0/go.mod 
h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -737,13 +1162,18 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= @@ -752,10 +1182,14 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= 
+github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= @@ -763,16 +1197,61 @@ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DV github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest 
v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -795,28 +1274,43 @@ 
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= +github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw= github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= +github.com/prometheus/client_golang 
v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod h1:25h+Uz1WvXDBZYwqGX8PAb71RBkcjxEVV/R5wGnsq4I= @@ -826,6 +1320,7 @@ github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -834,13 +1329,18 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= 
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= @@ -850,19 +1350,28 @@ github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQY github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.3 h1:IYBn0CTGi/nYxstdTUKysuSofUNJ3DQW3FmZ/Ub6rgU= github.com/prometheus/exporter-toolkit v0.7.3/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/prometheus v0.39.1 h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0= github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -872,65 +1381,109 @@ github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZV github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= 
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= 
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= @@ -940,34 +1493,52 @@ github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 h1:NRu4DwMAHgI github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5/go.mod h1:2bDlu7xW2TXrSt/TGBozMMVECjjrnqoLJKc/ZIiPngA= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= 
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= -github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae h1:Z8YibUpdBEdCq8nwrYXJQ8vYooevbmEBIdFpseXK3/8= github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae/go.mod h1:YfOOLoW1Q/jIIu0WLeSwgStmrKjuJEZSKTAUc+0KFvE= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/willf/bitset 
v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -977,24 +1548,40 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ= github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= 
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A= go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1005,6 +1592,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4qPzLBDtQyPk4ydjlg8zvXbNysnFHaVMKJbVo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 
h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= @@ -1017,63 +1608,95 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ go.opentelemetry.io/contrib/propagators/jaeger v1.9.0/go.mod h1:Q/AXutvrBTfEDSeRLwOmKhyviX5adJvTesg6JFTybYg= go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4ZfG42PAGvj3eLUukHE= go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 h1:jrqVQNRKglMRBUrSf40J5mBYnUyleAKL+g0iuyEyaGA= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k= go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/otel/trace v1.11.2 
h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= @@ -1085,6 +1708,7 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -1094,6 +1718,12 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk 
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1117,11 +1747,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1130,19 +1763,24 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1153,13 +1791,16 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1167,12 +1808,21 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1183,10 +1833,10 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1223,6 +1873,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1230,7 +1881,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1239,28 +1889,47 @@ golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1269,47 +1938,77 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1323,6 +2022,8 @@ golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1340,11 +2041,17 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 h1:yuLAip3bfURHClMG9VBdzPrQvCWjWiWUTBGV+/fCbUs= golang.org/x/time 
v0.0.0-20220920022843-2ce7c2934d45/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1356,19 +2063,25 @@ golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1389,17 +2102,22 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1413,6 +2131,8 @@ golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyj golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1423,10 +2143,12 @@ golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1457,6 +2179,7 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= @@ -1468,6 +2191,7 @@ google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.97.0 h1:x/vEL1XDF/2V4xzdNgFPaKHluRESo2aTsL7QzHnBtGQ= google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1477,13 +2201,14 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -1493,6 +2218,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1507,12 +2233,14 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1529,6 +2257,7 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= @@ -1541,15 +2270,20 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1583,22 +2317,32 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1609,6 +2353,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1619,7 +2364,13 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= +gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1627,18 +2378,94 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= k8s.io/api v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58= +k8s.io/code-generator v0.19.7/go.mod 
h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.0 h1:lyJt0TWMPaGoODa8B8bUuxgHS3W/m/bNr2cca3brA/g= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.5/go.mod 
h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/golang-migrate/migrate/v4/.dockerignore b/vendor/github.com/golang-migrate/migrate/v4/.dockerignore index df33687f971..f12dc01d311 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/.dockerignore +++ b/vendor/github.com/golang-migrate/migrate/v4/.dockerignore @@ -2,7 +2,6 @@ FAQ.md README.md LICENSE -Makefile .gitignore .travis.yml CONTRIBUTING.md diff --git a/vendor/github.com/golang-migrate/migrate/v4/.gitignore b/vendor/github.com/golang-migrate/migrate/v4/.gitignore index acb4088f28c..23b56045c6e 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/.gitignore +++ b/vendor/github.com/golang-migrate/migrate/v4/.gitignore @@ -6,3 +6,5 @@ cli/migrate .godoc.pid vendor/ .vscode/ +.idea +dist/ diff --git a/vendor/github.com/golang-migrate/migrate/v4/.golangci.yml 
b/vendor/github.com/golang-migrate/migrate/v4/.golangci.yml index f447ee1ce08..0419e32befb 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/.golangci.yml +++ b/vendor/github.com/golang-migrate/migrate/v4/.golangci.yml @@ -1,6 +1,6 @@ run: # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 2m + timeout: 5m linters: enable: #- golint @@ -10,7 +10,6 @@ linters: - goconst - gofmt - misspell - - maligned - unparam - nakedret - prealloc @@ -19,8 +18,8 @@ linters-settings: misspell: locale: US issues: - max-same: 0 - max-per-linter: 0 + max-same-issues: 0 + max-issues-per-linter: 0 exclude-use-default: false exclude: # gosec: Duplicated errcheck checks diff --git a/vendor/github.com/golang-migrate/migrate/v4/.goreleaser.yml b/vendor/github.com/golang-migrate/migrate/v4/.goreleaser.yml new file mode 100644 index 00000000000..682248f65a4 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/.goreleaser.yml @@ -0,0 +1,102 @@ +project_name: migrate +before: + hooks: + - go mod tidy +builds: + - env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm + - arm64 + - 386 + goarm: + - 7 + main: ./cmd/migrate + ldflags: + - '-w -s -X main.Version={{ .Version }} -extldflags "static"' + flags: + - "-tags={{ .Env.DATABASE }} {{ .Env.SOURCE }}" + - "-trimpath" +nfpms: + - homepage: "https://github.com/golang-migrate/migrate" + maintainer: "dhui@users.noreply.github.com" + license: MIT + description: "Database migrations" + formats: + - deb + file_name_template: "{{ .ProjectName }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" +dockers: + - goos: linux + goarch: amd64 + dockerfile: Dockerfile.github-actions + use: buildx + ids: + - migrate + image_templates: + - 'migrate/migrate:{{ .Tag }}-amd64' + build_flag_templates: + - '--label=org.opencontainers.image.created={{ .Date }}' + - '--label=org.opencontainers.image.title={{ .ProjectName }}' + - '--label=org.opencontainers.image.revision={{ .FullCommit }}' + - '--label=org.opencontainers.image.version={{ .Version }}' + - "--label=org.opencontainers.image.source={{ .GitURL }}" + - "--platform=linux/amd64" + - goos: linux + goarch: arm64 + dockerfile: Dockerfile.github-actions + use: buildx + ids: + - migrate + image_templates: + - 'migrate/migrate:{{ .Tag }}-arm64' + build_flag_templates: + - '--label=org.opencontainers.image.created={{ .Date }}' + - '--label=org.opencontainers.image.title={{ .ProjectName }}' + - '--label=org.opencontainers.image.revision={{ .FullCommit }}' + - '--label=org.opencontainers.image.version={{ .Version }}' + - "--label=org.opencontainers.image.source={{ .GitURL }}" + - "--platform=linux/arm64" + +docker_manifests: +- name_template: 'migrate/migrate:{{ .Tag }}' + image_templates: + - 'migrate/migrate:{{ .Tag }}-amd64' + - 'migrate/migrate:{{ .Tag }}-arm64' +- name_template: 'migrate/migrate:{{ .Major }}' + image_templates: + - 'migrate/migrate:{{ .Tag }}-amd64' + - 'migrate/migrate:{{ .Tag }}-arm64' +- name_template: 'migrate/migrate:latest' + image_templates: + - 'migrate/migrate:{{ .Tag }}-amd64' + - 'migrate/migrate:{{ .Tag }}-arm64' +archives: + - name_template: "{{ .ProjectName }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip +checksum: + name_template: 'sha256sum.txt' +release: + draft: true + prerelease: auto +source: + enabled: true + format: zip +changelog: + skip: false + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + - Merge pull request + - Merge branch + - go mod tidy 
+snapshot: + name_template: "{{ .Tag }}-next" diff --git a/vendor/github.com/golang-migrate/migrate/v4/.travis.yml b/vendor/github.com/golang-migrate/migrate/v4/.travis.yml index 72781d45745..fdaea8cbd83 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/.travis.yml +++ b/vendor/github.com/golang-migrate/migrate/v4/.travis.yml @@ -6,8 +6,8 @@ matrix: - go: master include: # Supported versions of Go: https://golang.org/dl/ - - go: "1.12.x" - - go: "1.13.x" + - go: "1.14.x" + - go: "1.15.x" - go: master go_import_path: github.com/golang-migrate/migrate @@ -34,7 +34,7 @@ before_install: - sudo apt-get update - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce # Install golangci-lint - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.20.0 + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 - echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}" install: @@ -57,13 +57,16 @@ deploy: secure: hWH1HLPpzpfA8pXQ93T1qKQVFSpQp0as/JLQ7D91jHuJ8p+RxVeqblDrR6HQY/95R/nyiE9GJmvUolSuw5h449LSrGxPtVWhdh6EnkxlQHlen5XeMhVjRjFV0sE9qGe8v7uAkiTfRO61ktTWHrEAvw5qpyqnNISodmZS78XIasPODQbNlzwINhWhDTHIjXGb4FpizYaL3OGCanrxfR9fQyCaqKGGBjRq3Mfq8U6Yd4mApmsE+uJxgaZV8K5zBqpkSzQRWhcVGNL5DuLsU3gfSJOo7kZeA2G71SHffH577dBoqtCZ4VFv169CoUZehLWCb+7XKJZmHXVujCURATSySLGUOPc6EoLFAn3YtsCA04mS4bZVo5FZPWVwfhjmkhtDR4f6wscKp7r1HsFHSOgm59QfETQdrn4MnZ44H2Jd39axqndn5DvK9EcZVjPHynOPnueXP2u6mTuUgh2VyyWBCDO3CNo0fGlo7VJI69IkIWNSD87K9cHZWYMClyKZkUzS+PmRAhHRYbVd+9ZjKOmnU36kUHNDG/ft1D4ogsY+rhVtXB4lgWDM5adri+EIScYdYnB1/pQexLBigcJY9uE7nQTR0U6QgVNYvun7uRNs40E0c4voSfmPdFO0FlOD2y1oQhnaXfWLbu9nMcTcs4RFGrcC7NzkUN4/WjG8s285V6w= skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: golang-migrate/migrate tags: true file: - cli/build/migrate.linux-amd64.tar.gz + - cli/build/migrate.linux-armv7.tar.gz + - cli/build/migrate.linux-arm64.tar.gz - cli/build/migrate.darwin-amd64.tar.gz - cli/build/migrate.windows-amd64.exe.tar.gz + - cli/build/migrate.windows-386.exe.tar.gz - cli/build/sha256sum.txt - dependency_tree.txt - provider: packagecloud @@ -75,7 +78,7 @@ deploy: package_glob: '*.deb' skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: golang-migrate/migrate tags: true - provider: packagecloud @@ -87,7 +90,7 @@ deploy: package_glob: '*.deb' skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: golang-migrate/migrate tags: true - provider: packagecloud @@ -95,11 +98,11 @@ deploy: username: golang-migrate token: secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o= - dist: ubuntu/cosmic + dist: ubuntu/focal package_glob: '*.deb' skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: golang-migrate/migrate tags: true - provider: packagecloud @@ -111,7 +114,7 @@ deploy: package_glob: '*.deb' skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: 
golang-migrate/migrate tags: true - provider: packagecloud @@ -123,13 +126,13 @@ deploy: package_glob: '*.deb' skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: golang-migrate/migrate tags: true - provider: script script: ./docker-deploy.sh skip_cleanup: true on: - go: "1.13.x" + go: "1.15.x" repo: golang-migrate/migrate tags: true diff --git a/vendor/github.com/golang-migrate/migrate/v4/Dockerfile b/vendor/github.com/golang-migrate/migrate/v4/Dockerfile index cfab3553750..eb818821188 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/Dockerfile +++ b/vendor/github.com/golang-migrate/migrate/v4/Dockerfile @@ -1,23 +1,26 @@ -FROM golang:1.12-alpine3.10 AS downloader +FROM golang:1.16-alpine3.13 AS builder ARG VERSION -RUN apk add --no-cache git gcc musl-dev +RUN apk add --no-cache git gcc musl-dev make WORKDIR /go/src/github.com/golang-migrate/migrate -COPY . ./ - ENV GO111MODULE=on -ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver firebird" -ENV SOURCES="file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab" -RUN go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate +COPY go.mod go.sum ./ + +RUN go mod download + +COPY . ./ + +RUN make build-docker -FROM alpine:3.10 +FROM alpine:3.13 RUN apk add --no-cache ca-certificates -COPY --from=downloader /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /migrate +COPY --from=builder /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /usr/local/bin/migrate +RUN ln -s /usr/local/bin/migrate /migrate -ENTRYPOINT ["/migrate"] +ENTRYPOINT ["migrate"] CMD ["--help"] diff --git a/vendor/github.com/golang-migrate/migrate/v4/Dockerfile.circleci b/vendor/github.com/golang-migrate/migrate/v4/Dockerfile.circleci new file mode 100644 index 00000000000..b6b244d1964 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/Dockerfile.circleci @@ -0,0 +1,17 @@ +ARG DOCKER_IMAGE +FROM $DOCKER_IMAGE + +RUN apk add --no-cache git gcc musl-dev make + +WORKDIR /go/src/github.com/golang-migrate/migrate + +ENV GO111MODULE=on +ENV COVERAGE_DIR=/tmp/coverage + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY . ./ + +CMD ["make", "test"] diff --git a/vendor/github.com/golang-migrate/migrate/v4/Dockerfile.github-actions b/vendor/github.com/golang-migrate/migrate/v4/Dockerfile.github-actions new file mode 100644 index 00000000000..559c1e79c41 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/Dockerfile.github-actions @@ -0,0 +1,11 @@ +FROM alpine:3.13 + +RUN apk add --no-cache ca-certificates + +COPY migrate /usr/local/bin/migrate + +RUN ln -s /usr/local/bin/migrate /usr/bin/migrate +RUN ln -s /usr/local/bin/migrate /migrate + +ENTRYPOINT ["migrate"] +CMD ["--help"] \ No newline at end of file diff --git a/vendor/github.com/golang-migrate/migrate/v4/FAQ.md b/vendor/github.com/golang-migrate/migrate/v4/FAQ.md index 82ae4bf2106..28316281968 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/FAQ.md +++ b/vendor/github.com/golang-migrate/migrate/v4/FAQ.md @@ -16,7 +16,7 @@ NilMigration defines a migration without a body. NilVersion is defined as const -1. #### What is the difference between uint(version) and int(targetVersion)? - version refers to an existing migration version coming from a source and therefor can never be negative. + version refers to an existing migration version coming from a source and therefore can never be negative. 
targetVersion can either be a version OR represent a NilVersion, which equals -1. #### What's the difference between Next/Previous and Up/Down? @@ -53,7 +53,7 @@ Yes, technically thats possible. We want to encourage you to contribute your driver to this respository though. The driver's functionality is dictated by migrate's interfaces. That means there should really just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing, - just implemented a bit differently, co-exist somewhere on Github. If users have to do research first to find the + just implemented a bit differently, co-exist somewhere on GitHub. If users have to do research first to find the "best" available driver for a database in order to get started, we would have failed as an open source community. #### Can I mix multiple sources during a batch of migrations? @@ -73,4 +73,7 @@ Database-specific locking features are used by *some* database drivers to preven No, it is done automatically. #### Can I use migrate with a non-Go project? -Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework. \ No newline at end of file +Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework. + +#### I have got an error `Dirty database version 1. Fix and force version`. What should I do? +Keep calm and refer to [the getting started docs](GETTING_STARTED.md#forcing-your-database-version). diff --git a/vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md b/vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md index 41f293cc776..5946005cd23 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md +++ b/vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md @@ -23,7 +23,7 @@ This way if one of commands fails, our database will remain unchanged. Run your migrations through the CLI or your app and check if they applied expected changes. Just to give you an idea: ``` -migrate -database YOUR_DATBASE_URL -path PATH_TO_YOUR_MIGRATIONS up +migrate -database YOUR_DATABASE_URL -path PATH_TO_YOUR_MIGRATIONS up ``` Just add the code to your app and you're ready to go! @@ -34,6 +34,16 @@ It's also worth checking your migrations in a separate, containerized environmen **IMPORTANT:** If you would like to run multiple instances of your app on different machines be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues. +## Forcing your database version +In case you run a migration that contained an error, migrate will not let you run other migrations on the same database. You will see an error like `Dirty database version 1. Fix and force version`, even when you fix the erred migration. This means your database was marked as 'dirty'. +You need to investigate the migration error - was your migration applied partially, or was it not applied at all? Once you know, you should force your database to a version reflecting it's real state. You can do so with `force` command: +``` +migrate -path PATH_TO_YOUR_MIGRATIONS -database YOUR_DATABASE_URL force VERSION +``` +Once you force the version and your migration was fixed, your database is 'clean' again and you can proceed with your migrations. 
+ +For details and example of usage see [this comment](https://github.com/golang-migrate/migrate/issues/282#issuecomment-530743258). + ## Further reading: - [PostgreSQL tutorial](database/postgres/TUTORIAL.md) - [Best practices](MIGRATIONS.md) diff --git a/vendor/github.com/golang-migrate/migrate/v4/Makefile b/vendor/github.com/golang-migrate/migrate/v4/Makefile index 291c179cc7d..79fa85ffbfb 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/Makefile +++ b/vendor/github.com/golang-migrate/migrate/v4/Makefile @@ -1,15 +1,24 @@ -SOURCE ?= file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab -DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver firebird +SOURCE ?= file go_bindata github github_ee bitbucket aws_s3 google_cloud_storage godoc_vfs gitlab +DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver firebird neo4j pgx +DATABASE_TEST ?= $(DATABASE) sqlite sqlite3 sqlcipher VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) TEST_FLAGS ?= REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)") COVERAGE_DIR ?= .coverage +build: + CGO_ENABLED=0 go build -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' ./cmd/migrate + +build-docker: + CGO_ENABLED=0 go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$(DATABASE) $(SOURCE)" ./cmd/migrate build-cli: clean -mkdir ./cli/build cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o ../../cli/build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' . + cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -a -o ../../cli/build/migrate.linux-armv7 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' . + cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -o ../../cli/build/migrate.linux-arm64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' . cd ./cmd/migrate && CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -o ../../cli/build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' . + cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -a -o ../../cli/build/migrate.windows-386.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' . cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -a -o ../../cli/build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' . cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {} cd ./cli/build && shasum -a 256 * > sha256sum.txt @@ -32,7 +41,7 @@ test: test-with-flags: @echo SOURCE: $(SOURCE) - @echo DATABASE: $(DATABASE) + @echo DATABASE_TEST: $(DATABASE_TEST) @go test $(TEST_FLAGS) ./... @@ -89,6 +98,12 @@ release: git tag v$(V) @read -p "Press enter to confirm and push to origin ..." 
&& git push origin v$(V) +echo-source: + @echo "$(SOURCE)" + +echo-database: + @echo "$(DATABASE)" + define external_deps @echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' @@ -96,10 +111,10 @@ define external_deps endef -.PHONY: build-cli clean test-short test test-with-flags html-coverage \ +.PHONY: build build-docker build-cli clean test-short test test-with-flags html-coverage \ restore-import-paths rewrite-import-paths list-external-deps release \ - docs kill-docs open-docs kill-orphaned-docker-containers + docs kill-docs open-docs kill-orphaned-docker-containers echo-source echo-database -SHELL = /bin/bash +SHELL = /bin/sh RAND = $(shell echo $$RANDOM) diff --git a/vendor/github.com/golang-migrate/migrate/v4/README.md b/vendor/github.com/golang-migrate/migrate/v4/README.md index 2f8af4de27c..be2a8311641 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/README.md +++ b/vendor/github.com/golang-migrate/migrate/v4/README.md @@ -1,11 +1,11 @@ -[![Build Status](https://img.shields.io/travis/com/golang-migrate/migrate/master.svg)](https://travis-ci.com/golang-migrate/migrate) -[![GoDoc](https://godoc.org/github.com/golang-migrate/migrate?status.svg)](https://godoc.org/github.com/golang-migrate/migrate) +[![GitHub Workflow Status (branch)](https://img.shields.io/github/workflow/status/golang-migrate/migrate/CI/master)](https://github.com/golang-migrate/migrate/actions/workflows/ci.yaml?query=branch%3Amaster) +[![GoDoc](https://pkg.go.dev/badge/github.com/golang-migrate/migrate)](https://pkg.go.dev/github.com/golang-migrate/migrate/v4) [![Coverage Status](https://img.shields.io/coveralls/github/golang-migrate/migrate/master.svg)](https://coveralls.io/github/golang-migrate/migrate?branch=master) [![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/golang-migrate/migrate?filter=debs) [![Docker Pulls](https://img.shields.io/docker/pulls/migrate/migrate.svg)](https://hub.docker.com/r/migrate/migrate/) -![Supported Go Versions](https://img.shields.io/badge/Go-1.12%2C%201.13-lightgrey.svg) +![Supported Go Versions](https://img.shields.io/badge/Go-1.16%2C%201.17-lightgrey.svg) [![GitHub Release](https://img.shields.io/github/release/golang-migrate/migrate.svg)](https://github.com/golang-migrate/migrate/releases) -[![Go Report Card](https://goreportcard.com/badge/github.com/golang-migrate/migrate)](https://goreportcard.com/report/github.com/golang-migrate/migrate) +[![Go Report Card](https://goreportcard.com/badge/github.com/golang-migrate/migrate)](https://goreportcard.com/report/github.com/golang-migrate/migrate) # migrate @@ -24,12 +24,15 @@ Forked from [mattes/migrate](https://github.com/mattes/migrate) Database drivers run migrations. 
[Add a new database?](database/driver.go) * [PostgreSQL](database/postgres) +* [PGX](database/pgx) * [Redshift](database/redshift) * [Ql](database/ql) * [Cassandra](database/cassandra) -* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165)) +* [SQLite](database/sqlite) +* [SQLite3](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165)) +* [SQLCipher](database/sqlcipher) * [MySQL/ MariaDB](database/mysql) -* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167)) +* [Neo4j](database/neo4j) * [MongoDB](database/mongodb) * [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170)) * [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171)) @@ -41,7 +44,7 @@ Database drivers run migrations. [Add a new database?](database/driver.go) ### Database URLs -Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?option1=true&option2=false` +Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?param1=true¶m2=false` Any [reserved URL characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) need to be escaped. Note, the `%` character also [needs to be escaped](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_the_percent_character) @@ -65,9 +68,12 @@ $ Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go) * [Filesystem](source/file) - read from filesystem +* [io/fs](source/iofs) - read from a Go [io/fs](https://pkg.go.dev/io/fs#FS) * [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata)) -* [Github](source/github) - read from remote Github repositories -* [Github Enterprise](source/github_ee) - read from remote Github Enterprise repositories +* [pkger](source/pkger) - read from embedded binary data ([markbates/pkger](https://github.com/markbates/pkger)) +* [GitHub](source/github) - read from remote GitHub repositories +* [GitHub Enterprise](source/github_ee) - read from remote GitHub Enterprise repositories +* [Bitbucket](source/bitbucket) - read from remote Bitbucket repositories * [Gitlab](source/gitlab) - read from remote Gitlab repositories * [AWS S3](source/aws_s3) - read from Amazon Web Services S3 * [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage @@ -136,7 +142,7 @@ func main() { m, err := migrate.NewWithDatabaseInstance( "file:///migrations", "postgres", driver) - m.Steps(2) + m.Up() // or m.Step(2) if you want to explicitly set the number of migrations to run } ``` @@ -146,7 +152,8 @@ Go to [getting started](GETTING_STARTED.md) ## Tutorials -- [PostgreSQL](database/postgres/TUTORIAL.md) +* [CockroachDB](database/cockroachdb/TUTORIAL.md) +* [PostgreSQL](database/postgres/TUTORIAL.md) (more tutorials to come) diff --git a/vendor/github.com/golang-migrate/migrate/v4/database/driver.go b/vendor/github.com/golang-migrate/migrate/v4/database/driver.go index 2c673caaffc..fa80f455c05 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/database/driver.go +++ b/vendor/github.com/golang-migrate/migrate/v4/database/driver.go @@ -13,7 +13,8 @@ import ( ) var ( - ErrLocked = fmt.Errorf("can't acquire lock") + ErrLocked = fmt.Errorf("can't 
acquire lock") + ErrNotLocked = fmt.Errorf("can't unlock, as not currently locked") ) const NilVersion int = -1 @@ -33,7 +34,7 @@ var drivers = make(map[string]Driver) // All other functions are tested by tests in database/testing. // Saves you some time and makes sure all database drivers behave the same way. // 5. Call Register in init(). -// 6. Create a migrate/cli/build_.go file +// 6. Create a internal/cli/build_.go file // 7. Add driver name in 'DATABASE' variable in Makefile // // Guidelines: @@ -61,7 +62,7 @@ type Driver interface { // all migrations have been run. Unlock() error - // Run applies a migration to the database. migration is garantueed to be not nil. + // Run applies a migration to the database. migration is guaranteed to be not nil. Run(migration io.Reader) error // SetVersion saves version and dirty state. diff --git a/vendor/github.com/golang-migrate/migrate/v4/database/multistmt/parse.go b/vendor/github.com/golang-migrate/migrate/v4/database/multistmt/parse.go new file mode 100644 index 00000000000..9a045767d8e --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/database/multistmt/parse.go @@ -0,0 +1,46 @@ +// Package multistmt provides methods for parsing multi-statement database migrations +package multistmt + +import ( + "bufio" + "bytes" + "io" +) + +// StartBufSize is the default starting size of the buffer used to scan and parse multi-statement migrations +var StartBufSize = 4096 + +// Handler handles a single migration parsed from a multi-statement migration. +// It's given the single migration to handle and returns whether or not further statements +// from the multi-statement migration should be parsed and handled. +type Handler func(migration []byte) bool + +func splitWithDelimiter(delimiter []byte) func(d []byte, atEOF bool) (int, []byte, error) { + return func(d []byte, atEOF bool) (int, []byte, error) { + // SplitFunc inspired by bufio.ScanLines() implementation + if atEOF { + if len(d) == 0 { + return 0, nil, nil + } + return len(d), d, nil + } + if i := bytes.Index(d, delimiter); i >= 0 { + return i + len(delimiter), d[:i+len(delimiter)], nil + } + return 0, nil, nil + } +} + +// Parse parses the given multi-statement migration +func Parse(reader io.Reader, delimiter []byte, maxMigrationSize int, h Handler) error { + scanner := bufio.NewScanner(reader) + scanner.Buffer(make([]byte, 0, StartBufSize), maxMigrationSize) + scanner.Split(splitWithDelimiter(delimiter)) + for scanner.Scan() { + cont := h(scanner.Bytes()) + if !cont { + break + } + } + return scanner.Err() +} diff --git a/vendor/github.com/golang-migrate/migrate/v4/database/postgres/README.md b/vendor/github.com/golang-migrate/migrate/v4/database/postgres/README.md index f6312392b05..bc823f4e163 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/database/postgres/README.md +++ b/vendor/github.com/golang-migrate/migrate/v4/database/postgres/README.md @@ -5,6 +5,10 @@ | URL Query | WithInstance Config | Description | |------------|---------------------|-------------| | `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `x-migrations-table-quoted` | `MigrationsTableQuoted` | By default, migrate quotes the migration table for SQL injection safety reasons. This option disable quoting and naively checks that you have quoted the migration table name. e.g. 
`"my_schema"."schema_migrations"` | +| `x-statement-timeout` | `StatementTimeout` | Abort any statement that takes more than the specified number of milliseconds | +| `x-multi-statement` | `MultiStatementEnabled` | Enable multi-statement execution (default: false) | +| `x-multi-statement-max-size` | `MultiStatementMaxSize` | Maximum size of single statement in bytes (default: 10MB) | | `dbname` | `DatabaseName` | The name of the database to connect to | | `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. | | `user` | | The user to sign in as | @@ -26,3 +30,10 @@ 2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration. 3. Download and install the latest migrate version. 4. Force the current migration version with `migrate force `. + +## Multi-statement mode + +In PostgreSQL running multiple SQL statements in one `Exec` executes them inside a transaction. Sometimes this +behavior is not desirable because some statements can be only run outside of transaction (e.g. +`CREATE INDEX CONCURRENTLY`). If you want to use `CREATE INDEX CONCURRENTLY` without activating multi-statement mode +you have to put such statements in a separate migration files. diff --git a/vendor/github.com/golang-migrate/migrate/v4/database/postgres/TUTORIAL.md b/vendor/github.com/golang-migrate/migrate/v4/database/postgres/TUTORIAL.md index 8b06a48642c..0f19c56ff46 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/database/postgres/TUTORIAL.md +++ b/vendor/github.com/golang-migrate/migrate/v4/database/postgres/TUTORIAL.md @@ -7,9 +7,9 @@ Our user here is `postgres`, password `password`, and host is `localhost`. ``` psql -h localhost -U postgres -w -c "create database example;" ``` -When using Migrate CLI we need to pass to database URL. Let's export it to a variable for convienience: +When using Migrate CLI we need to pass to database URL. Let's export it to a variable for convenience: ``` -export POSTGRESQL_URL=postgres://postgres:password@localhost:5432/example?sslmode=disable +export POSTGRESQL_URL='postgres://postgres:password@localhost:5432/example?sslmode=disable' ``` `sslmode=disable` means that the connection with our database will not be encrypted. Enabling it is left as an exercise. @@ -39,7 +39,7 @@ And in the `.down.sql` let's delete it: ``` DROP TABLE IF EXISTS users; ``` -By adding `IF EXISTS/IF NOT EXISTS` we are making migrations idempotent - you can read more about idempotency in [getting started](GETTING_STARTED.md#create-migrations) +By adding `IF EXISTS/IF NOT EXISTS` we are making migrations idempotent - you can read more about idempotency in [getting started](../../GETTING_STARTED.md#create-migrations) ## Run migrations ``` @@ -145,4 +145,23 @@ func main() { } } ``` -You can find details [here](README.md#use-in-your-go-project) \ No newline at end of file +You can find details [here](README.md#use-in-your-go-project) + +## Fix issue where migrations run twice + +When the schema and role names are the same, you might run into issues if you create this schema using migrations. +This is caused by the fact that the [default `search_path`](https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH) is `"$user", public`. +In the first run (with an empty database) the migrate table is created in `public`. 
+When the migrations create the `$user` schema, the next run will store (a new) migrate table in this schema (due to order of schemas in `search_path`) and tries to apply all migrations again (most likely failing). + +To solve this you need to change the default `search_path` by removing the `$user` component, so the migrate table is always stored in the (available) `public` schema. +This can be done using the [`search_path` query parameter in the URL](https://github.com/jexia/migrate/blob/fix-postgres-version-table/database/postgres/README.md#postgres). + +For example to force the migrations table in the public schema you can use: +``` +export POSTGRESQL_URL='postgres://postgres:password@localhost:5432/example?sslmode=disable&search_path=public' +``` + +Note that you need to explicitly add the schema names to the table names in your migrations when you to modify the tables of the non-public schema. + +Alternatively you can add the non-public schema manually (before applying the migrations) if that is possible in your case and let the tool store the migrations table in this schema as well. diff --git a/vendor/github.com/golang-migrate/migrate/v4/database/postgres/postgres.go b/vendor/github.com/golang-migrate/migrate/v4/database/postgres/postgres.go index 9cd5cb17045..59cb569de3b 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/database/postgres/postgres.go +++ b/vendor/github.com/golang-migrate/migrate/v4/database/postgres/postgres.go @@ -1,3 +1,4 @@ +//go:build go1.9 // +build go1.9 package postgres @@ -9,12 +10,17 @@ import ( "io" "io/ioutil" nurl "net/url" + "regexp" "strconv" "strings" + "time" + + "go.uber.org/atomic" "github.com/golang-migrate/migrate/v4" "github.com/golang-migrate/migrate/v4/database" - multierror "github.com/hashicorp/go-multierror" + "github.com/golang-migrate/migrate/v4/database/multistmt" + "github.com/hashicorp/go-multierror" "github.com/lib/pq" ) @@ -24,7 +30,12 @@ func init() { database.Register("postgresql", &db) } -var DefaultMigrationsTable = "schema_migrations" +var ( + multiStmtDelimiter = []byte(";") + + DefaultMigrationsTable = "schema_migrations" + DefaultMultiStatementMaxSize = 10 * 1 << 20 // 10 MB +) var ( ErrNilConfig = fmt.Errorf("no config") @@ -34,34 +45,40 @@ var ( ) type Config struct { - MigrationsTable string - DatabaseName string - SchemaName string + MigrationsTable string + MigrationsTableQuoted bool + MultiStatementEnabled bool + DatabaseName string + SchemaName string + migrationsSchemaName string + migrationsTableName string + StatementTimeout time.Duration + MultiStatementMaxSize int } type Postgres struct { // Locking and unlocking need to use the same connection conn *sql.Conn db *sql.DB - isLocked bool + isLocked atomic.Bool // Open and WithInstance need to guarantee that config is never nil config *Config } -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { +func WithConnection(ctx context.Context, conn *sql.Conn, config *Config) (*Postgres, error) { if config == nil { return nil, ErrNilConfig } - if err := instance.Ping(); err != nil { + if err := conn.PingContext(ctx); err != nil { return nil, err } if config.DatabaseName == "" { query := `SELECT CURRENT_DATABASE()` var databaseName string - if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + if err := conn.QueryRowContext(ctx, query).Scan(&databaseName); err != nil { return nil, &database.Error{OrigErr: err, Query: []byte(query)} } @@ -75,7 +92,7 @@ func WithInstance(instance *sql.DB, config *Config) (database.Driver, 
error) { if config.SchemaName == "" { query := `SELECT CURRENT_SCHEMA()` var schemaName string - if err := instance.QueryRow(query).Scan(&schemaName); err != nil { + if err := conn.QueryRowContext(ctx, query).Scan(&schemaName); err != nil { return nil, &database.Error{OrigErr: err, Query: []byte(query)} } @@ -90,15 +107,21 @@ func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { config.MigrationsTable = DefaultMigrationsTable } - conn, err := instance.Conn(context.Background()) - - if err != nil { - return nil, err + config.migrationsSchemaName = config.SchemaName + config.migrationsTableName = config.MigrationsTable + if config.MigrationsTableQuoted { + re := regexp.MustCompile(`"(.*?)"`) + result := re.FindAllStringSubmatch(config.MigrationsTable, -1) + config.migrationsTableName = result[len(result)-1][1] + if len(result) == 2 { + config.migrationsSchemaName = result[0][1] + } else if len(result) > 2 { + return nil, fmt.Errorf("\"%s\" MigrationsTable contains too many dot characters", config.MigrationsTable) + } } px := &Postgres{ conn: conn, - db: instance, config: config, } @@ -109,6 +132,26 @@ func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { return px, nil } +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + ctx := context.Background() + + if err := instance.Ping(); err != nil { + return nil, err + } + + conn, err := instance.Conn(ctx) + if err != nil { + return nil, err + } + + px, err := WithConnection(ctx, conn, config) + if err != nil { + return nil, err + } + px.db = instance + return px, nil +} + func (p *Postgres) Open(url string) (database.Driver, error) { purl, err := nurl.Parse(url) if err != nil { @@ -121,10 +164,52 @@ func (p *Postgres) Open(url string) (database.Driver, error) { } migrationsTable := purl.Query().Get("x-migrations-table") + migrationsTableQuoted := false + if s := purl.Query().Get("x-migrations-table-quoted"); len(s) > 0 { + migrationsTableQuoted, err = strconv.ParseBool(s) + if err != nil { + return nil, fmt.Errorf("Unable to parse option x-migrations-table-quoted: %w", err) + } + } + if (len(migrationsTable) > 0) && (migrationsTableQuoted) && ((migrationsTable[0] != '"') || (migrationsTable[len(migrationsTable)-1] != '"')) { + return nil, fmt.Errorf("x-migrations-table must be quoted (for instance '\"migrate\".\"schema_migrations\"') when x-migrations-table-quoted is enabled, current value is: %s", migrationsTable) + } + + statementTimeoutString := purl.Query().Get("x-statement-timeout") + statementTimeout := 0 + if statementTimeoutString != "" { + statementTimeout, err = strconv.Atoi(statementTimeoutString) + if err != nil { + return nil, err + } + } + + multiStatementMaxSize := DefaultMultiStatementMaxSize + if s := purl.Query().Get("x-multi-statement-max-size"); len(s) > 0 { + multiStatementMaxSize, err = strconv.Atoi(s) + if err != nil { + return nil, err + } + if multiStatementMaxSize <= 0 { + multiStatementMaxSize = DefaultMultiStatementMaxSize + } + } + + multiStatementEnabled := false + if s := purl.Query().Get("x-multi-statement"); len(s) > 0 { + multiStatementEnabled, err = strconv.ParseBool(s) + if err != nil { + return nil, fmt.Errorf("Unable to parse option x-multi-statement: %w", err) + } + } px, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + MigrationsTableQuoted: migrationsTableQuoted, + StatementTimeout: time.Duration(statementTimeout) * 
time.Millisecond, + MultiStatementEnabled: multiStatementEnabled, + MultiStatementMaxSize: multiStatementMaxSize, }) if err != nil { @@ -136,7 +221,11 @@ func (p *Postgres) Open(url string) (database.Driver, error) { func (p *Postgres) Close() error { connErr := p.conn.Close() - dbErr := p.db.Close() + var dbErr error + if p.db != nil { + dbErr = p.db.Close() + } + if connErr != nil || dbErr != nil { return fmt.Errorf("conn: %v, db: %v", connErr, dbErr) } @@ -145,52 +234,69 @@ func (p *Postgres) Close() error { // https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS func (p *Postgres) Lock() error { - if p.isLocked { - return database.ErrLocked - } - - aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName, p.config.SchemaName) - if err != nil { - return err - } + return database.CasRestoreOnErr(&p.isLocked, false, true, database.ErrLocked, func() error { + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName, p.config.migrationsSchemaName, p.config.migrationsTableName) + if err != nil { + return err + } - // This will wait indefinitely until the lock can be acquired. - query := `SELECT pg_advisory_lock($1)` - if _, err := p.conn.ExecContext(context.Background(), query, aid); err != nil { - return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} - } + // This will wait indefinitely until the lock can be acquired. + query := `SELECT pg_advisory_lock($1)` + if _, err := p.conn.ExecContext(context.Background(), query, aid); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } - p.isLocked = true - return nil + return nil + }) } func (p *Postgres) Unlock() error { - if !p.isLocked { - return nil - } - - aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName, p.config.SchemaName) - if err != nil { - return err - } + return database.CasRestoreOnErr(&p.isLocked, true, false, database.ErrNotLocked, func() error { + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName, p.config.migrationsSchemaName, p.config.migrationsTableName) + if err != nil { + return err + } - query := `SELECT pg_advisory_unlock($1)` - if _, err := p.conn.ExecContext(context.Background(), query, aid); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - p.isLocked = false - return nil + query := `SELECT pg_advisory_unlock($1)` + if _, err := p.conn.ExecContext(context.Background(), query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil + }) } func (p *Postgres) Run(migration io.Reader) error { + if p.config.MultiStatementEnabled { + var err error + if e := multistmt.Parse(migration, multiStmtDelimiter, p.config.MultiStatementMaxSize, func(m []byte) bool { + if err = p.runStatement(m); err != nil { + return false + } + return true + }); e != nil { + return e + } + return err + } migr, err := ioutil.ReadAll(migration) if err != nil { return err } + return p.runStatement(migr) +} - // run migration - query := string(migr[:]) - if _, err := p.conn.ExecContext(context.Background(), query); err != nil { +func (p *Postgres) runStatement(statement []byte) error { + ctx := context.Background() + if p.config.StatementTimeout != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, p.config.StatementTimeout) + defer cancel() + } + query := string(statement) + if strings.TrimSpace(query) == "" { + return nil + } + if _, err := p.conn.ExecContext(ctx, query); err != nil { if pgErr, ok := 
err.(*pq.Error); ok { var line uint var col uint @@ -207,11 +313,10 @@ func (p *Postgres) Run(migration io.Reader) error { if pgErr.Detail != "" { message = fmt.Sprintf("%s, %s", message, pgErr.Detail) } - return database.Error{OrigErr: err, Err: message, Query: migr, Line: line} + return database.Error{OrigErr: err, Err: message, Query: statement, Line: line} } - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + return database.Error{OrigErr: err, Err: "migration failed", Query: statement} } - return nil } @@ -256,7 +361,7 @@ func (p *Postgres) SetVersion(version int, dirty bool) error { return &database.Error{OrigErr: err, Err: "transaction start failed"} } - query := `TRUNCATE ` + pq.QuoteIdentifier(p.config.MigrationsTable) + query := `TRUNCATE ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` + pq.QuoteIdentifier(p.config.migrationsTableName) if _, err := tx.Exec(query); err != nil { if errRollback := tx.Rollback(); errRollback != nil { err = multierror.Append(err, errRollback) @@ -264,8 +369,11 @@ func (p *Postgres) SetVersion(version int, dirty bool) error { return &database.Error{OrigErr: err, Query: []byte(query)} } - if version >= 0 { - query = `INSERT INTO ` + pq.QuoteIdentifier(p.config.MigrationsTable) + ` (version, dirty) VALUES ($1, $2)` + // Also re-write the schema version for nil dirty versions to prevent + // empty schema version for failed down migration on the first migration + // See: https://github.com/golang-migrate/migrate/issues/330 + if version >= 0 || (version == database.NilVersion && dirty) { + query = `INSERT INTO ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` + pq.QuoteIdentifier(p.config.migrationsTableName) + ` (version, dirty) VALUES ($1, $2)` if _, err := tx.Exec(query, version, dirty); err != nil { if errRollback := tx.Rollback(); errRollback != nil { err = multierror.Append(err, errRollback) @@ -282,7 +390,7 @@ func (p *Postgres) SetVersion(version int, dirty bool) error { } func (p *Postgres) Version() (version int, dirty bool, err error) { - query := `SELECT version, dirty FROM ` + pq.QuoteIdentifier(p.config.MigrationsTable) + ` LIMIT 1` + query := `SELECT version, dirty FROM ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` + pq.QuoteIdentifier(p.config.migrationsTableName) + ` LIMIT 1` err = p.conn.QueryRowContext(context.Background(), query).Scan(&version, &dirty) switch { case err == sql.ErrNoRows: @@ -325,6 +433,9 @@ func (p *Postgres) Drop() (err error) { tableNames = append(tableNames, tableName) } } + if err := tables.Err(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } if len(tableNames) > 0 { // delete one by one ... @@ -357,7 +468,24 @@ func (p *Postgres) ensureVersionTable() (err error) { } }() - query := `CREATE TABLE IF NOT EXISTS ` + pq.QuoteIdentifier(p.config.MigrationsTable) + ` (version bigint not null primary key, dirty boolean not null)` + // This block checks whether the `MigrationsTable` already exists. This is useful because it allows read only postgres + // users to also check the current version of the schema. Previously, even if `MigrationsTable` existed, the + // `CREATE TABLE IF NOT EXISTS...` query would fail because the user does not have the CREATE permission. 
+ // Taken from https://github.com/mattes/migrate/blob/master/database/postgres/postgres.go#L258 + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2 LIMIT 1` + row := p.conn.QueryRowContext(context.Background(), query, p.config.migrationsSchemaName, p.config.migrationsTableName) + + var count int + err = row.Scan(&count) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if count == 1 { + return nil + } + + query = `CREATE TABLE IF NOT EXISTS ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` + pq.QuoteIdentifier(p.config.migrationsTableName) + ` (version bigint not null primary key, dirty boolean not null)` if _, err = p.conn.ExecContext(context.Background(), query); err != nil { return &database.Error{OrigErr: err, Query: []byte(query)} } diff --git a/vendor/github.com/golang-migrate/migrate/v4/database/util.go b/vendor/github.com/golang-migrate/migrate/v4/database/util.go index 976ad3534d2..de66d5b8021 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/database/util.go +++ b/vendor/github.com/golang-migrate/migrate/v4/database/util.go @@ -2,6 +2,7 @@ package database import ( "fmt" + "go.uber.org/atomic" "hash/crc32" "strings" ) @@ -17,3 +18,16 @@ func GenerateAdvisoryLockId(databaseName string, additionalNames ...string) (str sum = sum * uint32(advisoryLockIDSalt) return fmt.Sprint(sum), nil } + +// CasRestoreOnErr CAS wrapper to automatically restore the lock state on error +func CasRestoreOnErr(lock *atomic.Bool, o, n bool, casErr error, f func() error) error { + if !lock.CAS(o, n) { + return casErr + } + if err := f(); err != nil { + // Automatically unlock/lock on error + lock.Store(o) + return err + } + return nil +} diff --git a/vendor/github.com/golang-migrate/migrate/v4/migrate.go b/vendor/github.com/golang-migrate/migrate/v4/migrate.go index 1478bc2a004..a57dceb5d5b 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/migrate.go +++ b/vendor/github.com/golang-migrate/migrate/v4/migrate.go @@ -1,6 +1,6 @@ // Package migrate reads migrations from sources and runs them against databases. // Sources are defined by the `source.Driver` and databases by the `database.Driver` -// interface. The driver interfaces are kept "dump", all migration logic is kept +// interface. The driver interfaces are kept "dumb", all migration logic is kept // in this package. package migrate @@ -487,7 +487,7 @@ func (m *Migrate) read(from int, to int, ret chan<- interface{}) { } prev, err := m.sourceDrv.Prev(suint(from)) - if os.IsNotExist(err) && to == -1 { + if errors.Is(err, os.ErrNotExist) && to == -1 { // apply nil migration migr, err := m.newMigration(suint(from), -1) if err != nil { @@ -580,7 +580,7 @@ func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) { // apply next migration next, err := m.sourceDrv.Next(suint(from)) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // no limit, but no migrations applied? 
if limit == -1 && count == 0 { ret <- ErrNoChange @@ -666,7 +666,7 @@ func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) { } prev, err := m.sourceDrv.Prev(suint(from)) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // no limit or haven't reached limit, apply "first" migration if limit == -1 || limit-count > 0 { firstVersion, err := m.sourceDrv.First() @@ -785,9 +785,9 @@ func (m *Migrate) versionExists(version uint) (result error) { } }() } - if os.IsExist(err) { + if errors.Is(err, os.ErrExist) { return nil - } else if !os.IsNotExist(err) { + } else if !errors.Is(err, os.ErrNotExist) { return err } @@ -800,14 +800,15 @@ func (m *Migrate) versionExists(version uint) (result error) { } }() } - if os.IsExist(err) { + if errors.Is(err, os.ErrExist) { return nil - } else if !os.IsNotExist(err) { + } else if !errors.Is(err, os.ErrNotExist) { return err } - m.logErr(fmt.Errorf("no migration found for version %d", version)) - return os.ErrNotExist + err = fmt.Errorf("no migration found for version %d: %w", version, err) + m.logErr(err) + return err } // stop returns true if no more migrations should be run against the database @@ -835,7 +836,7 @@ func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, err if targetVersion >= int(version) { r, identifier, err := m.sourceDrv.ReadUp(version) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // create "empty" migration migr, err = NewMigration(nil, "", version, targetVersion) if err != nil { @@ -855,7 +856,7 @@ func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, err } else { r, identifier, err := m.sourceDrv.ReadDown(version) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // create "empty" migration migr, err = NewMigration(nil, "", version, targetVersion) if err != nil { diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/driver.go b/vendor/github.com/golang-migrate/migrate/v4/source/driver.go index 05f97adabf3..94bdef317aa 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/source/driver.go +++ b/vendor/github.com/golang-migrate/migrate/v4/source/driver.go @@ -33,7 +33,7 @@ var drivers = make(map[string]Driver) // * Drivers are supposed to be read only. // * Ideally don't load any contents (into memory) in Open or WithInstance. type Driver interface { - // Open returns a a new driver instance configured with parameters + // Open returns a new driver instance configured with parameters // coming from the URL string. Migrate will call this function // only once per instance. Open(url string) (Driver, error) @@ -87,7 +87,7 @@ func Open(url string) (Driver, error) { d, ok := drivers[u.Scheme] driversMu.RUnlock() if !ok { - return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme) + return nil, fmt.Errorf("source driver: unknown driver '%s' (forgotten import?)", u.Scheme) } return d.Open(url) diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/errors.go b/vendor/github.com/golang-migrate/migrate/v4/source/errors.go new file mode 100644 index 00000000000..93d66e0d4b1 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/errors.go @@ -0,0 +1,15 @@ +package source + +import "os" + +// ErrDuplicateMigration is an error type for reporting duplicate migration +// files. +type ErrDuplicateMigration struct { + Migration + os.FileInfo +} + +// Error implements error interface. 
+func (e ErrDuplicateMigration) Error() string { + return "duplicate migration file: " + e.Name() +} diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/file/file.go b/vendor/github.com/golang-migrate/migrate/v4/source/file/file.go index a0c6419318d..d8b21dffb26 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/source/file/file.go +++ b/vendor/github.com/golang-migrate/migrate/v4/source/file/file.go @@ -1,15 +1,12 @@ package file import ( - "fmt" - "io" - "io/ioutil" nurl "net/url" "os" - "path" "path/filepath" "github.com/golang-migrate/migrate/v4/source" + "github.com/golang-migrate/migrate/v4/source/iofs" ) func init() { @@ -17,17 +14,31 @@ func init() { } type File struct { - url string - path string - migrations *source.Migrations + iofs.PartialDriver + url string + path string } func (f *File) Open(url string) (source.Driver, error) { - u, err := nurl.Parse(url) + p, err := parseURL(url) if err != nil { return nil, err } + nf := &File{ + url: url, + path: p, + } + if err := nf.Init(os.DirFS(p), "."); err != nil { + return nil, err + } + return nf, nil +} +func parseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } // concat host and path to restore full path // host might be `.` p := u.Opaque @@ -39,7 +50,7 @@ func (f *File) Open(url string) (source.Driver, error) { // default to current directory if no path wd, err := os.Getwd() if err != nil { - return nil, err + return "", err } p = wd @@ -47,81 +58,9 @@ func (f *File) Open(url string) (source.Driver, error) { // make path absolute if relative abs, err := filepath.Abs(p) if err != nil { - return nil, err + return "", err } p = abs } - - // scan directory - files, err := ioutil.ReadDir(p) - if err != nil { - return nil, err - } - - nf := &File{ - url: url, - path: p, - migrations: source.NewMigrations(), - } - - for _, fi := range files { - if !fi.IsDir() { - m, err := source.DefaultParse(fi.Name()) - if err != nil { - continue // ignore files that we can't parse - } - if !nf.migrations.Append(m) { - return nil, fmt.Errorf("unable to parse file %v", fi.Name()) - } - } - } - return nf, nil -} - -func (f *File) Close() error { - // nothing do to here - return nil -} - -func (f *File) First() (version uint, err error) { - if v, ok := f.migrations.First(); ok { - return v, nil - } - return 0, &os.PathError{Op: "first", Path: f.path, Err: os.ErrNotExist} -} - -func (f *File) Prev(version uint) (prevVersion uint, err error) { - if v, ok := f.migrations.Prev(version); ok { - return v, nil - } - return 0, &os.PathError{Op: fmt.Sprintf("prev for version %v", version), Path: f.path, Err: os.ErrNotExist} -} - -func (f *File) Next(version uint) (nextVersion uint, err error) { - if v, ok := f.migrations.Next(version); ok { - return v, nil - } - return 0, &os.PathError{Op: fmt.Sprintf("next for version %v", version), Path: f.path, Err: os.ErrNotExist} -} - -func (f *File) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := f.migrations.Up(version); ok { - r, err := os.Open(path.Join(f.path, m.Raw)) - if err != nil { - return nil, "", err - } - return r, m.Identifier, nil - } - return nil, "", &os.PathError{Op: fmt.Sprintf("read version %v", version), Path: f.path, Err: os.ErrNotExist} -} - -func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := f.migrations.Down(version); ok { - r, err := os.Open(path.Join(f.path, m.Raw)) - if err != nil { - return nil, "", err - } - return r, m.Identifier, nil - } 
- return nil, "", &os.PathError{Op: fmt.Sprintf("read version %v", version), Path: f.path, Err: os.ErrNotExist} + return p, nil } diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/README.md b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/README.md new file mode 100644 index 00000000000..d75b328b951 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/README.md @@ -0,0 +1,3 @@ +# iofs + +https://pkg.go.dev/github.com/golang-migrate/migrate/v4/source/iofs diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/doc.go b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/doc.go new file mode 100644 index 00000000000..6b2c862e0f2 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/doc.go @@ -0,0 +1,10 @@ +/* +Package iofs provides the Go 1.16+ io/fs#FS driver. + +It can accept various file systems (like embed.FS, archive/zip#Reader) implementing io/fs#FS. + +This driver cannot be used with Go versions 1.15 and below. + +Also, Opening with a URL scheme is not supported. +*/ +package iofs diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/iofs.go b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/iofs.go new file mode 100644 index 00000000000..dc934a5fe25 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/iofs.go @@ -0,0 +1,176 @@ +//go:build go1.16 +// +build go1.16 + +package iofs + +import ( + "errors" + "fmt" + "io" + "io/fs" + "path" + "strconv" + + "github.com/golang-migrate/migrate/v4/source" +) + +type driver struct { + PartialDriver +} + +// New returns a new Driver from io/fs#FS and a relative path. +func New(fsys fs.FS, path string) (source.Driver, error) { + var i driver + if err := i.Init(fsys, path); err != nil { + return nil, fmt.Errorf("failed to init driver with path %s: %w", path, err) + } + return &i, nil +} + +// Open is part of source.Driver interface implementation. +// Open cannot be called on the iofs passthrough driver. +func (d *driver) Open(url string) (source.Driver, error) { + return nil, errors.New("Open() cannot be called on the iofs passthrough driver") +} + +// PartialDriver is a helper service for creating new source drivers working with +// io/fs.FS instances. It implements all source.Driver interface methods +// except for Open(). New driver could embed this struct and add missing Open() +// method. +// +// To prepare PartialDriver for use Init() function. +type PartialDriver struct { + migrations *source.Migrations + fsys fs.FS + path string +} + +// Init prepares not initialized IoFS instance to read migrations from a +// io/fs#FS instance and a relative path. +func (d *PartialDriver) Init(fsys fs.FS, path string) error { + entries, err := fs.ReadDir(fsys, path) + if err != nil { + return err + } + + ms := source.NewMigrations() + for _, e := range entries { + if e.IsDir() { + continue + } + m, err := source.DefaultParse(e.Name()) + if err != nil { + continue + } + file, err := e.Info() + if err != nil { + return err + } + if !ms.Append(m) { + return source.ErrDuplicateMigration{ + Migration: *m, + FileInfo: file, + } + } + } + + d.fsys = fsys + d.path = path + d.migrations = ms + return nil +} + +// Close is part of source.Driver interface implementation. +// Closes the file system if possible. +func (d *PartialDriver) Close() error { + c, ok := d.fsys.(io.Closer) + if !ok { + return nil + } + return c.Close() +} + +// First is part of source.Driver interface implementation. 
+func (d *PartialDriver) First() (version uint, err error) { + if version, ok := d.migrations.First(); ok { + return version, nil + } + return 0, &fs.PathError{ + Op: "first", + Path: d.path, + Err: fs.ErrNotExist, + } +} + +// Prev is part of source.Driver interface implementation. +func (d *PartialDriver) Prev(version uint) (prevVersion uint, err error) { + if version, ok := d.migrations.Prev(version); ok { + return version, nil + } + return 0, &fs.PathError{ + Op: "prev for version " + strconv.FormatUint(uint64(version), 10), + Path: d.path, + Err: fs.ErrNotExist, + } +} + +// Next is part of source.Driver interface implementation. +func (d *PartialDriver) Next(version uint) (nextVersion uint, err error) { + if version, ok := d.migrations.Next(version); ok { + return version, nil + } + return 0, &fs.PathError{ + Op: "next for version " + strconv.FormatUint(uint64(version), 10), + Path: d.path, + Err: fs.ErrNotExist, + } +} + +// ReadUp is part of source.Driver interface implementation. +func (d *PartialDriver) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := d.migrations.Up(version); ok { + body, err := d.open(path.Join(d.path, m.Raw)) + if err != nil { + return nil, "", err + } + return body, m.Identifier, nil + } + return nil, "", &fs.PathError{ + Op: "read up for version " + strconv.FormatUint(uint64(version), 10), + Path: d.path, + Err: fs.ErrNotExist, + } +} + +// ReadDown is part of source.Driver interface implementation. +func (d *PartialDriver) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := d.migrations.Down(version); ok { + body, err := d.open(path.Join(d.path, m.Raw)) + if err != nil { + return nil, "", err + } + return body, m.Identifier, nil + } + return nil, "", &fs.PathError{ + Op: "read down for version " + strconv.FormatUint(uint64(version), 10), + Path: d.path, + Err: fs.ErrNotExist, + } +} + +func (d *PartialDriver) open(path string) (fs.File, error) { + f, err := d.fsys.Open(path) + if err == nil { + return f, nil + } + // Some non-standard file systems may return errors that don't include the path, that + // makes debugging harder. 
+ if !errors.As(err, new(*fs.PathError)) { + err = &fs.PathError{ + Op: "open", + Path: path, + Err: err, + } + } + return nil, err +} diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.down.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.down.sql new file mode 100644 index 00000000000..4267951a5b6 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.down.sql @@ -0,0 +1 @@ +1 down diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.up.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.up.sql new file mode 100644 index 00000000000..046fd5a5dc0 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/1_foobar.up.sql @@ -0,0 +1 @@ +1 up diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/3_foobar.up.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/3_foobar.up.sql new file mode 100644 index 00000000000..77c1b77dcbd --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/3_foobar.up.sql @@ -0,0 +1 @@ +3 up diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.down.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.down.sql new file mode 100644 index 00000000000..b405d8bd024 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.down.sql @@ -0,0 +1 @@ +4 down diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.up.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.up.sql new file mode 100644 index 00000000000..eba61bb94ea --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/4_foobar.up.sql @@ -0,0 +1 @@ +4 up diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/5_foobar.down.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/5_foobar.down.sql new file mode 100644 index 00000000000..6dc96e20688 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/5_foobar.down.sql @@ -0,0 +1 @@ +5 down diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.down.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.down.sql new file mode 100644 index 00000000000..46636016b3a --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.down.sql @@ -0,0 +1 @@ +7 down diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.up.sql b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.up.sql new file mode 100644 index 00000000000..cdbc410ee90 --- /dev/null +++ b/vendor/github.com/golang-migrate/migrate/v4/source/iofs/testdata/migrations/7_foobar.up.sql @@ -0,0 +1 @@ +7 up diff --git a/vendor/github.com/golang-migrate/migrate/v4/source/migration.go b/vendor/github.com/golang-migrate/migrate/v4/source/migration.go index b8bb79020b7..74f6523cb74 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/source/migration.go +++ b/vendor/github.com/golang-migrate/migrate/v4/source/migration.go @@ 
-66,11 +66,13 @@ func (i *Migrations) Append(m *Migration) (ok bool) { } func (i *Migrations) buildIndex() { - i.index = make(uintSlice, 0) + i.index = make(uintSlice, 0, len(i.migrations)) for version := range i.migrations { i.index = append(i.index, version) } - sort.Sort(i.index) + sort.Slice(i.index, func(x, y int) bool { + return i.index[x] < i.index[y] + }) } func (i *Migrations) First() (version uint, ok bool) { @@ -126,18 +128,6 @@ func (i *Migrations) findPos(version uint) int { type uintSlice []uint -func (s uintSlice) Len() int { - return len(s) -} - -func (s uintSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s uintSlice) Less(i, j int) bool { - return s[i] < s[j] -} - func (s uintSlice) Search(x uint) int { return sort.Search(len(s), func(i int) bool { return s[i] >= x }) } diff --git a/vendor/github.com/golang-migrate/migrate/v4/util.go b/vendor/github.com/golang-migrate/migrate/v4/util.go index 26131a3ffaa..bcf90077d28 100644 --- a/vendor/github.com/golang-migrate/migrate/v4/util.go +++ b/vendor/github.com/golang-migrate/migrate/v4/util.go @@ -53,7 +53,7 @@ func FilterCustomQuery(u *nurl.URL) *nurl.URL { ux := *u vx := make(nurl.Values) for k, v := range ux.Query() { - if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") { + if len(k) <= 1 || k[0:2] != "x-" { vx[k] = v } } diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go index 3e596906425..d7969d74d80 100644 --- a/vendor/github.com/pkg/browser/browser.go +++ b/vendor/github.com/pkg/browser/browser.go @@ -30,7 +30,7 @@ func OpenFile(path string) error { // OpenReader consumes the contents of r and presents the // results in a new browser window. func OpenReader(r io.Reader) error { - f, err := ioutil.TempFile("", "browser") + f, err := ioutil.TempFile("", "browser.*.html") if err != nil { return fmt.Errorf("browser: could not create temporary file: %v", err) } @@ -41,12 +41,7 @@ func OpenReader(r io.Reader) error { if err := f.Close(); err != nil { return fmt.Errorf("browser: caching temporary file failed: %v", err) } - oldname := f.Name() - newname := oldname + ".html" - if err := os.Rename(oldname, newname); err != nil { - return fmt.Errorf("browser: renaming temporary file failed: %v", err) - } - return OpenFile(newname) + return OpenFile(f.Name()) } // OpenURL opens a new browser window pointing to url. @@ -58,6 +53,5 @@ func runCmd(prog string, args ...string) error { cmd := exec.Command(prog, args...) 
cmd.Stdout = Stdout cmd.Stderr = Stderr - setFlags(cmd) return cmd.Run() } diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go index 6dff0403c71..8507cf7c2b4 100644 --- a/vendor/github.com/pkg/browser/browser_darwin.go +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -1,9 +1,5 @@ package browser -import "os/exec" - func openBrowser(url string) error { return runCmd("open", url) } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go index 8cc0a7f539b..4fc7ff0761b 100644 --- a/vendor/github.com/pkg/browser/browser_freebsd.go +++ b/vendor/github.com/pkg/browser/browser_freebsd.go @@ -12,5 +12,3 @@ func openBrowser(url string) error { } return err } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go index ab9b4f6bd04..d26cdddf9c1 100644 --- a/vendor/github.com/pkg/browser/browser_linux.go +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -19,5 +19,3 @@ func openBrowser(url string) error { return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound} } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_netbsd.go b/vendor/github.com/pkg/browser/browser_netbsd.go new file mode 100644 index 00000000000..65a5e5a2934 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_netbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from pkgsrc(7)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go index 8cc0a7f539b..4fc7ff0761b 100644 --- a/vendor/github.com/pkg/browser/browser_openbsd.go +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -12,5 +12,3 @@ func openBrowser(url string) error { } return err } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go index 5eb17b013a9..7c5c17d34d2 100644 --- a/vendor/github.com/pkg/browser/browser_unsupported.go +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -1,15 +1,12 @@ -// +build !linux,!windows,!darwin,!openbsd,!freebsd +// +build !linux,!windows,!darwin,!openbsd,!freebsd,!netbsd package browser import ( "fmt" - "os/exec" "runtime" ) func openBrowser(url string) error { return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go index a2b30d39bff..63e192959a5 100644 --- a/vendor/github.com/pkg/browser/browser_windows.go +++ b/vendor/github.com/pkg/browser/browser_windows.go @@ -1,13 +1,7 @@ -//go:generate mkwinsyscall -output zbrowser_windows.go browser_windows.go -//sys ShellExecute(hwnd int, verb string, file string, args string, cwd string, showCmd int) (err error) = shell32.ShellExecuteW package browser -import "os/exec" -const SW_SHOWNORMAL = 1 +import "golang.org/x/sys/windows" func openBrowser(url string) error { - return ShellExecute(0, "", url, "", "", SW_SHOWNORMAL) -} - -func setFlags(cmd *exec.Cmd) { + return windows.ShellExecute(0, nil, 
windows.StringToUTF16Ptr(url), nil, nil, windows.SW_SHOWNORMAL) } diff --git a/vendor/github.com/pkg/browser/zbrowser_windows.go b/vendor/github.com/pkg/browser/zbrowser_windows.go deleted file mode 100644 index cbb25ba639a..00000000000 --- a/vendor/github.com/pkg/browser/zbrowser_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package browser - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modshell32 = windows.NewLazySystemDLL("shell32.dll") - - procShellExecuteW = modshell32.NewProc("ShellExecuteW") -) - -func ShellExecute(hwnd int, verb string, file string, args string, cwd string, showCmd int) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(verb) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(file) - if err != nil { - return - } - var _p2 *uint16 - _p2, err = syscall.UTF16PtrFromString(args) - if err != nil { - return - } - var _p3 *uint16 - _p3, err = syscall.UTF16PtrFromString(cwd) - if err != nil { - return - } - return _ShellExecute(hwnd, _p0, _p1, _p2, _p3, showCmd) -} - -func _ShellExecute(hwnd int, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/modules.txt b/vendor/modules.txt index c6d23ff7421..085d98101f3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -370,14 +370,16 @@ github.com/gogo/status # github.com/golang-jwt/jwt v3.2.1+incompatible ## explicit github.com/golang-jwt/jwt -# github.com/golang-migrate/migrate/v4 v4.7.0 -## explicit; go 1.12 +# github.com/golang-migrate/migrate/v4 v4.15.2 +## explicit; go 1.16 github.com/golang-migrate/migrate/v4 github.com/golang-migrate/migrate/v4/database +github.com/golang-migrate/migrate/v4/database/multistmt github.com/golang-migrate/migrate/v4/database/postgres github.com/golang-migrate/migrate/v4/internal/url github.com/golang-migrate/migrate/v4/source github.com/golang-migrate/migrate/v4/source/file +github.com/golang-migrate/migrate/v4/source/iofs # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -592,7 +594,7 @@ github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log github.com/opentracing/opentracing-go/mocktracer -# github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 +# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 ## explicit; go 1.14 github.com/pkg/browser # github.com/pkg/errors v0.9.1 From dac9421f14e381664d4ce99692abbc77d340218d Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 16:37:30 -0800 Subject: [PATCH 024/133] Bump github.com/armon/go-metrics from 0.4.0 to 0.4.1 (#5050) Bumps [github.com/armon/go-metrics](https://github.com/armon/go-metrics) from 0.4.0 to 0.4.1. - [Release notes](https://github.com/armon/go-metrics/releases) - [Commits](https://github.com/armon/go-metrics/compare/v0.4.0...v0.4.1) --- updated-dependencies: - dependency-name: github.com/armon/go-metrics dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 ++-- .../github.com/armon/go-metrics/prometheus/prometheus.go | 7 +++---- vendor/modules.txt | 2 +- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 2aff2097d0b..aee43baff8a 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/NYTimes/gziphandler v1.1.1 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alicebob/miniredis/v2 v2.23.1 - github.com/armon/go-metrics v0.4.0 + github.com/armon/go-metrics v0.4.1 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.0 diff --git a/go.sum b/go.sum index 7a9f8f39798..388216e36dc 100644 --- a/go.sum +++ b/go.sum @@ -180,8 +180,8 @@ github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= diff --git a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go index d8221fbf014..5b2eeb43ca5 100644 --- a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go +++ b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go @@ -6,7 +6,6 @@ package prometheus import ( "fmt" "log" - "regexp" "strings" "sync" "time" @@ -248,15 +247,15 @@ func initCounters(m *sync.Map, counters []CounterDefinition, help map[string]str return } -var forbiddenChars = regexp.MustCompile("[ .=\\-/]") +var forbiddenCharsReplacer = strings.NewReplacer(" ", "_", ".", "_", "=", "_", "-", "_", "/", "_") func flattenKey(parts []string, labels []metrics.Label) (string, string) { key := strings.Join(parts, "_") - key = forbiddenChars.ReplaceAllString(key, "_") + key = forbiddenCharsReplacer.Replace(key) hash := key for _, label := range labels { - hash += fmt.Sprintf(";%s=%s", 
label.Name, label.Value) + hash += ";" + label.Name + "=" + label.Value } return key, hash diff --git a/vendor/modules.txt b/vendor/modules.txt index 085d98101f3..5e9b0e40376 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -94,7 +94,7 @@ github.com/alicebob/miniredis/v2/geohash github.com/alicebob/miniredis/v2/hyperloglog github.com/alicebob/miniredis/v2/metro github.com/alicebob/miniredis/v2/server -# github.com/armon/go-metrics v0.4.0 +# github.com/armon/go-metrics v0.4.1 ## explicit; go 1.12 github.com/armon/go-metrics github.com/armon/go-metrics/prometheus From 8dfd02b4529891e872e09ede235c81ff6a07e99a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 18:04:20 -0800 Subject: [PATCH 025/133] Bump github.com/felixge/fgprof from 0.9.2 to 0.9.3 (#5051) Bumps [github.com/felixge/fgprof](https://github.com/felixge/fgprof) from 0.9.2 to 0.9.3. - [Release notes](https://github.com/felixge/fgprof/releases) - [Commits](https://github.com/felixge/fgprof/compare/v0.9.2...v0.9.3) --- updated-dependencies: - dependency-name: github.com/felixge/fgprof dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/felixge/fgprof/README.md | 15 +- vendor/github.com/felixge/fgprof/fgprof.go | 225 ++++++++++++++++++--- vendor/github.com/felixge/fgprof/format.go | 105 ---------- vendor/github.com/felixge/fgprof/pprof.go | 56 ----- vendor/modules.txt | 2 +- 7 files changed, 202 insertions(+), 207 deletions(-) delete mode 100644 vendor/github.com/felixge/fgprof/format.go delete mode 100644 vendor/github.com/felixge/fgprof/pprof.go diff --git a/go.mod b/go.mod index aee43baff8a..590c345675a 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/felixge/fgprof v0.9.2 + github.com/felixge/fgprof v0.9.3 github.com/go-kit/log v0.2.1 github.com/go-openapi/strfmt v0.21.3 github.com/go-openapi/swag v0.22.3 diff --git a/go.sum b/go.sum index 388216e36dc..5d3536330d2 100644 --- a/go.sum +++ b/go.sum @@ -519,8 +519,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/fgprof v0.9.2 h1:tAMHtWMyl6E0BimjVbFt7fieU6FpjttsZN7j0wT5blc= -github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRealT5sg= +github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= diff --git a/vendor/github.com/felixge/fgprof/README.md b/vendor/github.com/felixge/fgprof/README.md index 1c1b20536fe..15b16794b95 100644 --- a/vendor/github.com/felixge/fgprof/README.md +++ 
b/vendor/github.com/felixge/fgprof/README.md @@ -10,6 +10,8 @@ Go's builtin sampling CPU profiler can only show On-CPU time, but it's better th fgprof is designed for analyzing applications with mixed I/O and CPU workloads. This kind of profiling is also known as wall-clock profiling. +⚠️ Please upgrade to Go 1.19 or newer. In older versions of Go fgprof can cause significant STW latencies in applications with a lot of goroutines (> 1-10k). See [CL 387415](https://go-review.googlesource.com/c/go/+/387415) for more details. + ## Quick Start If this is the first time you hear about fgprof, you should start by reading about [The Problem](#the-problem) & [How it Works](#how-it-works). @@ -175,7 +177,7 @@ fgprof is implemented as a background goroutine that wakes up 99 times per secon This data is used to maintain an in-memory stack counter which can be converted to the pprof or folded output format. The meat of the implementation is super simple and < 100 lines of code, you should [check it out](./fgprof.go). -The overhead of fgprof increases with the number of active goroutines (including those waiting on I/O, Channels, Locks, etc.) executed by your program. If your program typically has less than 1000 active goroutines, you shouldn't have much to worry about. However, at 10k or more goroutines fgprof is likely to become unable to maintain its sampling rate and to significantly degrade the performance of your application, see [BenchmarkProfilerGoroutines.txt](./BenchmarkProfilerGoroutines.txt). The latter is due to `runtime.GoroutineProfile()` calling `stopTheWorld()`. For now the advise is to test the impact of the profiler on a development environment before running it against production instances. There are ideas for making fgprof more scalable and safe for programs with a high number of goroutines, but they will likely need improved APIs from the Go core. +The overhead of fgprof increases with the number of active goroutines (including those waiting on I/O, Channels, Locks, etc.) executed by your program. If your program typically has less than 1000 active goroutines, you shouldn't have much to worry about. However, at 10k or more goroutines fgprof might start to cause some noticeable overhead. ### Go's builtin CPU Profiler @@ -183,21 +185,10 @@ The builtin Go CPU profiler uses the [setitimer(2)](https://linux.die.net/man/2/ Since Go uses non-blocking I/O, Goroutines that wait on I/O are parked and not running on any threads. Therefore they end up being largely invisible to Go's builtin CPU profiler. -## The Future of Go Profiling - -There is a great proposal for [hardware performance counters for CPU profiling](https://go.googlesource.com/proposal/+/refs/changes/08/219508/2/design/36821-perf-counter-pprof.md#5-empirical-evidence-on-the-accuracy-and-precision-of-pmu-profiles) in Go. The proposal is aimed at making the builtin CPU Profiler even more accurate, especially under highly parallel workloads on many CPUs. It also includes a very in-depth analysis of the current profiler. Based on the design, I think the proposed profiler would also be blind to I/O workloads, but still seems appealing for CPU based workloads. - -As far as fgprof itself is concerned, I might implement streaming output, leaving the final aggregation to other tools. This would open the door to even more advanced analysis, perhaps by integrating with tools such as [flamescope](https://github.com/Netflix/flamescope). - -Additionally I'm also open to the idea of contributing fgprof to the Go project itself. 
I've [floated the idea](https://groups.google.com/g/golang-dev/c/LCJyvL90xv8) on the golang-dev mailing list, so let's see what happens. - - ## Known Issues There is no perfect approach to profiling, and fgprof is no exception. Below is a list of known issues that will hopefully not be of practical concern for most users, but are important to highlight. -- fgprof can't catch goroutines while they are running in loops without function calls, only when they get asynchronously preempted. This can lead to reporting inaccuracies. Use the builtin CPU profiler if this is a problem for you. -- fgprof may not work in Go 1.13 if another goroutine is in a loop without function calls the whole time. Async preemption in Go 1.14 should mostly fix this issue. - Internal C functions are not showing up in the stack traces, e.g. `runtime.nanotime` which is called by `time.Since` in the example program. - The current implementation is relying on the Go scheduler to schedule the internal goroutine at a fixed sample rate. Scheduler delays, especially biased ones, might cause inaccuracies. diff --git a/vendor/github.com/felixge/fgprof/fgprof.go b/vendor/github.com/felixge/fgprof/fgprof.go index 31871c8395a..18f7d6f5fe4 100644 --- a/vendor/github.com/felixge/fgprof/fgprof.go +++ b/vendor/github.com/felixge/fgprof/fgprof.go @@ -4,16 +4,34 @@ package fgprof import ( + "fmt" "io" "runtime" + "sort" "strings" "time" + + "github.com/google/pprof/profile" +) + +// Format decides how the output is rendered to the user. +type Format string + +const ( + // FormatFolded is used by Brendan Gregg's FlameGraph utility, see + // https://github.com/brendangregg/FlameGraph#2-fold-stacks. + FormatFolded Format = "folded" + // FormatPprof is used by Google's pprof utility, see + // https://github.com/google/pprof/blob/master/proto/README.md. + FormatPprof Format = "pprof" ) // Start begins profiling the goroutines of the program and returns a function // that needs to be invoked by the caller to stop the profiling and write the // results to w using the given format. func Start(w io.Writer, format Format) func() error { + startTime := time.Now() + // Go's CPU profiler uses 100hz, but 99hz might be less likely to result in // accidental synchronization with the program we're profiling. const hz = 99 @@ -21,7 +39,7 @@ func Start(w io.Writer, format Format) func() error { stopCh := make(chan struct{}) prof := &profiler{} - stackCounts := stackCounter{} + profile := newWallclockProfile() go func() { defer ticker.Stop() @@ -30,7 +48,7 @@ func Start(w io.Writer, format Format) func() error { select { case <-ticker.C: stacks := prof.GoroutineProfile() - stackCounts.Update(stacks) + profile.Add(stacks) case <-stopCh: return } @@ -39,7 +57,9 @@ func Start(w io.Writer, format Format) func() error { return func() error { stopCh <- struct{}{} - return writeFormat(w, stackCounts.HumanMap(prof.SelfFrame()), format, hz) + endTime := time.Now() + profile.Ignore(prof.SelfFrames()...) + return profile.Export(w, format, hz, startTime, endTime) } } @@ -85,58 +105,203 @@ func (p *profiler) GoroutineProfile() []runtime.StackRecord { } } -func (p *profiler) SelfFrame() *runtime.Frame { - return p.selfFrame +// SelfFrames returns frames that belong to the profiler so that we can ignore +// them when exporting the final profile. 
+func (p *profiler) SelfFrames() []*runtime.Frame { + if p.selfFrame != nil { + return []*runtime.Frame{p.selfFrame} + } + return nil } -type stringStackCounter map[string]int +func newWallclockProfile() *wallclockProfile { + return &wallclockProfile{stacks: map[[32]uintptr]*wallclockStack{}} +} -func (s stringStackCounter) Update(p []runtime.StackRecord) { - for _, pp := range p { - frames := runtime.CallersFrames(pp.Stack()) +// wallclockProfile holds a wallclock profile that can be exported in different +// formats. +type wallclockProfile struct { + stacks map[[32]uintptr]*wallclockStack + ignore []*runtime.Frame +} - var stack []string - for { - frame, more := frames.Next() - stack = append([]string{frame.Function}, stack...) - if !more { - break +// wallclockStack holds the symbolized frames of a stack trace and the number +// of times it has been seen. +type wallclockStack struct { + frames []*runtime.Frame + count int +} + +// Ignore sets a list of frames that should be ignored when exporting the +// profile. +func (p *wallclockProfile) Ignore(frames ...*runtime.Frame) { + p.ignore = frames +} + +// Add adds the given stack traces to the profile. +func (p *wallclockProfile) Add(stackRecords []runtime.StackRecord) { + for _, stackRecord := range stackRecords { + if _, ok := p.stacks[stackRecord.Stack0]; !ok { + ws := &wallclockStack{} + // symbolize pcs into frames + frames := runtime.CallersFrames(stackRecord.Stack()) + for { + frame, more := frames.Next() + ws.frames = append(ws.frames, &frame) + if !more { + break + } + } + p.stacks[stackRecord.Stack0] = ws + } + p.stacks[stackRecord.Stack0].count++ + } +} + +func (p *wallclockProfile) Export(w io.Writer, f Format, hz int, startTime, endTime time.Time) error { + switch f { + case FormatFolded: + return p.exportFolded(w) + case FormatPprof: + return p.exportPprof(hz, startTime, endTime).Write(w) + default: + return fmt.Errorf("unknown format: %q", f) + } +} + +// exportStacks returns the stacks in this profile except those that have been +// set to Ignore(). +func (p *wallclockProfile) exportStacks() []*wallclockStack { + stacks := make([]*wallclockStack, 0, len(p.stacks)) +nextStack: + for _, ws := range p.stacks { + for _, f := range ws.frames { + for _, igf := range p.ignore { + if f.Entry == igf.Entry { + continue nextStack + } } } - key := strings.Join(stack, ";") - s[key]++ + stacks = append(stacks, ws) } + return stacks } -type stackCounter map[[32]uintptr]int +func (p *wallclockProfile) exportFolded(w io.Writer) error { + var lines []string + stacks := p.exportStacks() + for _, ws := range stacks { + var foldedStack []string + for _, f := range ws.frames { + foldedStack = append(foldedStack, f.Function) + } + line := fmt.Sprintf("%s %d", strings.Join(foldedStack, ";"), ws.count) + lines = append(lines, line) + } + sort.Strings(lines) + _, err := io.WriteString(w, strings.Join(lines, "\n")+"\n") + return err +} + +func (p *wallclockProfile) exportPprof(hz int, startTime, endTime time.Time) *profile.Profile { + prof := &profile.Profile{} + m := &profile.Mapping{ID: 1, HasFunctions: true} + prof.Period = int64(1e9 / hz) // Number of nanoseconds between samples. 
+ prof.TimeNanos = startTime.UnixNano() + prof.DurationNanos = int64(endTime.Sub(startTime)) + prof.Mapping = []*profile.Mapping{m} + prof.SampleType = []*profile.ValueType{ + { + Type: "samples", + Unit: "count", + }, + { + Type: "time", + Unit: "nanoseconds", + }, + } + prof.PeriodType = &profile.ValueType{ + Type: "wallclock", + Unit: "nanoseconds", + } + + type functionKey struct { + Name string + Filename string + } + funcIdx := map[functionKey]*profile.Function{} -func (s stackCounter) Update(p []runtime.StackRecord) { - for _, pp := range p { - s[pp.Stack0]++ + type locationKey struct { + Function functionKey + Line int } + locationIdx := map[locationKey]*profile.Location{} + for _, ws := range p.exportStacks() { + sample := &profile.Sample{ + Value: []int64{ + int64(ws.count), + int64(1000 * 1000 * 1000 / hz * ws.count), + }, + } + + for _, frame := range ws.frames { + fnKey := functionKey{Name: frame.Function, Filename: frame.File} + function, ok := funcIdx[fnKey] + if !ok { + function = &profile.Function{ + ID: uint64(len(prof.Function)) + 1, + Name: frame.Function, + SystemName: frame.Function, + Filename: frame.File, + } + funcIdx[fnKey] = function + prof.Function = append(prof.Function, function) + } + + locKey := locationKey{Function: fnKey, Line: frame.Line} + location, ok := locationIdx[locKey] + if !ok { + location = &profile.Location{ + ID: uint64(len(prof.Location)) + 1, + Mapping: m, + Line: []profile.Line{{ + Function: function, + Line: int64(frame.Line), + }}, + } + locationIdx[locKey] = location + prof.Location = append(prof.Location, location) + } + sample.Location = append(sample.Location, location) + } + prof.Sample = append(prof.Sample, sample) + } + return prof } -// @TODO(fg) create a better interface that avoids the pprof output having to -// split the stacks using the `;` separator. -func (s stackCounter) HumanMap(exclude *runtime.Frame) map[string]int { - m := map[string]int{} +type symbolizedStacks map[[32]uintptr][]frameCount + +func (w wallclockProfile) Symbolize(exclude *runtime.Frame) symbolizedStacks { + m := make(symbolizedStacks) outer: - for stack0, count := range s { + for stack0, ws := range w.stacks { frames := runtime.CallersFrames((&runtime.StackRecord{Stack0: stack0}).Stack()) - var stack []string for { frame, more := frames.Next() if frame.Entry == exclude.Entry { continue outer } - stack = append([]string{frame.Function}, stack...) + m[stack0] = append(m[stack0], frameCount{Frame: &frame, Count: ws.count}) if !more { break } } - key := strings.Join(stack, ";") - m[key] = count } return m } + +type frameCount struct { + *runtime.Frame + Count int +} diff --git a/vendor/github.com/felixge/fgprof/format.go b/vendor/github.com/felixge/fgprof/format.go deleted file mode 100644 index c9bfe9fd2e9..00000000000 --- a/vendor/github.com/felixge/fgprof/format.go +++ /dev/null @@ -1,105 +0,0 @@ -package fgprof - -import ( - "fmt" - "io" - "sort" - "strings" - - "github.com/google/pprof/profile" -) - -// Format decides how the ouput is rendered to the user. -type Format string - -const ( - // FormatFolded is used by Brendan Gregg's FlameGraph utility, see - // https://github.com/brendangregg/FlameGraph#2-fold-stacks. - FormatFolded Format = "folded" - // FormatPprof is used by Google's pprof utility, see - // https://github.com/google/pprof/blob/master/proto/README.md. 
- FormatPprof Format = "pprof" -) - -func writeFormat(w io.Writer, s map[string]int, f Format, hz int) error { - switch f { - case FormatFolded: - return writeFolded(w, s) - case FormatPprof: - return toPprof(s, hz).Write(w) - default: - return fmt.Errorf("unknown format: %q", f) - } -} - -func writeFolded(w io.Writer, s map[string]int) error { - for _, stack := range sortedKeys(s) { - count := s[stack] - if _, err := fmt.Fprintf(w, "%s %d\n", stack, count); err != nil { - return err - } - } - return nil -} - -func toPprof(s map[string]int, hz int) *profile.Profile { - functionID := uint64(1) - locationID := uint64(1) - line := int64(1) - - p := &profile.Profile{} - m := &profile.Mapping{ID: 1, HasFunctions: true} - p.Mapping = []*profile.Mapping{m} - p.SampleType = []*profile.ValueType{ - { - Type: "samples", - Unit: "count", - }, - { - Type: "time", - Unit: "nanoseconds", - }, - } - - for stack, count := range s { - sample := &profile.Sample{ - Value: []int64{ - int64(count), - int64(1000 * 1000 * 1000 / hz * count), - }, - } - for _, fnName := range strings.Split(stack, ";") { - function := &profile.Function{ - ID: functionID, - Name: fnName, - } - p.Function = append(p.Function, function) - - location := &profile.Location{ - ID: locationID, - Mapping: m, - Line: []profile.Line{{Function: function}}, - } - p.Location = append(p.Location, location) - sample.Location = append([]*profile.Location{location}, sample.Location...) - - line++ - - locationID++ - functionID++ - } - p.Sample = append(p.Sample, sample) - } - return p -} - -func sortedKeys(s map[string]int) []string { - keys := make([]string, len(s)) - i := 0 - for stack := range s { - keys[i] = stack - i++ - } - sort.Strings(keys) - return keys -} diff --git a/vendor/github.com/felixge/fgprof/pprof.go b/vendor/github.com/felixge/fgprof/pprof.go deleted file mode 100644 index 4312ea7157d..00000000000 --- a/vendor/github.com/felixge/fgprof/pprof.go +++ /dev/null @@ -1,56 +0,0 @@ -package fgprof - -import ( - "strings" - - "github.com/google/pprof/profile" -) - -func toProfile(s map[string]int, hz int) *profile.Profile { - functionID := uint64(1) - locationID := uint64(1) - - p := &profile.Profile{} - m := &profile.Mapping{ID: 1, HasFunctions: true} - p.Mapping = []*profile.Mapping{m} - p.SampleType = []*profile.ValueType{ - { - Type: "samples", - Unit: "count", - }, - { - Type: "time", - Unit: "nanoseconds", - }, - } - - for _, stack := range sortedKeys(s) { - count := s[stack] - sample := &profile.Sample{ - Value: []int64{ - int64(count), - int64(1000 * 1000 * 1000 / hz * count), - }, - } - for _, fnName := range strings.Split(stack, ";") { - function := &profile.Function{ - ID: functionID, - Name: fnName, - } - p.Function = append(p.Function, function) - - location := &profile.Location{ - ID: locationID, - Mapping: m, - Line: []profile.Line{{Function: function}}, - } - p.Location = append(p.Location, location) - sample.Location = append(sample.Location, location) - - locationID++ - functionID++ - } - p.Sample = append(p.Sample, sample) - } - return p -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5e9b0e40376..10e90aa85d6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -273,7 +273,7 @@ github.com/facette/natsort # github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color -# github.com/felixge/fgprof v0.9.2 +# github.com/felixge/fgprof v0.9.3 ## explicit; go 1.14 github.com/felixge/fgprof # github.com/felixge/httpsnoop v1.0.3 From 6c8befc351ea73a57f7d920b1738920f90ca4af8 Mon Sep 17 00:00:00 
2001 From: Friedrich Gonzalez Date: Fri, 30 Dec 2022 08:26:40 +0100 Subject: [PATCH 026/133] Test example config with filesystem backend (#5072) Signed-off-by: Friedrich Gonzalez Signed-off-by: Friedrich Gonzalez Signed-off-by: Alex Le --- ...tarted_single_process_config_local_test.go | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 integration/getting_started_single_process_config_local_test.go diff --git a/integration/getting_started_single_process_config_local_test.go b/integration/getting_started_single_process_config_local_test.go new file mode 100644 index 00000000000..34eec0d661e --- /dev/null +++ b/integration/getting_started_single_process_config_local_test.go @@ -0,0 +1,60 @@ +//go:build requires_docker +// +build requires_docker + +package integration + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/e2e" + "github.com/cortexproject/cortex/integration/e2ecortex" +) + +func TestGettingStartedSingleProcessConfigWithFilesystem(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start Cortex components. + require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config-blocks-local.yaml", cortexConfigFile)) + + flags := map[string]string{} + + cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex-1", cortexConfigFile, flags, "", 9009, 9095) + require.NoError(t, s.StartAndWaitReady(cortex)) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + // Push some series to Cortex. + now := time.Now() + series, expectedVector := generateSeries("series_1", now, prompb.Label{Name: "foo", Value: "bar"}) + + res, err := c.Push(series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + // Query the series. + result, err := c.Query("series_1", now) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector, result.(model.Vector)) + + labelValues, err := c.LabelValues("foo", time.Time{}, time.Time{}, nil) + require.NoError(t, err) + require.Equal(t, model.LabelValues{"bar"}, labelValues) + + labelNames, err := c.LabelNames(time.Time{}, time.Time{}) + require.NoError(t, err) + require.Equal(t, []string{"__name__", "foo"}, labelNames) + + // Check that a range query does not return an error to sanity check the queryrange tripperware. + _, err = c.QueryRange("series_1", now.Add(-15*time.Minute), now, 15*time.Second) + require.NoError(t, err) +} From fce38be84d422df0602040df96642c95d2c13254 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Dec 2022 07:35:35 -0800 Subject: [PATCH 027/133] Bump github.com/hashicorp/consul/api from 1.17.0 to 1.18.0 (#5066) Bumps [github.com/hashicorp/consul/api](https://github.com/hashicorp/consul) from 1.17.0 to 1.18.0. - [Release notes](https://github.com/hashicorp/consul/releases) - [Changelog](https://github.com/hashicorp/consul/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/consul/compare/api/v1.17.0...api/v1.18.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/consul/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 ++-- .../consul/api/config_entry_discoverychain.go | 12 ++++++++++++ vendor/modules.txt | 2 +- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 590c345675a..ff093519c3a 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/hashicorp/consul/api v1.17.0 + github.com/hashicorp/consul/api v1.18.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/memberlist v0.5.0 diff --git a/go.sum b/go.sum index 5d3536330d2..2e08cb19bc7 100644 --- a/go.sum +++ b/go.sum @@ -846,8 +846,8 @@ github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.17.0 h1:aqytbw31uCPNn37ST+717IyGod+P1eTgSGu3yjRo4bs= -github.com/hashicorp/consul/api v1.17.0/go.mod h1:ZNwemOPAdgtV4cCx9fqxNmw+PI3vliW6gYin2WD+F2g= +github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= +github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index 734f9454e46..ff92125dcdc 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -69,6 +69,7 @@ type ServiceRouteDestination struct { Partition string `json:",omitempty"` PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"` RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` + IdleTimeout time.Duration `json:",omitempty" alias:"idle_timeout"` NumRetries uint32 `json:",omitempty" alias:"num_retries"` RetryOnConnectFailure bool `json:",omitempty" alias:"retry_on_connect_failure"` RetryOnStatusCodes []uint32 `json:",omitempty" alias:"retry_on_status_codes"` @@ -81,14 +82,19 @@ func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) { type Alias ServiceRouteDestination exported := &struct { RequestTimeout string `json:",omitempty"` + IdleTimeout string `json:",omitempty"` *Alias }{ RequestTimeout: e.RequestTimeout.String(), + IdleTimeout: e.IdleTimeout.String(), Alias: (*Alias)(e), } if e.RequestTimeout == 0 { exported.RequestTimeout = "" } + if e.IdleTimeout == 0 { + exported.IdleTimeout = "" + } return json.Marshal(exported) } @@ -97,6 +103,7 @@ func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error { type Alias ServiceRouteDestination aux := &struct { RequestTimeout string + IdleTimeout string *Alias }{ Alias: (*Alias)(e), @@ -110,6 +117,11 @@ func (e 
*ServiceRouteDestination) UnmarshalJSON(data []byte) error { return err } } + if aux.IdleTimeout != "" { + if e.IdleTimeout, err = time.ParseDuration(aux.IdleTimeout); err != nil { + return err + } + } return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 10e90aa85d6..ef87f118360 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -443,7 +443,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2/util/metautils github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hashicorp/consul/api v1.17.0 +# github.com/hashicorp/consul/api v1.18.0 ## explicit; go 1.12 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.1.0 From caabc49bfe06137fa61e4198efb05b9e041039d9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Dec 2022 07:36:47 -0800 Subject: [PATCH 028/133] Bump go.etcd.io/etcd/client/v3 from 3.5.4 to 3.5.6 (#5064) Bumps [go.etcd.io/etcd/client/v3](https://github.com/etcd-io/etcd) from 3.5.4 to 3.5.6. - [Release notes](https://github.com/etcd-io/etcd/releases) - [Changelog](https://github.com/etcd-io/etcd/blob/main/Dockerfile-release.amd64) - [Commits](https://github.com/etcd-io/etcd/compare/v3.5.4...v3.5.6) --- updated-dependencies: - dependency-name: go.etcd.io/etcd/client/v3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 6 +- vendor/go.etcd.io/etcd/client/v3/client.go | 4 +- vendor/go.etcd.io/etcd/client/v3/lease.go | 13 +++- vendor/go.etcd.io/etcd/client/v3/logger.go | 4 +- .../go.etcd.io/etcd/client/v3/maintenance.go | 1 + vendor/go.etcd.io/etcd/client/v3/op.go | 2 +- .../etcd/client/v3/retry_interceptor.go | 31 +++++--- vendor/go.etcd.io/etcd/client/v3/watch.go | 76 ++++++++++++++----- vendor/modules.txt | 2 +- 10 files changed, 99 insertions(+), 42 deletions(-) diff --git a/go.mod b/go.mod index ff093519c3a..1c1ca377ec2 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae go.etcd.io/etcd/api/v3 v3.5.6 go.etcd.io/etcd/client/pkg/v3 v3.5.6 - go.etcd.io/etcd/client/v3 v3.5.4 + go.etcd.io/etcd/client/v3 v3.5.6 go.opentelemetry.io/contrib/propagators/aws v1.12.0 go.opentelemetry.io/otel v1.11.2 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 diff --git a/go.sum b/go.sum index 2e08cb19bc7..d856c84bbd9 100644 --- a/go.sum +++ b/go.sum @@ -1560,17 +1560,15 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A= go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod 
h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E= +go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 2990379ab9f..4dfae89c610 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -286,8 +286,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc. if err != nil { return nil, fmt.Errorf("failed to configure dialer: %v", err) } - if c.Username != "" && c.Password != "" { - c.authTokenBundle = credentials.NewBundle(credentials.Config{}) + if c.authTokenBundle != nil { opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) } @@ -383,6 +382,7 @@ func newClient(cfg *Config) (*Client, error) { if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password + client.authTokenBundle = credentials.NewBundle(credentials.Config{}) } if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index bd31e6b4a5b..9e1b704648b 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -397,7 +397,7 @@ func (l *lessor) closeRequireLeader() { } } -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { +func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) { cctx, cancel := context.WithCancel(ctx) defer cancel() @@ -406,6 +406,15 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive return nil, toErr(ctx, err) } + defer func() { + if err := stream.CloseSend(); err != nil { + if ferr == nil { + ferr = toErr(ctx, err) + } + return + } + }() + err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) if err != nil { return nil, toErr(ctx, err) @@ -416,7 +425,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive return nil, toErr(ctx, rerr) } - karesp := &LeaseKeepAliveResponse{ + karesp = &LeaseKeepAliveResponse{ ResponseHeader: resp.GetHeader(), ID: LeaseID(resp.ID), TTL: resp.TTL, diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go index ecac42730f6..eaa35f2d3ac 100644 --- a/vendor/go.etcd.io/etcd/client/v3/logger.go +++ b/vendor/go.etcd.io/etcd/client/v3/logger.go @@ -51,8 +51,8 @@ func etcdClientDebugLevel() zapcore.Level { 
return zapcore.InfoLevel } var l zapcore.Level - if err := l.Set(envLevel); err == nil { - log.Printf("Deprecated env ETCD_CLIENT_DEBUG value. Using default level: 'info'") + if err := l.Set(envLevel); err != nil { + log.Printf("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'") return zapcore.InfoLevel } return l diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go index dbea530e66a..a98b8ca51e1 100644 --- a/vendor/go.etcd.io/etcd/client/v3/maintenance.go +++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go @@ -92,6 +92,7 @@ func NewMaintenance(c *Client) Maintenance { err = c.getToken(dctx) cancel() if err != nil { + conn.Close() return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err) } cancel = func() { conn.Close() } diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go index e8c0c1e08c9..5251906322c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/op.go +++ b/vendor/go.etcd.io/etcd/client/v3/op.go @@ -389,12 +389,12 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { + op.isOptsWithPrefix = true if len(op.key) == 0 { op.key, op.end = []byte{0}, []byte{0} return } op.end = getPrefix(op.key) - op.isOptsWithPrefix = true } } diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go index 04f157a1dcb..7dc5ddae0fd 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go @@ -74,13 +74,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien continue } if c.shouldRefreshToken(lastErr, callOpts) { - // clear auth token before refreshing it. - // call c.Auth.Authenticate with an invalid token will always fail the auth check on the server-side, - // if the server has not apply the patch of pr #12165 (https://github.com/etcd-io/etcd/pull/12165) - // and a rpctypes.ErrInvalidAuthToken will recursively call c.getToken until system run out of resource. - c.authTokenBundle.UpdateAuthToken("") - - gterr := c.getToken(ctx) + gterr := c.refreshToken(ctx) if gterr != nil { c.GetLogger().Warn( "retrying of unary invoker failed to fetch new auth token", @@ -161,6 +155,24 @@ func (c *Client) shouldRefreshToken(err error, callOpts *options) bool { (rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision) } +func (c *Client) refreshToken(ctx context.Context) error { + if c.authTokenBundle == nil { + // c.authTokenBundle will be initialized only when + // c.Username != "" && c.Password != "". + // + // When users use the TLS CommonName based authentication, the + // authTokenBundle is always nil. But it's possible for the clients + // to get `rpctypes.ErrAuthOldRevision` response when the clients + // concurrently modify auth data (e.g, addUser, deleteUser etc.). + // In this case, there is no need to refresh the token; instead the + // clients just need to retry the operations (e.g. Put, Delete etc). + return nil + } + // clear auth token before refreshing it. + c.authTokenBundle.UpdateAuthToken("") + return c.getToken(ctx) +} + // type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a // proxy to the underlying call. 
If any of the RecvMsg() calls fail, it will try to reestablish // a new ClientStream according to the retry policy. @@ -259,10 +271,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return true, err } if s.client.shouldRefreshToken(err, s.callOpts) { - // clear auth token to avoid failure when call getToken - s.client.authTokenBundle.UpdateAuthToken("") - - gterr := s.client.getToken(s.ctx) + gterr := s.client.refreshToken(s.ctx) if gterr != nil { s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) return false, err // return the original error for simplicity diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index b73925ba128..90f125ac466 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "strings" "sync" "time" @@ -37,6 +38,13 @@ const ( EventTypePut = mvccpb.PUT closeSendErrTimeout = 250 * time.Millisecond + + // AutoWatchID is the watcher ID passed in WatchStream.Watch when no + // user-provided ID is available. If pass, an ID will automatically be assigned. + AutoWatchID = 0 + + // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch. + InvalidWatchID = -1 ) type Event mvccpb.Event @@ -450,7 +458,7 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) { func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { // check watch ID for backward compatibility (<= v3.3) - if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { + if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") { w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) // failed; no channel close(ws.recvc) @@ -481,7 +489,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { } else if ws.outc != nil { close(ws.outc) } - if ws.id != -1 { + if ws.id != InvalidWatchID { delete(w.substreams, ws.id) return } @@ -533,6 +541,7 @@ func (w *watchGrpcStream) run() { cancelSet := make(map[int64]struct{}) var cur *pb.WatchResponse + backoff := time.Millisecond for { select { // Watch() requested @@ -543,7 +552,7 @@ func (w *watchGrpcStream) run() { // TODO: pass custom watch ID? 
ws := &watcherStream{ initReq: *wreq, - id: -1, + id: InvalidWatchID, outc: outc, // unbuffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), @@ -580,6 +589,26 @@ func (w *watchGrpcStream) run() { switch { case pbresp.Created: + cancelReasonError := v3rpc.Error(errors.New(pbresp.CancelReason)) + if shouldRetryWatch(cancelReasonError) { + var newErr error + if wc, newErr = w.newWatchClient(); newErr != nil { + w.lg.Error("failed to create a new watch client", zap.Error(newErr)) + return + } + + if len(w.resuming) != 0 { + if ws := w.resuming[0]; ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + } + + cur = nil + continue + } + // response to head of queue creation if len(w.resuming) != 0 { if ws := w.resuming[0]; ws != nil { @@ -649,6 +678,7 @@ func (w *watchGrpcStream) run() { closeErr = err return } + backoff = w.backoffIfUnavailable(backoff, err) if wc, closeErr = w.newWatchClient(); closeErr != nil { return } @@ -669,7 +699,7 @@ func (w *watchGrpcStream) run() { if len(w.substreams)+len(w.resuming) == 0 { return } - if ws.id != -1 { + if ws.id != InvalidWatchID { // client is closing an established watch; close it on the server proactively instead of waiting // to close when the next message arrives cancelSet[ws.id] = struct{}{} @@ -688,6 +718,11 @@ func (w *watchGrpcStream) run() { } } +func shouldRetryWatch(cancelReasonError error) bool { + return (strings.Compare(cancelReasonError.Error(), v3rpc.ErrGRPCInvalidAuthToken.Error()) == 0) || + (strings.Compare(cancelReasonError.Error(), v3rpc.ErrGRPCAuthOldRevision.Error()) == 0) +} + // nextResume chooses the next resuming to register with the grpc stream. Abandoned // streams are marked as nil in the queue since the head must wait for its inflight registration. func (w *watchGrpcStream) nextResume() *watcherStream { @@ -716,9 +751,9 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { cancelReason: pbresp.CancelReason, } - // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to + // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to // indicate they should be broadcast. - if wr.IsProgressNotify() && pbresp.WatchId == -1 { + if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID { return w.broadcastResponse(wr) } @@ -873,7 +908,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { w.resumec = make(chan struct{}) w.joinSubstreams() for _, ws := range w.substreams { - ws.id = -1 + ws.id = InvalidWatchID w.resuming = append(w.resuming, ws) } // strip out nils, if any @@ -963,6 +998,21 @@ func (w *watchGrpcStream) joinSubstreams() { var maxBackoff = 100 * time.Millisecond +func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration { + if isUnavailableErr(w.ctx, err) { + // retry, but backoff + if backoff < maxBackoff { + // 25% backoff factor + backoff = backoff + backoff/4 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + time.Sleep(backoff) + } + return backoff +} + // openWatchClient retries opening a watch client until success or halt. 
// manually retry in case "ws==nil && err==nil" // TODO: remove FailFast=false @@ -983,17 +1033,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) if isHaltErr(w.ctx, err) { return nil, v3rpc.Error(err) } - if isUnavailableErr(w.ctx, err) { - // retry, but backoff - if backoff < maxBackoff { - // 25% backoff factor - backoff = backoff + backoff/4 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - time.Sleep(backoff) - } + backoff = w.backoffIfUnavailable(backoff, err) } return ws, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index ef87f118360..12189bf21de 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -893,7 +893,7 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.4 +# go.etcd.io/etcd/client/v3 v3.5.6 ## explicit; go 1.16 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials From 6236cfa155af4e15a927c75d590d079c0eb2714a Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 30 Dec 2022 09:36:56 -0800 Subject: [PATCH 029/133] Create Span for the codec MergeResponse method (#5075) * Create Span for the codec MergeResponse method Signed-off-by: Alan Protasio * Changelog Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/querier/tripperware/instantquery/instant_query.go | 6 +++++- pkg/querier/tripperware/instantquery/instant_query_test.go | 2 +- pkg/querier/tripperware/query.go | 2 +- pkg/querier/tripperware/queryrange/query_range.go | 5 ++++- pkg/querier/tripperware/queryrange/query_range_test.go | 2 +- pkg/querier/tripperware/queryrange/results_cache.go | 6 +++--- pkg/querier/tripperware/queryrange/split_by_interval.go | 2 +- .../tripperware/queryrange/split_by_interval_test.go | 2 +- pkg/querier/tripperware/shard_by.go | 2 +- 10 files changed, 19 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e50a507f75..13539382189 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ * [FEATURE] Ingester: Enable snapshotting of In-memory TSDB on disk during shutdown via `-blocks-storage.tsdb.memory-snapshot-on-shutdown`. #5011 * [FEATURE] Query Frontend/Scheduler: Add a new counter metric `cortex_request_queue_requests_total` for total requests going to queue. #5030 * [FEATURE] Build ARM docker images. #5041 +* [FEATURE] Query-frontend/Querier: Create spans to measure time to merge promql responses. #5041 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. 
#5044 diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index f925f41cc19..0af7d72627b 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -243,7 +243,11 @@ func (instantQueryCodec) EncodeResponse(ctx context.Context, res tripperware.Res return &resp, nil } -func (instantQueryCodec) MergeResponse(responses ...tripperware.Response) (tripperware.Response, error) { +func (instantQueryCodec) MergeResponse(ctx context.Context, responses ...tripperware.Response) (tripperware.Response, error) { + sp, _ := opentracing.StartSpanFromContext(ctx, "PrometheusInstantQueryResponse.MergeResponse") + sp.SetTag("response_count", len(responses)) + defer sp.Finish() + if len(responses) == 0 { return NewEmptyPrometheusInstantQueryResponse(), nil } else if len(responses) == 1 { diff --git a/pkg/querier/tripperware/instantquery/instant_query_test.go b/pkg/querier/tripperware/instantquery/instant_query_test.go index a0013a5ab0a..efdf968fea9 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_test.go +++ b/pkg/querier/tripperware/instantquery/instant_query_test.go @@ -308,7 +308,7 @@ func TestMergeResponse(t *testing.T) { require.NoError(t, err) resps = append(resps, dr) } - resp, err := InstantQueryCodec.MergeResponse(resps...) + resp, err := InstantQueryCodec.MergeResponse(context.Background(), resps...) assert.Equal(t, err, tc.expectedErr) if err != nil { return diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 4f90fb5cce7..8bff06ccb6e 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -40,7 +40,7 @@ type Codec interface { // Merger is used by middlewares making multiple requests to merge back all responses into a single one. type Merger interface { // MergeResponse merges responses from multiple requests into a single Response - MergeResponse(...Response) (Response, error) + MergeResponse(context.Context, ...Response) (Response, error) } // Response represents a query range response. diff --git a/pkg/querier/tripperware/queryrange/query_range.go b/pkg/querier/tripperware/queryrange/query_range.go index c3ad1839c19..cb94e8b1196 100644 --- a/pkg/querier/tripperware/queryrange/query_range.go +++ b/pkg/querier/tripperware/queryrange/query_range.go @@ -127,7 +127,10 @@ func NewEmptyPrometheusResponse() *PrometheusResponse { } } -func (c prometheusCodec) MergeResponse(responses ...tripperware.Response) (tripperware.Response, error) { +func (c prometheusCodec) MergeResponse(ctx context.Context, responses ...tripperware.Response) (tripperware.Response, error) { + sp, _ := opentracing.StartSpanFromContext(ctx, "QueryRangeResponse.MergeResponse") + sp.SetTag("response_count", len(responses)) + defer sp.Finish() if len(responses) == 0 { return NewEmptyPrometheusResponse(), nil } diff --git a/pkg/querier/tripperware/queryrange/query_range_test.go b/pkg/querier/tripperware/queryrange/query_range_test.go index cfad6e8f999..bd61485d064 100644 --- a/pkg/querier/tripperware/queryrange/query_range_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_test.go @@ -652,7 +652,7 @@ func TestMergeAPIResponses(t *testing.T) { }, }} { t.Run(tc.name, func(t *testing.T) { - output, err := PrometheusCodec.MergeResponse(tc.input...) + output, err := PrometheusCodec.MergeResponse(context.Background(), tc.input...) 
require.NoError(t, err) require.Equal(t, tc.expected, output) }) diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index c16d63dc892..5c0fc543759 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -407,7 +407,7 @@ func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, exte return nil, nil, err } if len(requests) == 0 { - response, err := s.merger.MergeResponse(responses...) + response, err := s.merger.MergeResponse(context.Background(), responses...) // No downstream requests so no need to write back to the cache. return response, nil, err } @@ -469,7 +469,7 @@ func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, exte if err != nil { return nil, nil, err } - merged, err := s.merger.MergeResponse(accumulator.Response, currentRes) + merged, err := s.merger.MergeResponse(ctx, accumulator.Response, currentRes) if err != nil { return nil, nil, err } @@ -481,7 +481,7 @@ func (s resultsCache) handleHit(ctx context.Context, r tripperware.Request, exte return nil, nil, err } - response, err := s.merger.MergeResponse(responses...) + response, err := s.merger.MergeResponse(ctx, responses...) return response, mergedExtents, err } diff --git a/pkg/querier/tripperware/queryrange/split_by_interval.go b/pkg/querier/tripperware/queryrange/split_by_interval.go index 6bf87bf13d0..4ad6e8f47f8 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval.go @@ -61,7 +61,7 @@ func (s splitByInterval) Do(ctx context.Context, r tripperware.Request) (tripper resps = append(resps, reqResp.Response) } - response, err := s.merger.MergeResponse(resps...) + response, err := s.merger.MergeResponse(ctx, resps...) if err != nil { return nil, err } diff --git a/pkg/querier/tripperware/queryrange/split_by_interval_test.go b/pkg/querier/tripperware/queryrange/split_by_interval_test.go index 3451570ce42..22e9565bce1 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval_test.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval_test.go @@ -266,7 +266,7 @@ func TestSplitQuery(t *testing.T) { } func TestSplitByDay(t *testing.T) { - mergedResponse, err := PrometheusCodec.MergeResponse(parsedResponse, parsedResponse) + mergedResponse, err := PrometheusCodec.MergeResponse(context.Background(), parsedResponse, parsedResponse) require.NoError(t, err) mergedHTTPResponse, err := PrometheusCodec.EncodeResponse(context.Background(), mergedResponse) diff --git a/pkg/querier/tripperware/shard_by.go b/pkg/querier/tripperware/shard_by.go index 8bb218af321..13d6645612e 100644 --- a/pkg/querier/tripperware/shard_by.go +++ b/pkg/querier/tripperware/shard_by.go @@ -78,7 +78,7 @@ func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { resps = append(resps, reqResp.Response) } - return s.merger.MergeResponse(resps...) + return s.merger.MergeResponse(ctx, resps...) 
} func (s shardBy) shardQuery(l log.Logger, numShards int, r Request, analysis querysharding.QueryAnalysis) []Request { From 2ee2fe7ed0fe441be17009b2b5ee9b0486ad9eed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Dec 2022 09:39:42 -0800 Subject: [PATCH 030/133] Bump github.com/prometheus/client_golang from 1.13.0 to 1.14.0 (#5065) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.13.0 to 1.14.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.13.0...v1.14.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- .../client_golang/api/prometheus/v1/api.go | 17 +- .../client_golang/prometheus/counter.go | 11 +- .../client_golang/prometheus/doc.go | 107 +- .../client_golang/prometheus/gauge.go | 6 +- .../client_golang/prometheus/histogram.go | 978 ++++++++++++++++-- .../prometheus/internal/almost_equal.go | 60 ++ .../prometheus/internal/difflib.go | 13 +- .../client_golang/prometheus/labels.go | 3 +- .../client_golang/prometheus/metric.go | 2 +- .../client_golang/prometheus/promauto/auto.go | 170 +-- .../prometheus/promhttp/instrument_client.go | 5 +- .../prometheus/promhttp/instrument_server.go | 24 +- .../client_golang/prometheus/push/push.go | 20 +- .../client_golang/prometheus/registry.go | 34 +- .../client_golang/prometheus/summary.go | 9 +- .../client_golang/prometheus/timer.go | 11 +- vendor/modules.txt | 2 +- 19 files changed, 1204 insertions(+), 274 deletions(-) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go diff --git a/go.mod b/go.mod index 1c1ca377ec2..545e4c73b64 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.24.0 - github.com/prometheus/client_golang v1.13.0 + github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.37.0 github.com/prometheus/prometheus v0.39.1 diff --git a/go.sum b/go.sum index d856c84bbd9..f77076f2f35 100644 --- a/go.sum +++ b/go.sum @@ -1318,8 +1318,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 5f0ecef2987..f74139c71f6 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -335,14 +335,15 @@ type RuleGroup struct { // that rules are returned in by the API. // // Rule types can be determined using a type switch: -// switch v := rule.(type) { -// case RecordingRule: -// fmt.Print("got a recording rule") -// case AlertingRule: -// fmt.Print("got a alerting rule") -// default: -// fmt.Printf("unknown rule type %s", v) -// } +// +// switch v := rule.(type) { +// case RecordingRule: +// fmt.Print("got a recording rule") +// case AlertingRule: +// fmt.Print("got a alerting rule") +// default: +// fmt.Printf("unknown rule type %s", v) +// } type Rules []interface{} // AlertingRule models a alerting rule. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index de30de6daa3..a912b75a05b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -140,12 +140,13 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } + val := c.get() return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } @@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d6a..811072cbd54 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. 
// -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. // -// Metrics +// # Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example @@ -100,7 +111,7 @@ // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // -// Custom Collectors and constant Metrics +// # Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. 
At a first @@ -141,7 +152,7 @@ // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // -// Advanced Uses of the Registry +// # Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. @@ -176,23 +187,23 @@ // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // -// HTTP Exposition +// # HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // -// Pushing to the Pushgateway +// # Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // -// Graphite Bridge +// # Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // -// Other Means of Exposition +// # Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index bd0733d6a7d..21271a5bb46 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 0d47fecdc22..4c873a01c3d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -28,19 +28,216 @@ import ( dto "github.com/prometheus/client_model/go" ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. 
See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 
0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 
0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 
0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. +// +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +247,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +262,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. 
Most likely, however, you will be required to define buckets // customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 + +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, ) -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. func LinearBuckets(start, width float64, count int) []float64 { @@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 { return buckets } -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket +// has an upper bound of 'start' and each following bucket's upper bound is +// 'factor' times the previous bucket's upper bound. The final +Inf bucket is +// not counted and not included in the returned slice. The returned slice is +// meant to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, // or if 'factor' is less than or equal 1. @@ -180,8 +388,85 @@ type HistogramOpts struct { // element in the slice is the upper inclusive bound of a bucket. The // values must be sorted in strictly increasing order. There is no need // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. + // implicitly. If Buckets is left as nil or set to a slice of length + // zero, it is replaced by default buckets. The default buckets are + // DefBuckets if no buckets for a native histogram (see below) are used, + // otherwise the default is no buckets. (In other words, if you want to + // use both reguler buckets and buckets for a native histogram, you have + // to define the regular buckets here explicitly.) 
Buckets []float64 + + // If NativeHistogramBucketFactor is greater than one, so-called sparse + // buckets are used (in addition to the regular buckets, if defined + // above). A Histogram with sparse buckets will be ingested as a Native + // Histogram by a Prometheus server with that feature enabled (requires + // Prometheus v2.40+). Sparse buckets are exponential buckets covering + // the whole float64 range (with the exception of the “zero” bucket, see + // SparseBucketsZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant + // factor. NativeHistogramBucketFactor provides an upper bound for this + // factor (exception see below). The smaller + // NativeHistogramBucketFactor, the more buckets will be used and thus + // the more costly the histogram will become. A generally good trade-off + // between cost and accuracy is a value of 1.1 (each bucket is at most + // 10% wider than the previous one), which will result in each power of + // two divided into 8 buckets (e.g. there will be 8 buckets between 1 + // and 2, same as between 2 and 4, and 4 and 8, etc.). + // + // Details about the actually used factor: The factor is calculated as + // 2^(2^n), where n is an integer number between (and including) -8 and + // 4. n is chosen so that the resulting factor is the largest that is + // still smaller or equal to NativeHistogramBucketFactor. Note that the + // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) + // ). If NativeHistogramBucketFactor is greater than 1 but smaller than + // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though + // it is larger than the provided NativeHistogramBucketFactor. + // + // NOTE: Native Histograms are still an experimental feature. Their + // behavior might still change without a major version + // bump. Subsequently, all NativeHistogram... options here might still + // change their behavior or name (or might completely disappear) without + // a major version bump. + NativeHistogramBucketFactor float64 + // All observations with an absolute value of less or equal + // NativeHistogramZeroThreshold are accumulated into a “zero” + // bucket. For best results, this should be close to a bucket + // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold is left at zero, + // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // a zero bucket with an actual threshold of zero (i.e. only + // observations of precisely zero will go into the zero bucket), set + // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero + // constant (or any negative float value). + NativeHistogramZeroThreshold float64 + + // The remaining fields define a strategy to limit the number of + // populated sparse buckets. If NativeHistogramMaxBucketNumber is left + // at zero, the number of buckets is not limited. (Note that this might + // lead to unbounded memory consumption if the values observed by the + // Histogram are sufficiently wide-spread. In particular, this could be + // used as a DoS attack vector. Where the observed values depend on + // external inputs, it is highly recommended to set a + // NativeHistogramMaxBucketNumber.) 
Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: First, if the last reset (or the creation) of the histogram + // is at least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). If less time has passed, or if + // NativeHistogramMinResetDuration is zero, no reset is + // performed. Instead, the zero threshold is increased sufficiently to + // reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. After that, if the + // number of buckets still exceeds NativeHistogramMaxBucketNumber, the + // resolution of the histogram is reduced by doubling the width of the + // sparse buckets (up to a growth factor between one bucket to the next + // of 2^(2^4) = 65536, see above). + NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. + } else { + switch { + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. + h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.counts[0] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } + h.counts[1] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. 
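The HistogramOpts fields introduced in the hunk above are easiest to see from the caller's side. The following standalone sketch is not part of this patch; the metric name, bucket factor, and limits are made-up illustrative values. It shows how a histogram combining classic buckets with the experimental native (sparse) buckets could be constructed against this vendored client_golang version:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// newLatencyHistogram is a hypothetical helper: it registers a histogram that
// keeps the classic DefBuckets and additionally enables the experimental
// native (sparse) buckets described in the HistogramOpts comments above.
func newLatencyHistogram(reg prometheus.Registerer) prometheus.Histogram {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "example_request_duration_seconds",
		Help: "Illustrative request latency.",
		// Classic buckets must be listed explicitly when native buckets
		// are also enabled, since the DefBuckets default no longer applies.
		Buckets: prometheus.DefBuckets,

		// Each native bucket grows by at most 10%, which resolves to
		// schema 3 (actual growth factor 2^(2^-3), roughly 1.09).
		NativeHistogramBucketFactor: 1.1,
		// Bound memory usage: once more than 100 native buckets are
		// populated, reset (at most hourly) or reduce resolution, per the
		// strategy documented in the vendored comments.
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	reg.MustRegister(h)
	return h
}

func main() {
	reg := prometheus.NewRegistry()
	h := newLatencyHistogram(reg)
	h.Observe(0.042) // seconds
}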
@@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } type histogramCounts struct { + // Order in this struct matters for the alignment required by atomic + // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // observations. sumBits uint64 count uint64 + + // nativeHistogramZeroBucket counts all (positive and negative) + // observations in the zero bucket (with an absolute value less or equal + // the current threshold, see next field. + nativeHistogramZeroBucket uint64 + // nativeHistogramZeroThresholdBits is the bit pattern of the current + // threshold for the zero bucket. It's initially equal to + // nativeHistogramZeroThreshold but may change according to the bucket + // count limitation strategy. + nativeHistogramZeroThresholdBits uint64 + // nativeHistogramSchema may change over time according to the bucket + // count limitation strategy and therefore has to be saved here. + nativeHistogramSchema int32 + // Number of (positive and negative) sparse buckets. + nativeHistogramBucketsNumber uint32 + + // Regular buckets. buckets []uint64 + + // The sparse buckets for native histograms are implemented with a + // sync.Map for now. A dedicated data structure will likely be more + // efficient. There are separate maps for negative and positive + // observations. The map's value is an *int64, counting observations in + // that bucket. (Note that we don't use uint64 as an int64 won't + // overflow in practice, and working with signed numbers from the + // beginning simplifies the handling of deltas.) The map's key is the + // index of the bucket according to the used + // nativeHistogramSchema. Index 0 is for an upper bound of 1. + nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map +} + +// observe manages the parts of observe that only affects +// histogramCounts. doSparse is true if sparse buckets should be done, +// too. +func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { + if bucket < len(hc.buckets) { + atomic.AddUint64(&hc.buckets[bucket], 1) + } + atomicAddFloat(&hc.sumBits, v) + if doSparse && !math.IsNaN(v) { + var ( + key int + schema = atomic.LoadInt32(&hc.nativeHistogramSchema) + zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) + bucketCreated, isInf bool + ) + if math.IsInf(v, 0) { + // Pretend v is MaxFloat64 but later increment key by one. 
+ if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) } type histogram struct { @@ -276,7 +667,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -284,8 +675,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -293,9 +686,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. 
- } + waitForCooldown(count, coldCounts) his := &dto.Histogram{ Bucket: make([]*dto.Bucket, len(h.upperBounds)), SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } + out.Histogram = his + out.Label = h.labelPairs + var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) @@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } - - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) + if h.nativeHistogramSchema > math.MinInt32 { + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) + zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) + + defer func() { + coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) + coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) + }() + + his.ZeroCount = proto.Uint64(zeroBucket) + his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) } + addAndResetCounts(hotCounts, coldCounts) return nil } @@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { + // Do not add to sparse buckets for NaN observations. + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) hotCounts := h.counts[n>>63] + hotCounts.observe(v, bucket, doSparse) + if doSparse { + h.limitBuckets(hotCounts, v, bucket) + } +} - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) +// limitSparsebuckets applies a strategy to limit the number of populated sparse +// buckets. It's generally best effort, and there are situations where the +// number can go higher (if even the lowest resolution isn't enough to reduce +// the number sufficiently, or if the provided counts aren't fully updated yet +// by a concurrently happening Write call). +func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { + if h.nativeHistogramMaxBuckets == 0 { + return // No limit configured. 
} - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded yet. + } + + h.mtx.Lock() + defer h.mtx.Unlock() + + // The hot counts might have been swapped just before we acquired the + // lock. Re-fetch the hot counts first... + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hotCounts := h.counts[hotIdx] + coldCounts := h.counts[coldIdx] + // ...and then check again if we really have to reduce the bucket count. + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded after all. + } + // Try the various strategies in order. + if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { + return + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { + return + } + h.doubleBucketWidth(hotCounts, coldCounts) +} + +// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration +// has been passed. It returns true if the histogram has been reset. The caller +// must have locked h.mtx. +func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { + // We are using the possibly mocked h.now() rather than + // time.Since(h.lastResetTime) to enable testing. + if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + return false + } + // Completely reset coldCounts. + h.resetCounts(cold) + // Repeat the latest observation to not lose it completely. + cold.observe(value, bucket, true) + // Make coldCounts the new hot counts while ressetting countAndHotIdx. + n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + return true +} + +// maybeWidenZeroBucket widens the zero bucket until it includes the existing +// buckets closest to the zero bucket (which could be two, if an equidistant +// negative and a positive bucket exists, but usually it's only one bucket to be +// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold +// limits how far the zero bucket can be extended, and if that's not enough to +// include an existing bucket, the method returns false. The caller must have +// locked h.mtx. +func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) + if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { + return false + } + // Find the key of the bucket closest to zero. + smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + return false + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) + if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { + return false // New threshold would exceed the max threshold. 
+ } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. 
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSchema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. 
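+//
+// For example, a bucketFactor of 1.1 yields 3: the resulting bucket growth
+// factor 2^(2^-3) ≈ 1.0905 is the closest one that does not exceed 1.1.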
+func pickSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) + } + floor := math.Floor(math.Log2(math.Log2(bucketFactor))) + switch { + case floor <= -8: + return 8 + case floor >= 4: + return -4 + default: + return -int32(floor) + } +} + +func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + + if len(ii) == 0 { + return nil, nil + } + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one ore two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. + spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} + +// addToBucket increments the sparse bucket at key by the provided amount. It +// returns true if a new sparse bucket had to be created for that. +func addToBucket(buckets *sync.Map, key int, increment int64) bool { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. + atomic.AddInt64(existingBucket.(*int64), increment) + return false + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddInt64(actualBucket.(*int64), increment) + return false + } + return true +} + +// addAndReset returns a function to be used with sync.Map.Range of spare +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). 
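+//
+// A typical invocation (sketch only, names illustrative) is
+//
+//	cold.nativeHistogramBucketsPositive.Range(addAndReset(&hot.nativeHistogramBucketsPositive, &hot.nativeHistogramBucketsNumber))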
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*int64) + if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + atomic.AddUint32(bucketNumber, 1) + } + atomic.StoreInt64(bucket, 0) + return true + } +} + +func deleteSyncMap(m *sync.Map) { + m.Range(func(k, v interface{}) bool { + m.Delete(k) + return true + }) +} + +func findSmallestKey(m *sync.Map) int { + result := math.MaxInt32 + m.Range(func(k, v interface{}) bool { + key := k.(int) + if key < result { + result = key + } + return true + }) + return result +} + +func getLe(key int, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with a key + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its key + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := key << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := key & ((1 << schema) - 1) + frac := nativeHistogramBounds[schema][fracIdx] + exp := (key >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// waitForCooldown returns after the count field in the provided histogramCounts +// has reached the provided count value. +func waitForCooldown(count uint64, counts *histogramCounts) { + for count != atomic.LoadUint64(&counts.count) { + runtime.Gosched() // Let observations get work done. 
+ } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 00000000000..1ed5abe74c1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. 
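+//
+// For example, AlmostEqualFloat64(1.0, 1.0+1e-12, 1e-9) is true, while
+// AlmostEqualFloat64(1.0, 1.1, 1e-9) is false.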
+func AlmostEqualFloat64(a, b, epsilon float64) bool { + if a == b { + return true + } + absA := math.Abs(a) + absB := math.Abs(b) + diff := math.Abs(a - b) + if a == 0 || b == 0 || absA+absB < minNormalFloat64 { + return diff < epsilon*minNormalFloat64 + } + return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon +} + +// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. +func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !AlmostEqualFloat64(a[i], b[i], epsilon) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd45cadc0c6..fd0750f2cf5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool { // If IsJunk is not defined: // // Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// // and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' +// +// k >= k' +// i <= i' +// and if i == i', j <= j' // // In other words, of all maximal matching blocks, return one that // starts earliest in a, and of all those maximal matching blocks that diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 6eee198fef0..c1b8fad36ae 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,7 +25,8 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f0941f6f001..b5119c50410 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. 
b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()), + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go index f8d50d1f911..8031e870424 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go @@ -14,114 +14,114 @@ // Package promauto provides alternative constructors for the fundamental // Prometheus metric types and their …Vec and …Func variants. The difference to // their counterparts in the prometheus package is that the promauto -// constructors return Collectors that are already registered with a -// registry. There are two sets of constructors. The constructors in the first -// set are top-level functions, while the constructors in the other set are -// methods of the Factory type. The top-level function return Collectors -// registered with the global registry (prometheus.DefaultRegisterer), while the -// methods return Collectors registered with the registry the Factory was -// constructed with. All constructors panic if the registration fails. +// constructors register the Collectors with a registry before returning them. +// There are two sets of constructors. The constructors in the first set are +// top-level functions, while the constructors in the other set are methods of +// the Factory type. The top-level function return Collectors registered with +// the global registry (prometheus.DefaultRegisterer), while the methods return +// Collectors registered with the registry the Factory was constructed with. All +// constructors panic if the registration fails. 
// // The following example is a complete program to create a histogram of normally // distributed random numbers from the math/rand package: // -// package main +// package main // -// import ( -// "math/rand" -// "net/http" +// import ( +// "math/rand" +// "net/http" // -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) // -// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) +// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) // -// func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } -// } +// func Random() { +// for { +// histogram.Observe(rand.NormFloat64()) +// } +// } // -// func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } +// func main() { +// go Random() +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } // // Prometheus's version of a minimal hello-world program: // -// package main +// package main // -// import ( -// "fmt" -// "net/http" +// import ( +// "fmt" +// "net/http" // -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) // -// func main() { -// http.Handle("/", promhttp.InstrumentHandlerCounter( -// promauto.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hello_requests_total", -// Help: "Total number of hello-world requests by HTTP code.", -// }, -// []string{"code"}, -// ), -// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprint(w, "Hello, world!") -// }), -// )) -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } +// func main() { +// http.Handle("/", promhttp.InstrumentHandlerCounter( +// promauto.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hello_requests_total", +// Help: "Total number of hello-world requests by HTTP code.", +// }, +// []string{"code"}, +// ), +// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprint(w, "Hello, world!") +// }), +// )) +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } // // A Factory is created with the With(prometheus.Registerer) function, which // enables two usage pattern. 
With(prometheus.Registerer) can be called once per // line: // -// var ( -// reg = prometheus.NewRegistry() -// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = promauto.With(reg).NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) +// var ( +// reg = prometheus.NewRegistry() +// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = promauto.With(reg).NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) // // Or it can be used to create a Factory once to be used multiple times: // -// var ( -// reg = prometheus.NewRegistry() -// factory = promauto.With(reg) -// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = factory.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) +// var ( +// reg = prometheus.NewRegistry() +// factory = promauto.With(reg) +// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = factory.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) // // This appears very handy. So why are these constructors locked away in a // separate package? 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 097aff2df64..21086781621 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), 1, rtOpts.getExemplarFn(r.Context()), ) - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() } return resp, err } @@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index bfe50098779..cca67a78a90 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,7 +28,9 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" -func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) { +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. +func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { if labels == nil { obs.Observe(val) return @@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) } -func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) { +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. 
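+// Like observeWithExemplar above, it assumes that the provided Counter also
+// implements prometheus.ExemplarAdder whenever labels are non-nil.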
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { if labels == nil { obs.Add(val) return @@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op now := time.Now() next.ServeHTTP(w, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), 1, hOpts.getExemplarFn(r.Context()), @@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), 1, hOpts.getExemplarFn(r.Context()), @@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), float64(size), hOpts.getExemplarFn(r.Context()), @@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), float64(size), hOpts.getExemplarFn(r.Context()), @@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), float64(d.Written()), hOpts.getExemplarFn(r.Context()), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go index 06dee376e15..29f6cd309c9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -15,17 +15,17 @@ // builder approach. 
Create a Pusher with New and then add the various options // by using its methods, finally calling Add or Push, like this: // -// // Easy case: -// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() +// // Easy case: +// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() // -// // Complex case: -// push.New("http://example.org/metrics", "my_job"). -// Collector(myCollector1). -// Collector(myCollector2). -// Grouping("zone", "xy"). -// Client(&myHTTPClient). -// BasicAuth("top", "secret"). -// Add() +// // Complex case: +// push.New("http://example.org/metrics", "my_job"). +// Collector(myCollector1). +// Collector(myCollector2). +// Grouping("zone", "xy"). +// Client(&myHTTPClient). +// BasicAuth("top", "secret"). +// Add() // // See the examples section for more detailed examples. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 325f665ff67..09e34d307c9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. +// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed7c71..7bc448a8939 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index 8d5f1052337..f28a76f3a62 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -25,11 +25,12 @@ type Timer struct { // NewTimer creates a new Timer. The provided Observer is used to observe a // duration in seconds. Timer is usually used to time a function call in the // following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. 
+// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), diff --git a/vendor/modules.txt b/vendor/modules.txt index 12189bf21de..3a78ffe7798 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -645,7 +645,7 @@ github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/timeinterval github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.13.0 +# github.com/prometheus/client_golang v1.14.0 ## explicit; go 1.17 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 From 99b4432c6fe61047698ba04b4d5a078cec13d2bc Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 30 Dec 2022 09:40:03 -0800 Subject: [PATCH 031/133] switch encoding/json to json iter (#5069) * switch to json iter Signed-off-by: Ben Ye * add one ut Signed-off-by: Ben Ye * fix lint Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- .../tripperware/instantquery/instant_query.go | 5 +- pkg/querier/tripperware/query.go | 54 +++++++++++++----- pkg/querier/tripperware/query_test.go | 57 +++++++++++++++++++ .../tripperware/queryrange/query_range.go | 2 +- 4 files changed, 100 insertions(+), 18 deletions(-) create mode 100644 pkg/querier/tripperware/query_test.go diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index 0af7d72627b..36aa222bddd 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -30,7 +30,8 @@ import ( var ( InstantQueryCodec tripperware.Codec = newInstantQueryCodec() - json = jsoniter.Config{ + + json = jsoniter.Config{ EscapeHTML: false, // No HTML in our responses. SortMapKeys: true, ValidateJsonRawMessage: true, @@ -155,7 +156,7 @@ func (instantQueryCodec) DecodeResponse(ctx context.Context, r *http.Response, _ log, ctx := spanlogger.New(ctx, "PrometheusInstantQueryResponse") //nolint:ineffassign,staticcheck defer log.Finish() - buf, err := tripperware.BodyBuffer(r) + buf, err := tripperware.BodyBuffer(r, log) if err != nil { log.Error(err) return nil, err diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 8bff06ccb6e..90571d089d2 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "context" - "encoding/json" "io" "net/http" "sort" @@ -13,6 +12,7 @@ import ( "time" "unsafe" + "github.com/go-kit/log" "github.com/gogo/protobuf/proto" jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" @@ -20,6 +20,15 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/util/runutil" +) + +var ( + json = jsoniter.Config{ + EscapeHTML: false, // No HTML in our responses. + SortMapKeys: true, + ValidateJsonRawMessage: true, + }.Froze() ) // Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares. 
@@ -73,6 +82,32 @@ type Request interface { WithStats(stats string) Request } +func encodeSampleStream(ptr unsafe.Pointer, stream *jsoniter.Stream) { + ss := (*SampleStream)(ptr) + stream.WriteObjectStart() + + stream.WriteObjectField(`metric`) + lbls, err := cortexpb.FromLabelAdaptersToLabels(ss.Labels).MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), lbls...)) + stream.WriteMore() + + stream.WriteObjectField(`values`) + stream.WriteArrayStart() + for i, sample := range ss.Samples { + if i != 0 { + stream.WriteMore() + } + cortexpb.SampleJsoniterEncode(unsafe.Pointer(&sample), stream) + } + stream.WriteArrayEnd() + + stream.WriteObjectEnd() +} + // UnmarshalJSON implements json.Unmarshaler. func (s *SampleStream) UnmarshalJSON(data []byte) error { var stream struct { @@ -87,18 +122,6 @@ func (s *SampleStream) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements json.Marshaler. -func (s *SampleStream) MarshalJSON() ([]byte, error) { - stream := struct { - Metric model.Metric `json:"metric"` - Values []cortexpb.Sample `json:"values"` - }{ - Metric: cortexpb.FromLabelAdaptersToMetric(s.Labels), - Values: s.Samples, - } - return json.Marshal(stream) -} - func PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { if !iter.ReadArray() { iter.ReportError("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", "expected [") @@ -135,6 +158,7 @@ func PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode(ptr unsafe.Poi func init() { jsoniter.RegisterTypeEncoderFunc("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode, func(unsafe.Pointer) bool { return false }) jsoniter.RegisterTypeDecoderFunc("tripperware.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode) + jsoniter.RegisterTypeEncoderFunc("tripperware.SampleStream", encodeSampleStream, func(unsafe.Pointer) bool { return false }) } func EncodeTime(t int64) string { @@ -148,7 +172,7 @@ type Buffer interface { Bytes() []byte } -func BodyBuffer(res *http.Response) ([]byte, error) { +func BodyBuffer(res *http.Response, logger log.Logger) ([]byte, error) { var buf *bytes.Buffer // Attempt to cast the response body to a Buffer and use it if possible. 
@@ -169,10 +193,10 @@ func BodyBuffer(res *http.Response) ([]byte, error) { // if the response is gziped, lets unzip it here if strings.EqualFold(res.Header.Get("Content-Encoding"), "gzip") { gReader, err := gzip.NewReader(buf) - if err != nil { return nil, err } + defer runutil.CloseWithLogOnErr(logger, gReader, "close gzip reader") return io.ReadAll(gReader) } diff --git a/pkg/querier/tripperware/query_test.go b/pkg/querier/tripperware/query_test.go new file mode 100644 index 00000000000..2880ec43f05 --- /dev/null +++ b/pkg/querier/tripperware/query_test.go @@ -0,0 +1,57 @@ +package tripperware + +import ( + stdjson "encoding/json" + "fmt" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/cortexpb" +) + +func TestMarshalSampleStream(t *testing.T) { + for i, tc := range []struct { + sampleStream SampleStream + }{ + { + sampleStream: SampleStream{ + Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1}}, + }, + }, + { + sampleStream: SampleStream{ + Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{"foo": "bar"})), + Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1}}, + }, + }, + { + sampleStream: SampleStream{ + Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{"foo": "bar", "test": "test", "a": "b"})), + Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1}, {Value: 2, TimestampMs: 2}, {Value: 3, TimestampMs: 3}}, + }, + }, + } { + t.Run(fmt.Sprintf("test-case-%d", i), func(t *testing.T) { + out1, err := json.Marshal(tc.sampleStream) + require.NoError(t, err) + out2, err := tc.sampleStream.MarshalJSON() + require.NoError(t, err) + require.Equal(t, out1, out2) + }) + } +} + +// MarshalJSON implements json.Marshaler. 
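+// It is kept here in the test file, backed by encoding/json, as the reference
+// implementation that TestMarshalSampleStream compares the registered
+// jsoniter encoder (encodeSampleStream) against.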
+func (s *SampleStream) MarshalJSON() ([]byte, error) { + stream := struct { + Metric model.Metric `json:"metric"` + Values []cortexpb.Sample `json:"values"` + }{ + Metric: cortexpb.FromLabelAdaptersToMetric(s.Labels), + Values: s.Samples, + } + return stdjson.Marshal(stream) +} diff --git a/pkg/querier/tripperware/queryrange/query_range.go b/pkg/querier/tripperware/queryrange/query_range.go index cb94e8b1196..56654f18be2 100644 --- a/pkg/querier/tripperware/queryrange/query_range.go +++ b/pkg/querier/tripperware/queryrange/query_range.go @@ -264,7 +264,7 @@ func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ t log, ctx := spanlogger.New(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck defer log.Finish() - buf, err := tripperware.BodyBuffer(r) + buf, err := tripperware.BodyBuffer(r, log) if err != nil { log.Error(err) return nil, err From d6122219d8f4ebf41a22acc3cd06da80dd6562ee Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Sat, 31 Dec 2022 13:28:23 +0100 Subject: [PATCH 032/133] Run integration tests in parallel (#5073) Run integration tests in parallel Signed-off-by: Alex Le --- .github/workflows/test-build-deploy.yml | 43 +++++++++++-------- .golangci.yml | 6 +++ Makefile | 2 +- integration/alertmanager_test.go | 4 +- integration/asserts.go | 4 +- integration/backward_compatibility_test.go | 4 +- integration/configs.go | 4 +- ...tegration_memberlist_single_binary_test.go | 4 +- integration/querier_remote_read_test.go | 4 +- integration/querier_sharding_test.go | 4 +- integration/querier_tenant_federation_test.go | 4 +- integration/querier_test.go | 4 +- integration/ruler_test.go | 4 +- integration/util.go | 4 +- 14 files changed, 53 insertions(+), 42 deletions(-) diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 67682ad6092..469f98d4f0e 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -88,6 +88,16 @@ jobs: integration: needs: build runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + tags: + - requires_docker + - integration_alertmanager + - integration_backward_compatibility + - integration_memberlist + - integration_querier + - integration_ruler steps: - name: Upgrade golang uses: actions/setup-go@v2 @@ -116,27 +126,22 @@ jobs: # locally and the download time doesn't account in the test execution time, which is subject # to a timeout run: | - docker pull minio/minio:RELEASE.2019-12-30T05-45-39Z - docker pull amazon/dynamodb-local:1.11.477 + docker pull minio/minio:RELEASE.2021-10-13T00-23-17Z docker pull consul:1.8.4 docker pull gcr.io/etcd-development/etcd:v3.4.7 - docker pull quay.io/cortexproject/cortex:v1.0.0 - docker pull quay.io/cortexproject/cortex:v1.1.0 - docker pull quay.io/cortexproject/cortex:v1.2.0 - docker pull quay.io/cortexproject/cortex:v1.3.0 - docker pull quay.io/cortexproject/cortex:v1.4.0 - docker pull quay.io/cortexproject/cortex:v1.5.0 - docker pull quay.io/cortexproject/cortex:v1.6.0 - docker pull quay.io/cortexproject/cortex:v1.7.0 - docker pull quay.io/cortexproject/cortex:v1.8.0 - docker pull quay.io/cortexproject/cortex:v1.9.0 - docker pull quay.io/cortexproject/cortex:v1.10.0 - docker pull quay.io/cortexproject/cortex:v1.11.1 - docker pull quay.io/cortexproject/cortex:v1.13.1 - docker pull quay.io/cortexproject/cortex:v1.14.0 - docker pull shopify/bigtable-emulator:0.1.0 + if [ "$TEST_TAGS" = "integration_backward_compatibility" ]; then + docker pull quay.io/cortexproject/cortex:v1.6.0 + 
docker pull quay.io/cortexproject/cortex:v1.7.0 + docker pull quay.io/cortexproject/cortex:v1.8.0 + docker pull quay.io/cortexproject/cortex:v1.9.0 + docker pull quay.io/cortexproject/cortex:v1.10.0 + docker pull quay.io/cortexproject/cortex:v1.11.1 + docker pull quay.io/cortexproject/cortex:v1.13.1 + docker pull quay.io/cortexproject/cortex:v1.14.0 + fi docker pull memcached:1.6.1 - docker pull bouncestorage/swift-aio:55ba4331 + env: + TEST_TAGS: ${{ matrix.tags }} - name: Integration Tests run: | export CORTEX_IMAGE_PREFIX="${IMAGE_PREFIX:-quay.io/cortexproject/}" @@ -144,7 +149,7 @@ jobs: export CORTEX_IMAGE="${CORTEX_IMAGE_PREFIX}cortex:$IMAGE_TAG-amd64" export CORTEX_CHECKOUT_DIR="/go/src/github.com/cortexproject/cortex" echo "Running integration tests with image: $CORTEX_IMAGE" - go test -tags=requires_docker -timeout 2400s -v -count=1 ./integration/... + go test -tags=integration,${{ matrix.tags }} -timeout 2400s -v -count=1 ./integration/... env: IMAGE_PREFIX: ${{ secrets.IMAGE_PREFIX }} diff --git a/.golangci.yml b/.golangci.yml index 6667d2703aa..144f7b4272a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -33,3 +33,9 @@ run: build-tags: - netgo - requires_docker + - integration + - integration_alertmanager + - integration_backward_compatibility + - integration_memberlist + - integration_querier + - integration_ruler diff --git a/Makefile b/Makefile index da88ad9cb9e..593adc901a6 100644 --- a/Makefile +++ b/Makefile @@ -184,7 +184,7 @@ lint: golangci-lint run # Ensure no blocklisted package is imported. - GOFLAGS="-tags=requires_docker" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ + GOFLAGS="-tags=requires_docker,integration,integration_alertmanager,integration_backward_compatibility,integration_memberlist,integration_querier,integration_ruler" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ golang.org/x/net/context=context,\ sync/atomic=go.uber.org/atomic,\ github.com/prometheus/client_golang/prometheus.{MultiError}=github.com/prometheus/prometheus/tsdb/errors.{NewMulti},\ diff --git a/integration/alertmanager_test.go b/integration/alertmanager_test.go index 8555bcc090b..2ceb5e00131 100644 --- a/integration/alertmanager_test.go +++ b/integration/alertmanager_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_alertmanager +// +build integration_alertmanager package integration diff --git a/integration/asserts.go b/integration/asserts.go index bb07c4ee17c..b19de6dfb32 100644 --- a/integration/asserts.go +++ b/integration/asserts.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration +// +build integration package integration diff --git a/integration/backward_compatibility_test.go b/integration/backward_compatibility_test.go index 62eb230e7d8..a1e2ff0343c 100644 --- a/integration/backward_compatibility_test.go +++ b/integration/backward_compatibility_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_backward_compatibility +// +build integration_backward_compatibility package integration diff --git a/integration/configs.go b/integration/configs.go index 8d5a7bc9716..cffea19a5f5 100644 --- a/integration/configs.go +++ b/integration/configs.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration +// +build integration package integration diff --git a/integration/integration_memberlist_single_binary_test.go 
b/integration/integration_memberlist_single_binary_test.go index 4ca4d5a4e18..ce62772dc70 100644 --- a/integration/integration_memberlist_single_binary_test.go +++ b/integration/integration_memberlist_single_binary_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_memberlist +// +build integration_memberlist package integration diff --git a/integration/querier_remote_read_test.go b/integration/querier_remote_read_test.go index 9de69e00aa2..2e3b0ee20ab 100644 --- a/integration/querier_remote_read_test.go +++ b/integration/querier_remote_read_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_querier +// +build integration_querier package integration diff --git a/integration/querier_sharding_test.go b/integration/querier_sharding_test.go index da37dd90c23..f9d12f7f3bd 100644 --- a/integration/querier_sharding_test.go +++ b/integration/querier_sharding_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_querier +// +build integration_querier package integration diff --git a/integration/querier_tenant_federation_test.go b/integration/querier_tenant_federation_test.go index 032edb48d6d..9bdee25f2df 100644 --- a/integration/querier_tenant_federation_test.go +++ b/integration/querier_tenant_federation_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_querier +// +build integration_querier package integration diff --git a/integration/querier_test.go b/integration/querier_test.go index a8a7c56712e..7f6f0cb5584 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_querier +// +build integration_querier package integration diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 9e420490faf..bb974051ee6 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration_ruler +// +build integration_ruler package integration diff --git a/integration/util.go b/integration/util.go index 88720f18ec2..34355697d30 100644 --- a/integration/util.go +++ b/integration/util.go @@ -1,5 +1,5 @@ -//go:build requires_docker -// +build requires_docker +//go:build integration +// +build integration package integration From 0fda0d1056ed0baf3a4b42e684563475f83a59e1 Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Tue, 3 Jan 2023 10:29:37 +0100 Subject: [PATCH 033/133] Get second port without closing first one to prevent port clashing (#5077) Get second port without closing first one to prevent port clashing Signed-off-by: Alex Le --- pkg/util/tls/test/tls_integration_test.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/pkg/util/tls/test/tls_integration_test.go b/pkg/util/tls/test/tls_integration_test.go index f5ce81105c0..957c018b955 100644 --- a/pkg/util/tls/test/tls_integration_test.go +++ b/pkg/util/tls/test/tls_integration_test.go @@ -51,21 +51,21 @@ func (h *grpcHealthCheck) Watch(_ *grpc_health_v1.HealthCheckRequest, _ grpc_hea return status.Error(codes.Unimplemented, "Watching is not supported") } -func getLocalHostPort() (int, error) { +func getLocalHostPort() (int, func() error, error) { addr, err := net.ResolveTCPAddr("tcp", "localhost:0") if err != nil { - return 0, err + return 0, nil, err } l, err := net.ListenTCP("tcp", 
addr) if err != nil { - return 0, err + return 0, nil, err } - if err := l.Close(); err != nil { - return 0, err + closePort := func() error { + return l.Close() } - return l.Addr().(*net.TCPAddr).Port, nil + return l.Addr().(*net.TCPAddr).Port, closePort, nil } func newIntegrationClientServer( @@ -80,9 +80,14 @@ func newIntegrationClientServer( prometheus.DefaultRegisterer = savedRegistry }() - grpcPort, err := getLocalHostPort() + grpcPort, closeGrpcPort, err := getLocalHostPort() require.NoError(t, err) - httpPort, err := getLocalHostPort() + httpPort, closeHTTPPort, err := getLocalHostPort() + require.NoError(t, err) + + err = closeGrpcPort() + require.NoError(t, err) + err = closeHTTPPort() require.NoError(t, err) cfg.HTTPListenPort = httpPort From 0908c6f11feba6fcf433bf3deee5ec04af96ae0a Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Tue, 3 Jan 2023 11:56:38 +0100 Subject: [PATCH 034/133] Parallelize distributor tests (#5076) Parallelize distributor tests Signed-off-by: Alex Le --- pkg/distributor/distributor_ring_test.go | 2 + pkg/distributor/distributor_test.go | 77 ++++++++++++++++--- pkg/distributor/ha_tracker_test.go | 53 +++++++++---- .../ingestion_rate_strategy_test.go | 2 + pkg/distributor/query_test.go | 7 ++ 5 files changed, 114 insertions(+), 27 deletions(-) diff --git a/pkg/distributor/distributor_ring_test.go b/pkg/distributor/distributor_ring_test.go index 24811aae903..8bd15e99799 100644 --- a/pkg/distributor/distributor_ring_test.go +++ b/pkg/distributor/distributor_ring_test.go @@ -11,6 +11,7 @@ import ( ) func TestRingConfig_DefaultConfigToLifecyclerConfig(t *testing.T) { + t.Parallel() cfg := RingConfig{} expected := ring.LifecyclerConfig{} @@ -30,6 +31,7 @@ func TestRingConfig_DefaultConfigToLifecyclerConfig(t *testing.T) { } func TestRingConfig_CustomConfigToLifecyclerConfig(t *testing.T) { + t.Parallel() cfg := RingConfig{} expected := ring.LifecyclerConfig{} diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 3ac53ae77da..7f87eb7864b 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -57,6 +57,7 @@ var ( ) func TestConfig_Validate(t *testing.T) { + t.Parallel() tests := map[string]struct { initConfig func(*Config) initLimits func(*validation.Limits) @@ -95,7 +96,9 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { + testData := testData // Needed for t.Parallel to work correctly t.Run(testName, func(t *testing.T) { + t.Parallel() cfg := Config{} limits := validation.Limits{} flagext.DefaultValues(&cfg, &limits) @@ -109,11 +112,12 @@ func TestConfig_Validate(t *testing.T) { } func TestDistributor_Push(t *testing.T) { + t.Parallel() // Metrics to assert on. lastSeenTimestamp := "cortex_distributor_latest_seen_sample_timestamp_seconds" distributorAppend := "cortex_distributor_ingester_appends_total" distributorAppendFailure := "cortex_distributor_ingester_append_failures_total" - ctx := user.InjectOrgID(context.Background(), "user") + ctx := user.InjectOrgID(context.Background(), "userDistributorPush") type samplesIn struct { num int @@ -145,7 +149,7 @@ func TestDistributor_Push(t *testing.T) { expectedMetrics: ` # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. 
# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - cortex_distributor_latest_seen_sample_timestamp_seconds{user="user"} 123456789.004 + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004 `, }, "A push to 2 happy ingesters should succeed": { @@ -158,7 +162,7 @@ func TestDistributor_Push(t *testing.T) { expectedMetrics: ` # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - cortex_distributor_latest_seen_sample_timestamp_seconds{user="user"} 123456789.004 + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004 `, }, "A push to 1 happy ingesters should fail": { @@ -170,7 +174,7 @@ func TestDistributor_Push(t *testing.T) { expectedMetrics: ` # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - cortex_distributor_latest_seen_sample_timestamp_seconds{user="user"} 123456789.009 + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.009 `, }, "A push to 0 happy ingesters should fail": { @@ -182,7 +186,7 @@ func TestDistributor_Push(t *testing.T) { expectedMetrics: ` # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - cortex_distributor_latest_seen_sample_timestamp_seconds{user="user"} 123456789.009 + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.009 `, }, "A push exceeding burst size should fail": { @@ -195,7 +199,7 @@ func TestDistributor_Push(t *testing.T) { expectedMetrics: ` # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. 
# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - cortex_distributor_latest_seen_sample_timestamp_seconds{user="user"} 123456789.024 + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.024 `, }, "A push to ingesters should report the correct metrics with no metadata": { @@ -256,7 +260,11 @@ func TestDistributor_Push(t *testing.T) { }, } { for _, shardByAllLabels := range []bool{true, false} { + tc := tc + name := name + shardByAllLabels := shardByAllLabels t.Run(fmt.Sprintf("[%s](shardByAllLabels=%v)", name, shardByAllLabels), func(t *testing.T) { + t.Parallel() limits := &validation.Limits{} flagext.DefaultValues(limits) limits.IngestionRate = 20 @@ -291,6 +299,7 @@ func TestDistributor_Push(t *testing.T) { } func TestDistributor_MetricsCleanup(t *testing.T) { + t.Parallel() dists, _, regs, _ := prepare(t, prepConfig{ numDistributors: 1, }) @@ -399,6 +408,7 @@ func TestDistributor_MetricsCleanup(t *testing.T) { } func TestDistributor_PushIngestionRateLimiter(t *testing.T) { + t.Parallel() type testPush struct { samples int metadata int @@ -461,6 +471,7 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() limits := &validation.Limits{} flagext.DefaultValues(limits) limits.IngestionRateStrategy = testData.ingestionRateStrategy @@ -494,6 +505,7 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { } func TestPush_QuorumError(t *testing.T) { + t.Parallel() var limits validation.Limits flagext.DefaultValues(&limits) @@ -604,6 +616,7 @@ func TestPush_QuorumError(t *testing.T) { } func TestDistributor_PushInstanceLimits(t *testing.T) { + t.Parallel() type testPush struct { samples int @@ -713,6 +726,7 @@ func TestDistributor_PushInstanceLimits(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() limits := &validation.Limits{} flagext.DefaultValues(limits) @@ -754,6 +768,7 @@ func TestDistributor_PushInstanceLimits(t *testing.T) { } func TestDistributor_PushHAInstances(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "user") for i, tc := range []struct { @@ -803,7 +818,10 @@ func TestDistributor_PushHAInstances(t *testing.T) { }, } { for _, shardByAllLabels := range []bool{true, false} { + tc := tc + shardByAllLabels := shardByAllLabels t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v)", i, shardByAllLabels), func(t *testing.T) { + t.Parallel() var limits validation.Limits flagext.DefaultValues(&limits) limits.AcceptHASamples = true @@ -841,6 +859,7 @@ func TestDistributor_PushHAInstances(t *testing.T) { } func TestDistributor_PushQuery(t *testing.T) { + t.Parallel() const shuffleShardSize = 5 ctx := user.InjectOrgID(context.Background(), "user") @@ -972,7 +991,9 @@ func TestDistributor_PushQuery(t *testing.T) { } for _, tc := range testcases { + tc := tc t.Run(tc.name, func(t *testing.T) { + t.Parallel() ds, ingesters, _, _ := prepare(t, prepConfig{ numIngesters: tc.numIngesters, happyIngesters: tc.happyIngesters, @@ -1016,6 +1037,7 @@ func TestDistributor_PushQuery(t *testing.T) { } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReached(t *testing.T) { + t.Parallel() const maxChunksLimit = 30 // Chunks are duplicated due to replication factor. 
ctx := user.InjectOrgID(context.Background(), "user") @@ -1072,6 +1094,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReached(t *testing.T) { + t.Parallel() const maxSeriesLimit = 10 ctx := user.InjectOrgID(context.Background(), "user") @@ -1124,6 +1147,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReac } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIsReached(t *testing.T) { + t.Parallel() const seriesToAdd = 10 ctx := user.InjectOrgID(context.Background(), "user") @@ -1193,6 +1217,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIs } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsReached(t *testing.T) { + t.Parallel() const seriesToAdd = 10 ctx := user.InjectOrgID(context.Background(), "user") @@ -1262,6 +1287,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsR } func TestDistributor_Push_LabelRemoval(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "user") type testcase struct { @@ -1350,6 +1376,7 @@ func TestDistributor_Push_LabelRemoval(t *testing.T) { } func TestDistributor_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "user") type testcase struct { inputSeries labels.Labels @@ -1391,6 +1418,7 @@ func TestDistributor_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) } func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "user") tests := map[string]struct { inputSeries labels.Labels @@ -1468,7 +1496,9 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * limits.AcceptHASamples = true for testName, testData := range tests { + testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() ds, ingesters, _, _ := prepare(t, prepConfig{ numIngesters: 2, happyIngesters: 2, @@ -1497,6 +1527,7 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * } func TestDistributor_Push_LabelNameValidation(t *testing.T) { + t.Parallel() inputLabels := labels.Labels{ {Name: model.MetricNameLabel, Value: "foo"}, {Name: "999.illegal", Value: "baz"}, @@ -1528,7 +1559,9 @@ func TestDistributor_Push_LabelNameValidation(t *testing.T) { } for testName, tc := range tests { + tc := tc t.Run(testName, func(t *testing.T) { + t.Parallel() ds, _, _, _ := prepare(t, prepConfig{ numIngesters: 2, happyIngesters: 2, @@ -1550,6 +1583,7 @@ func TestDistributor_Push_LabelNameValidation(t *testing.T) { } func TestDistributor_Push_ExemplarValidation(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "user") manyLabels := []string{model.MetricNameLabel, "test"} for i := 1; i < 31; i++ { @@ -1590,7 +1624,9 @@ func TestDistributor_Push_ExemplarValidation(t *testing.T) { } for testName, tc := range tests { + tc := tc t.Run(testName, func(t *testing.T) { + t.Parallel() ds, _, _, _ := prepare(t, prepConfig{ numIngesters: 2, happyIngesters: 2, @@ -1909,12 +1945,16 @@ func BenchmarkDistributor_Push(b *testing.B) { } func TestSlowQueries(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "user") nameMatcher := mustEqualMatcher(model.MetricNameLabel, "foo") nIngesters := 3 for _, shardByAllLabels := range []bool{true, false} { 
for happy := 0; happy <= nIngesters; happy++ { + shardByAllLabels := shardByAllLabels + happy := happy t.Run(fmt.Sprintf("%t/%d", shardByAllLabels, happy), func(t *testing.T) { + t.Parallel() var expectedErr error if nIngesters-happy > 1 { expectedErr = errFail @@ -1939,6 +1979,7 @@ func TestSlowQueries(t *testing.T) { } func TestDistributor_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) { + t.Parallel() // Create distributor ds, ing, _, _ := prepare(t, prepConfig{ numIngesters: 3, @@ -1969,6 +2010,7 @@ func TestDistributor_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) { } func TestDistributor_MetricsForLabelMatchers(t *testing.T) { + t.Parallel() const numIngesters = 5 fixtures := []struct { @@ -2102,7 +2144,9 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { } for testName, testData := range tests { + testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() now := model.Now() // Create distributor @@ -2251,6 +2295,7 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { } func TestDistributor_MetricsMetadata(t *testing.T) { + t.Parallel() const numIngesters = 5 tests := map[string]struct { @@ -2275,7 +2320,9 @@ func TestDistributor_MetricsMetadata(t *testing.T) { } for testName, testData := range tests { + testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() // Create distributor ds, ingesters, _, _ := prepare(t, prepConfig{ numIngesters: numIngesters, @@ -2956,6 +3003,7 @@ outer: } func TestDistributorValidation(t *testing.T) { + t.Parallel() ctx := user.InjectOrgID(context.Background(), "1") now := model.Now() future, past := now.Add(5*time.Hour), now.Add(-25*time.Hour) @@ -3027,7 +3075,9 @@ func TestDistributorValidation(t *testing.T) { err: httpgrpc.Errorf(http.StatusBadRequest, `metadata missing metric name`), }, } { + tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { + t.Parallel() var limits validation.Limits flagext.DefaultValues(&limits) @@ -3051,6 +3101,7 @@ func TestDistributorValidation(t *testing.T) { } func TestRemoveReplicaLabel(t *testing.T) { + t.Parallel() replicaLabel := "replica" clusterLabel := "cluster" cases := []struct { @@ -3096,6 +3147,7 @@ func TestRemoveReplicaLabel(t *testing.T) { // This is not great, but we deal with unsorted labels when validating labels. 
func TestShardByAllLabelsReturnsWrongResultsForUnsortedLabels(t *testing.T) { + t.Parallel() val1 := shardByAllLabels("test", []cortexpb.LabelAdapter{ {Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}, @@ -3112,6 +3164,7 @@ func TestShardByAllLabelsReturnsWrongResultsForUnsortedLabels(t *testing.T) { } func TestSortLabels(t *testing.T) { + t.Parallel() sorted := []cortexpb.LabelAdapter{ {Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}, @@ -3139,7 +3192,8 @@ func TestSortLabels(t *testing.T) { } func TestDistributor_Push_Relabel(t *testing.T) { - ctx := user.InjectOrgID(context.Background(), "user") + t.Parallel() + ctx := user.InjectOrgID(context.Background(), "userDistributorPushRelabel") type testcase struct { name string @@ -3211,7 +3265,9 @@ func TestDistributor_Push_Relabel(t *testing.T) { } for _, tc := range cases { + tc := tc t.Run(tc.name, func(t *testing.T) { + t.Parallel() var err error var limits validation.Limits flagext.DefaultValues(&limits) @@ -3244,6 +3300,7 @@ func TestDistributor_Push_Relabel(t *testing.T) { } func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing.T) { + t.Parallel() metricRelabelConfigs := []*relabel.Config{ { SourceLabels: []model.LabelName{"__name__"}, @@ -3281,7 +3338,7 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing // Push the series to the distributor req := mockWriteRequest(inputSeries, 1, 1) - ctx := user.InjectOrgID(context.Background(), "user1") + ctx := user.InjectOrgID(context.Background(), "userDistributorPushRelabelDropWillExportMetricOfDroppedSamples") _, err = ds[0].Push(ctx, req) require.NoError(t, err) @@ -3297,10 +3354,10 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing expectedMetrics := ` # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{reason="relabel_configuration",user="user1"} 1 + cortex_discarded_samples_total{reason="relabel_configuration",user="userDistributorPushRelabelDropWillExportMetricOfDroppedSamples"} 1 # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. # TYPE cortex_distributor_received_samples_total counter - cortex_distributor_received_samples_total{user="user1"} 1 + cortex_distributor_received_samples_total{user="userDistributorPushRelabelDropWillExportMetricOfDroppedSamples"} 1 ` require.NoError(t, testutil.GatherAndCompare(regs[0], strings.NewReader(expectedMetrics), metrics...)) diff --git a/pkg/distributor/ha_tracker_test.go b/pkg/distributor/ha_tracker_test.go index 0947afd69a0..e18a4e2d15c 100644 --- a/pkg/distributor/ha_tracker_test.go +++ b/pkg/distributor/ha_tracker_test.go @@ -116,7 +116,9 @@ func TestHATrackerConfig_Validate(t *testing.T) { } for testName, testData := range tests { + testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() assert.Equal(t, testData.expectedErr, testData.cfg.Validate()) }) } @@ -124,6 +126,7 @@ func TestHATrackerConfig_Validate(t *testing.T) { // Test that values are set in the HATracker after WatchPrefix has found it in the KVStore. 
func TestWatchPrefixAssignment(t *testing.T) { + t.Parallel() cluster := "c1" replica := "r1" @@ -154,6 +157,7 @@ func TestWatchPrefixAssignment(t *testing.T) { } func TestCheckReplicaOverwriteTimeout(t *testing.T) { + t.Parallel() replica1 := "replica1" replica2 := "replica2" @@ -191,8 +195,10 @@ func TestCheckReplicaOverwriteTimeout(t *testing.T) { } func TestCheckReplicaMultiCluster(t *testing.T) { + t.Parallel() replica1 := "replica1" replica2 := "replica2" + user := "userCheckReplicaMultiCluster" reg := prometheus.NewPedanticRegistry() c, err := newHATracker(HATrackerConfig{ @@ -209,21 +215,21 @@ func TestCheckReplicaMultiCluster(t *testing.T) { now := time.Now() // Write the first time. - err = c.checkReplica(context.Background(), "user", "c1", replica1, now) + err = c.checkReplica(context.Background(), user, "c1", replica1, now) assert.NoError(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica1, now) + err = c.checkReplica(context.Background(), user, "c2", replica1, now) assert.NoError(t, err) // Reject samples from replica 2 in each cluster. - err = c.checkReplica(context.Background(), "user", "c1", replica2, now) + err = c.checkReplica(context.Background(), user, "c1", replica2, now) assert.Error(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica2, now) + err = c.checkReplica(context.Background(), user, "c2", replica2, now) assert.Error(t, err) // We should still accept from replica 1. - err = c.checkReplica(context.Background(), "user", "c1", replica1, now) + err = c.checkReplica(context.Background(), user, "c1", replica1, now) assert.NoError(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica1, now) + err = c.checkReplica(context.Background(), user, "c2", replica1, now) assert.NoError(t, err) // We expect no CAS operation failures. @@ -241,8 +247,10 @@ func TestCheckReplicaMultiCluster(t *testing.T) { } func TestCheckReplicaMultiClusterTimeout(t *testing.T) { + t.Parallel() replica1 := "replica1" replica2 := "replica2" + user := "userCheckReplicaMultiClusterTimeout" reg := prometheus.NewPedanticRegistry() c, err := newHATracker(HATrackerConfig{ @@ -259,39 +267,39 @@ func TestCheckReplicaMultiClusterTimeout(t *testing.T) { now := time.Now() // Write the first time. - err = c.checkReplica(context.Background(), "user", "c1", replica1, now) + err = c.checkReplica(context.Background(), user, "c1", replica1, now) assert.NoError(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica1, now) + err = c.checkReplica(context.Background(), user, "c2", replica1, now) assert.NoError(t, err) // Reject samples from replica 2 in each cluster. - err = c.checkReplica(context.Background(), "user", "c1", replica2, now) + err = c.checkReplica(context.Background(), user, "c1", replica2, now) assert.Error(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica2, now) + err = c.checkReplica(context.Background(), user, "c2", replica2, now) assert.Error(t, err) // Accept a sample for replica1 in C2. now = now.Add(500 * time.Millisecond) - err = c.checkReplica(context.Background(), "user", "c2", replica1, now) + err = c.checkReplica(context.Background(), user, "c2", replica1, now) assert.NoError(t, err) // Reject samples from replica 2 in each cluster. 
- err = c.checkReplica(context.Background(), "user", "c1", replica2, now) + err = c.checkReplica(context.Background(), user, "c1", replica2, now) assert.Error(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica2, now) + err = c.checkReplica(context.Background(), user, "c2", replica2, now) assert.Error(t, err) // Wait more than the failover timeout. now = now.Add(1100 * time.Millisecond) // Accept a sample from c1/replica2. - err = c.checkReplica(context.Background(), "user", "c1", replica2, now) + err = c.checkReplica(context.Background(), user, "c1", replica2, now) assert.NoError(t, err) // We should still accept from c2/replica1 but reject from c1/replica1. - err = c.checkReplica(context.Background(), "user", "c1", replica1, now) + err = c.checkReplica(context.Background(), user, "c1", replica1, now) assert.Error(t, err) - err = c.checkReplica(context.Background(), "user", "c2", replica1, now) + err = c.checkReplica(context.Background(), user, "c2", replica1, now) assert.NoError(t, err) // We expect no CAS operation failures. @@ -310,6 +318,7 @@ func TestCheckReplicaMultiClusterTimeout(t *testing.T) { // Test that writes only happen every update timeout. func TestCheckReplicaUpdateTimeout(t *testing.T) { + t.Parallel() replica := "r1" cluster := "c1" user := "user" @@ -360,6 +369,7 @@ func TestCheckReplicaUpdateTimeout(t *testing.T) { // Test that writes only happen every write timeout. func TestCheckReplicaMultiUser(t *testing.T) { + t.Parallel() replica := "r1" cluster := "c1" @@ -441,7 +451,9 @@ func TestCheckReplicaUpdateTimeoutJitter(t *testing.T) { } for testName, testData := range tests { + testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() // Init HA tracker codec := GetReplicaDescCodec() kvStore, closer := consul.NewInMemoryClient(codec, log.NewNopLogger(), nil) @@ -483,6 +495,7 @@ func TestCheckReplicaUpdateTimeoutJitter(t *testing.T) { } func TestFindHALabels(t *testing.T) { + t.Parallel() replicaLabel, clusterLabel := "replica", "cluster" type expectedOutput struct { cluster string @@ -530,6 +543,7 @@ func TestFindHALabels(t *testing.T) { } func TestHATrackerConfig_ShouldCustomizePrefixDefaultValue(t *testing.T) { + t.Parallel() haConfig := HATrackerConfig{} ringConfig := ring.Config{} flagext.DefaultValues(&haConfig) @@ -540,6 +554,7 @@ func TestHATrackerConfig_ShouldCustomizePrefixDefaultValue(t *testing.T) { } func TestHAClustersLimit(t *testing.T) { + t.Parallel() const userID = "user" codec := GetReplicaDescCodec() @@ -611,6 +626,7 @@ func waitForClustersUpdate(t *testing.T, expected int, tr *haTracker, userID str } func TestTooManyClustersError(t *testing.T) { + t.Parallel() var err error = tooManyClustersError{limit: 10} assert.True(t, errors.Is(err, tooManyClustersError{})) assert.True(t, errors.Is(err, &tooManyClustersError{})) @@ -625,6 +641,7 @@ func TestTooManyClustersError(t *testing.T) { } func TestReplicasNotMatchError(t *testing.T) { + t.Parallel() var err error = replicasNotMatchError{replica: "a", elected: "b"} assert.True(t, errors.Is(err, replicasNotMatchError{})) assert.True(t, errors.Is(err, &replicasNotMatchError{})) @@ -647,6 +664,7 @@ func (l trackerLimits) MaxHAClusters(_ string) int { } func TestHATracker_MetricsCleanup(t *testing.T) { + t.Parallel() reg := prometheus.NewPedanticRegistry() tr, err := newHATracker(HATrackerConfig{EnableHATracker: false}, nil, reg, log.NewNopLogger()) require.NoError(t, err) @@ -705,9 +723,10 @@ func TestHATracker_MetricsCleanup(t *testing.T) { } func 
TestCheckReplicaCleanup(t *testing.T) { + t.Parallel() replica := "r1" cluster := "c1" - userID := "user" + userID := "userCheckReplicaCleanup" ctx := user.InjectOrgID(context.Background(), userID) reg := prometheus.NewPedanticRegistry() diff --git a/pkg/distributor/ingestion_rate_strategy_test.go b/pkg/distributor/ingestion_rate_strategy_test.go index 8a5e0d577f0..2820c2fb59d 100644 --- a/pkg/distributor/ingestion_rate_strategy_test.go +++ b/pkg/distributor/ingestion_rate_strategy_test.go @@ -13,6 +13,7 @@ import ( ) func TestIngestionRateStrategy(t *testing.T) { + t.Parallel() tests := map[string]struct { limits validation.Limits ring ReadLifecycler @@ -59,6 +60,7 @@ func TestIngestionRateStrategy(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { + t.Parallel() var strategy limiter.RateLimiterStrategy // Init limits overrides diff --git a/pkg/distributor/query_test.go b/pkg/distributor/query_test.go index eced05a2c8d..9e229746792 100644 --- a/pkg/distributor/query_test.go +++ b/pkg/distributor/query_test.go @@ -14,6 +14,7 @@ import ( ) func TestMergeSamplesIntoFirstDuplicates(t *testing.T) { + t.Parallel() a := []cortexpb.Sample{ {Value: 1.084537996, TimestampMs: 1583946732744}, {Value: 1.086111723, TimestampMs: 1583946750366}, @@ -47,6 +48,7 @@ func TestMergeSamplesIntoFirstDuplicates(t *testing.T) { } func TestMergeSamplesIntoFirst(t *testing.T) { + t.Parallel() a := []cortexpb.Sample{ {Value: 1, TimestampMs: 10}, {Value: 2, TimestampMs: 20}, @@ -84,6 +86,7 @@ func TestMergeSamplesIntoFirst(t *testing.T) { } func TestMergeSamplesIntoFirstNilA(t *testing.T) { + t.Parallel() b := []cortexpb.Sample{ {Value: 1, TimestampMs: 5}, {Value: 2, TimestampMs: 15}, @@ -99,6 +102,7 @@ func TestMergeSamplesIntoFirstNilA(t *testing.T) { } func TestMergeSamplesIntoFirstNilB(t *testing.T) { + t.Parallel() a := []cortexpb.Sample{ {Value: 1, TimestampMs: 10}, {Value: 2, TimestampMs: 20}, @@ -113,6 +117,7 @@ func TestMergeSamplesIntoFirstNilB(t *testing.T) { } func TestMergeExemplars(t *testing.T) { + t.Parallel() now := timestamp.FromTime(time.Now()) exemplar1 := cortexpb.Exemplar{Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "trace-1")), TimestampMs: now, Value: 1} exemplar2 := cortexpb.Exemplar{Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "trace-2")), TimestampMs: now + 1, Value: 2} @@ -171,7 +176,9 @@ func TestMergeExemplars(t *testing.T) { {Labels: labels2, Exemplars: []cortexpb.Exemplar{exemplar3, exemplar4}}}, }, } { + c := c t.Run(fmt.Sprint("test", i), func(t *testing.T) { + t.Parallel() rA := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesA} rB := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesB} e := mergeExemplarQueryResponses([]interface{}{rA, rB}) From 16b7a8f1bb1c644e5f09dffce2e90887bf80fc98 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 3 Jan 2023 03:55:21 -0800 Subject: [PATCH 035/133] Skip instant query round tripper if sharding is not applicable. (#5062) * Skip instant query round tripper if sharding is not applicable. 
Signed-off-by: Ben Ye * address review comments Signed-off-by: Ben Ye * update changelog Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/cortex/modules.go | 7 +- .../instantquery/instant_query_middlewares.go | 4 +- .../queryrange/query_range_middlewares.go | 4 +- .../query_range_middlewares_test.go | 7 +- pkg/querier/tripperware/roundtrip.go | 17 +++- pkg/querier/tripperware/roundtrip_test.go | 79 +++++++++++++++---- pkg/querier/tripperware/shard_by.go | 23 +++--- .../tripperware/test_shard_by_query_utils.go | 4 +- 9 files changed, 113 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13539382189..397fb9d8366 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ * [ENHANCEMENT] Query Frontend/Query Scheduler: Increase upper bound to 60s for queue duration histogram metric. #5029 * [ENHANCEMENT] Query Frontend: Log Vertical sharding information when `query_stats_enabled` is enabled. #5037 * [ENHANCEMENT] Ingester: The metadata APIs should honour `querier.query-ingesters-within` when `querier.query-store-for-labels-enabled` is true. #5027 +* [ENHANCEMENT] Query Frontend: Skip instant query roundtripper if sharding is not applicable. #5062 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000 diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index c8560f7e3b7..189ee6604b1 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -16,6 +16,7 @@ import ( "github.com/prometheus/prometheus/rules" prom_storage "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/discovery/dns" + "github.com/thanos-io/thanos/pkg/querysharding" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/server" @@ -437,6 +438,7 @@ func (t *Cortex) initDeleteRequestsStore() (serv services.Service, err error) { // initQueryFrontendTripperware instantiates the tripperware used by the query frontend // to optimize Prometheus query requests. 
func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err error) { + queryAnalyzer := querysharding.NewQueryAnalyzer() queryRangeMiddlewares, cache, err := queryrange.Middlewares( t.Cfg.QueryRange, util_log.Logger, @@ -444,12 +446,13 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro queryrange.PrometheusResponseExtractor{}, prometheus.DefaultRegisterer, t.TombstonesLoader, + queryAnalyzer, ) if err != nil { return nil, err } - instantQueryMiddlewares, err := instantquery.Middlewares(util_log.Logger, t.Overrides) + instantQueryMiddlewares, err := instantquery.Middlewares(util_log.Logger, t.Overrides, queryAnalyzer) if err != nil { return nil, err } @@ -461,6 +464,8 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro instantQueryMiddlewares, queryrange.PrometheusCodec, instantquery.InstantQueryCodec, + t.Overrides, + queryAnalyzer, ) return services.NewIdleService(nil, func(_ error) error { diff --git a/pkg/querier/tripperware/instantquery/instant_query_middlewares.go b/pkg/querier/tripperware/instantquery/instant_query_middlewares.go index 62cdf68011a..b88515e6be0 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_middlewares.go +++ b/pkg/querier/tripperware/instantquery/instant_query_middlewares.go @@ -2,6 +2,7 @@ package instantquery import ( "github.com/go-kit/log" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/cortexproject/cortex/pkg/querier/tripperware" ) @@ -9,9 +10,10 @@ import ( func Middlewares( log log.Logger, limits tripperware.Limits, + queryAnalyzer querysharding.Analyzer, ) ([]tripperware.Middleware, error) { var m []tripperware.Middleware - m = append(m, tripperware.ShardByMiddleware(log, limits, InstantQueryCodec)) + m = append(m, tripperware.ShardByMiddleware(log, limits, InstantQueryCodec, queryAnalyzer)) return m, nil } diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares.go b/pkg/querier/tripperware/queryrange/query_range_middlewares.go index e6f32dd2766..24714c54e1b 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/querier" @@ -76,6 +77,7 @@ func Middlewares( cacheExtractor Extractor, registerer prometheus.Registerer, cacheGenNumberLoader CacheGenNumberLoader, + queryAnalyzer querysharding.Analyzer, ) ([]tripperware.Middleware, cache.Cache, error) { // Metric used to keep track of each middleware execution duration. 
metrics := tripperware.NewInstrumentMiddlewareMetrics(registerer) @@ -109,7 +111,7 @@ func Middlewares( queryRangeMiddleware = append(queryRangeMiddleware, tripperware.InstrumentMiddleware("retry", metrics), NewRetryMiddleware(log, cfg.MaxRetries, NewRetryMiddlewareMetrics(registerer))) } - queryRangeMiddleware = append(queryRangeMiddleware, tripperware.InstrumentMiddleware("shardBy", metrics), tripperware.ShardByMiddleware(log, limits, ShardedPrometheusCodec)) + queryRangeMiddleware = append(queryRangeMiddleware, tripperware.InstrumentMiddleware("shardBy", metrics), tripperware.ShardByMiddleware(log, limits, ShardedPrometheusCodec, queryAnalyzer)) return queryRangeMiddleware, c, nil } diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go index ffbb49ca278..cd45dea9df4 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go @@ -2,7 +2,7 @@ package queryrange import ( "context" - io "io" + "io" "net/http" "net/http/httptest" "net/url" @@ -11,6 +11,7 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/user" @@ -43,12 +44,14 @@ func TestRoundTrip(t *testing.T) { next: http.DefaultTransport, } + qa := querysharding.NewQueryAnalyzer() queyrangemiddlewares, _, err := Middlewares(Config{}, log.NewNopLogger(), mockLimits{}, nil, nil, nil, + qa, ) require.NoError(t, err) @@ -59,6 +62,8 @@ func TestRoundTrip(t *testing.T) { nil, PrometheusCodec, nil, + nil, + qa, ) for i, tc := range []struct { diff --git a/pkg/querier/tripperware/roundtrip.go b/pkg/querier/tripperware/roundtrip.go index a973e3e57d0..39f30a116b9 100644 --- a/pkg/querier/tripperware/roundtrip.go +++ b/pkg/querier/tripperware/roundtrip.go @@ -27,12 +27,14 @@ import ( "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/validation" ) // HandlerFunc is like http.HandlerFunc, but for Handler. @@ -98,6 +100,8 @@ func NewQueryTripperware( instantRangeMiddleware []Middleware, queryRangeCodec Codec, instantQueryCodec Codec, + limits Limits, + queryAnalyzer querysharding.Analyzer, ) Tripperware { // Per tenant query metrics. queriesPerTenant := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ @@ -139,7 +143,18 @@ func NewQueryTripperware( if isQueryRange { return queryrange.RoundTrip(r) - } else if isQuery && len(instantRangeMiddleware) > 0 { + } else if isQuery { + // If vertical sharding is not enabled for the tenant, use downstream roundtripper. + numShards := validation.SmallestPositiveIntPerTenant(tenantIDs, limits.QueryVerticalShardSize) + if numShards <= 1 { + return next.RoundTrip(r) + } + // If the given query is not shardable, use downstream roundtripper. 
+ query := r.FormValue("query") + analysis, err := queryAnalyzer.Analyze(query) + if err != nil || !analysis.IsShardable() { + return next.RoundTrip(r) + } return instantQuery.RoundTrip(r) } return next.RoundTrip(r) diff --git a/pkg/querier/tripperware/roundtrip_test.go b/pkg/querier/tripperware/roundtrip_test.go index 9d9456a3b13..66ee2d359b6 100644 --- a/pkg/querier/tripperware/roundtrip_test.go +++ b/pkg/querier/tripperware/roundtrip_test.go @@ -11,14 +11,19 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/validation" ) const ( - queryRange = "/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&stats=all&step=120" - query = "/api/v1/query?time=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680" - queryExemplar = "/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'" - responseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}` + queryRange = "/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&stats=all&step=120" + query = "/api/v1/query?time=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680" + queryNonShardable = "/api/v1/query?time=1536716898&query=container_memory_rss&start=1536673680" + queryExemplar = "/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'" + responseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}` ) type mockRequest struct { @@ -86,22 +91,54 @@ func TestRoundTrip(t *testing.T) { return mockMiddleware{} }), } - tw := NewQueryTripperware(log.NewNopLogger(), - nil, - nil, - middlewares, - middlewares, - mockCodec{}, - mockCodec{}, - ) + limits := validation.Limits{} + flagext.DefaultValues(&limits) + defaultOverrides, err := validation.NewOverrides(limits, nil) + require.NoError(t, err) + + limitsWithVerticalSharding := validation.Limits{QueryVerticalShardSize: 3} + shardingOverrides, err := validation.NewOverrides(limitsWithVerticalSharding, nil) + require.NoError(t, err) for _, tc := range []struct { path, expectedBody string + limits Limits }{ - {"/foo", "bar"}, - {queryExemplar, "bar"}, - {queryRange, responseBody}, - {query, responseBody}, + { + path: "/foo", + expectedBody: "bar", + limits: defaultOverrides, + }, + { + path: queryExemplar, + expectedBody: "bar", + limits: defaultOverrides, + }, + { + path: queryRange, + expectedBody: responseBody, + limits: defaultOverrides, + }, + { + path: query, + expectedBody: "bar", + limits: defaultOverrides, + }, + { + path: queryNonShardable, + expectedBody: "bar", + limits: defaultOverrides, + }, + { + path: queryNonShardable, + expectedBody: "bar", + limits: shardingOverrides, + }, + { + path: query, + expectedBody: responseBody, + limits: shardingOverrides, + }, } { t.Run(tc.path, func(t *testing.T) { req, err := http.NewRequest("GET", tc.path, http.NoBody) @@ -115,6 +152,16 @@ func TestRoundTrip(t *testing.T) { err = user.InjectOrgIDIntoHTTPRequest(ctx, req) require.NoError(t, err) + tw := 
NewQueryTripperware(log.NewNopLogger(), + nil, + nil, + middlewares, + middlewares, + mockCodec{}, + mockCodec{}, + tc.limits, + querysharding.NewQueryAnalyzer(), + ) resp, err := tw(downstream).RoundTrip(req) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) diff --git a/pkg/querier/tripperware/shard_by.go b/pkg/querier/tripperware/shard_by.go index 13d6645612e..c596f40b637 100644 --- a/pkg/querier/tripperware/shard_by.go +++ b/pkg/querier/tripperware/shard_by.go @@ -17,23 +17,24 @@ import ( "github.com/cortexproject/cortex/pkg/util/validation" ) -func ShardByMiddleware(logger log.Logger, limits Limits, merger Merger) Middleware { +func ShardByMiddleware(logger log.Logger, limits Limits, merger Merger, queryAnalyzer querysharding.Analyzer) Middleware { return MiddlewareFunc(func(next Handler) Handler { return shardBy{ - next: next, - limits: limits, - merger: merger, - logger: logger, + next: next, + limits: limits, + merger: merger, + logger: logger, + analyzer: queryAnalyzer, } }) } type shardBy struct { - next Handler - limits Limits - logger log.Logger - merger Merger - queryAnalyzer *querysharding.QueryAnalyzer + next Handler + limits Limits + logger log.Logger + merger Merger + analyzer querysharding.Analyzer } func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { @@ -51,7 +52,7 @@ func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { } logger := util_log.WithContext(ctx, s.logger) - analysis, err := s.queryAnalyzer.Analyze(r.GetQuery()) + analysis, err := s.analyzer.Analyze(r.GetQuery()) if err != nil { level.Warn(logger).Log("msg", "error analyzing query", "q", r.GetQuery(), "err", err) } diff --git a/pkg/querier/tripperware/test_shard_by_query_utils.go b/pkg/querier/tripperware/test_shard_by_query_utils.go index f673fc8e4e6..bb2d32619dd 100644 --- a/pkg/querier/tripperware/test_shard_by_query_utils.go +++ b/pkg/querier/tripperware/test_shard_by_query_utils.go @@ -16,6 +16,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql/parser" "github.com/stretchr/testify/require" + thanosquerysharding "github.com/thanos-io/thanos/pkg/querysharding" "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/weaveworks/common/user" @@ -406,7 +407,8 @@ http_requests_total`, next: http.DefaultTransport, } - roundtripper := NewRoundTripper(downstream, tt.codec, nil, ShardByMiddleware(log.NewNopLogger(), mockLimits{shardSize: tt.shardSize}, tt.codec)) + qa := thanosquerysharding.NewQueryAnalyzer() + roundtripper := NewRoundTripper(downstream, tt.codec, nil, ShardByMiddleware(log.NewNopLogger(), mockLimits{shardSize: tt.shardSize}, tt.codec, qa)) ctx := user.InjectOrgID(context.Background(), "1") From 7d51ad9df6a468586ea4d3369c41e73e739bc286 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 3 Jan 2023 08:47:02 -0800 Subject: [PATCH 036/133] stop using FromMetricsToLabelAdapters (#5078) Signed-off-by: Alex Le --- pkg/distributor/distributor.go | 6 +++--- pkg/querier/tripperware/instantquery/instant_query.go | 5 +++-- pkg/querier/tripperware/query.go | 5 +++-- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index b1524a6c2b0..1b02f84a384 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -1036,11 +1036,11 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through if err := queryLimiter.AddDataBytes(resp.Size()); err != nil { return nil, err } - ms := 
ingester_client.FromMetricsForLabelMatchersResponse(resp) - for _, m := range ms { - if err := queryLimiter.AddSeries(cortexpb.FromMetricsToLabelAdapters(m)); err != nil { + for _, m := range resp.Metric { + if err := queryLimiter.AddSeries(m.Labels); err != nil { return nil, err } + m := cortexpb.FromLabelAdaptersToMetric(m.Labels) fingerprint := m.Fingerprint() mutex.Lock() (*metrics)[fingerprint] = m diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index 36aa222bddd..645f9f70e42 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -17,6 +17,7 @@ import ( otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/weaveworks/common/httpgrpc" "google.golang.org/grpc/status" @@ -422,13 +423,13 @@ func decorateWithParamName(err error, field string) error { // UnmarshalJSON implements json.Unmarshaler. func (s *Sample) UnmarshalJSON(data []byte) error { var sample struct { - Metric model.Metric `json:"metric"` + Metric labels.Labels `json:"metric"` Value cortexpb.Sample `json:"value"` } if err := json.Unmarshal(data, &sample); err != nil { return err } - s.Labels = cortexpb.FromMetricsToLabelAdapters(sample.Metric) + s.Labels = cortexpb.FromLabelsToLabelAdapters(sample.Metric) s.Sample = sample.Value return nil } diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 90571d089d2..6c6d0dd3bbd 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -17,6 +17,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -111,13 +112,13 @@ func encodeSampleStream(ptr unsafe.Pointer, stream *jsoniter.Stream) { // UnmarshalJSON implements json.Unmarshaler. 
func (s *SampleStream) UnmarshalJSON(data []byte) error { var stream struct { - Metric model.Metric `json:"metric"` + Metric labels.Labels `json:"metric"` Values []cortexpb.Sample `json:"values"` } if err := json.Unmarshal(data, &stream); err != nil { return err } - s.Labels = cortexpb.FromMetricsToLabelAdapters(stream.Metric) + s.Labels = cortexpb.FromLabelsToLabelAdapters(stream.Metric) s.Samples = stream.Values return nil } From 350f5a46dcb7c17dbcb3b38c80822b4b0dd85282 Mon Sep 17 00:00:00 2001 From: Alex Le Date: Tue, 3 Jan 2023 12:15:29 -0800 Subject: [PATCH 037/133] Fixed compile error Signed-off-by: Alex Le --- pkg/compactor/compactor_test.go | 2 +- pkg/compactor/shuffle_sharding_grouper_test.go | 10 ++++++++++ .../github.com/thanos-io/thanos/pkg/compact/compact.go | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index a460938c783..f08e19a9283 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1602,7 +1602,7 @@ func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Bucket, li var blocksGrouperFactory BlocksGrouperFactory if tsdbGrouper != nil { - blocksGrouperFactory = func(_ context.Context, _ Config, _ objstore.InstrumentedBucket, _ log.Logger, _ prometheus.Registerer, _, _, _ prometheus.Counter, _ prometheus.Gauge, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string) compact.Grouper { + blocksGrouperFactory = func(_ context.Context, _ Config, _ objstore.InstrumentedBucket, _ log.Logger, _ prometheus.Registerer, _, _, _ prometheus.Counter, _ prometheus.Gauge, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ prometheus.Counter, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string, _ *compact.GatherNoCompactionMarkFilter) compact.Grouper { return tsdbGrouper } } else { diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index 666beb44e1c..6c080ae6f59 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -1117,6 +1117,10 @@ func TestGroupPartitioning(t *testing.T) { bkt := &bucket.ClientMock{} + noCompactFilter := func() map[ulid.ULID]*metadata.NoCompactMark { + return make(map[ulid.ULID]*metadata.NoCompactMark) + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() g := NewShuffleShardingGrouper( @@ -1145,6 +1149,7 @@ func TestGroupPartitioning(t *testing.T) { nil, nil, nil, + noCompactFilter, ) var testBlocks []*metadata.Meta for block, partitionID := range testData.blocks { @@ -1288,6 +1293,10 @@ func TestPartitionStrategyChange_shouldUseOriginalPartitionedGroup(t *testing.T) ring := &RingMock{} + noCompactFilter := func() map[ulid.ULID]*metadata.NoCompactMark { + return make(map[ulid.ULID]*metadata.NoCompactMark) + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() return NewShuffleShardingGrouper( @@ -1316,6 +1325,7 @@ func TestPartitionStrategyChange_shouldUseOriginalPartitionedGroup(t *testing.T) nil, nil, nil, + noCompactFilter, ) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 26a447834cf..0ea5eb9c935 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -797,7 +797,7 @@ type Compactor interface { // 
* The source dirs are marked Deletable. // * Returns empty ulid.ULID{}. Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) - CompactWithAdditionalPostings(dest string, dirs []string, open []*tsdb.Block, additionalPostingsProvider tsdb.AdditionalPostingsProvider) (ulid.ULID, error) + CompactWithAdditionalPostings(dest string, dirs []string, open []*tsdb.Block, additionalPostingsProvider tsdb.AdditionalPostingsFunc) (ulid.ULID, error) } // Compact plans and runs a single compaction against the group. The compacted result From a6791f063074f9c9d5b7bdac56ab2ba24467b9e5 Mon Sep 17 00:00:00 2001 From: Alex Le Date: Tue, 31 Jan 2023 11:35:46 -0800 Subject: [PATCH 038/133] Revert prometheus parallelism change Signed-off-by: Alex Le --- .../prometheus/prometheus/storage/merge.go | 21 ++------ .../prometheus/prometheus/tsdb/compact.go | 26 ++-------- .../prometheus/tsdb/index/postings.go | 51 ------------------- 3 files changed, 9 insertions(+), 89 deletions(-) diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 059687b6c1e..2f175d3e7ec 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -331,23 +331,10 @@ func (c *genericMergeSeriesSet) Next() bool { for { // Firstly advance all the current series sets. If any of them have run out, // we can drop them, otherwise they should be inserted back into the heap. - ch := make(chan genericSeriesSet, len(c.currentSets)) - wg := sync.WaitGroup{} - for i := range c.currentSets { - wg.Add(1) - i := i - go func() { - defer wg.Done() - if c.currentSets[i].Next() { - ch <- c.currentSets[i] - } - }() - } - - wg.Wait() - close(ch) - for set := range ch { - heap.Push(&c.heap, set) + for _, set := range c.currentSets { + if set.Next() { + heap.Push(&c.heap, set) + } } if len(c.heap) == 0 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index a93bd83105f..d4642b77d02 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -17,12 +17,10 @@ import ( "context" "crypto/rand" "fmt" - "golang.org/x/sync/errgroup" "io" "os" "path/filepath" "sort" - "sync" "time" "github.com/go-kit/log" @@ -675,7 +673,6 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, var ( sets []storage.ChunkSeriesSet - setsMtx sync.Mutex symbols index.StringIter closers []io.Closer overlapping bool @@ -691,8 +688,6 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, c.metrics.populatingBlocks.Set(1) globalMaxt := blocks[0].Meta().MaxTime - g, _:= errgroup.WithContext(c.ctx) - g.SetLimit(8) for i, b := range blocks { select { case <-c.ctx.Done(): @@ -735,19 +730,11 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, return err } all = indexr.SortedPostings(all) - g.Go(func() error { - if additionalPostingsFunc != nil { - all = additionalPostingsFunc.GetPostings(all, indexr) - } - //shardStart := time.Now() - //shardedPosting := index.NewShardedPosting(all, uint64(partitionCount), uint64(partitionId), indexr.Series) - //fmt.Printf("finished sharding, duration: %v\n", time.Since(shardStart)) - // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. 
- setsMtx.Lock() - sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) - setsMtx.Unlock() - return nil - }) + if additionalPostingsFunc != nil { + all = additionalPostingsFunc.GetPostings(all, indexr) + } + // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. + sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) syms := indexr.Symbols() if i == 0 { symbols = syms @@ -755,9 +742,6 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } symbols = NewMergedStringIter(symbols, syms) } - if err := g.Wait(); err != nil { - return err - } for symbols.Next() { if err := indexw.AddSymbol(symbols.At()); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index e7d2115e9b9..8df2bccf67e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -16,7 +16,6 @@ package index import ( "container/heap" "encoding/binary" - "fmt" "runtime" "sort" "sync" @@ -25,7 +24,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunks" ) var allPostingsKey = labels.Label{} @@ -422,55 +420,6 @@ func (e errPostings) Seek(storage.SeriesRef) bool { return false } func (e errPostings) At() storage.SeriesRef { return 0 } func (e errPostings) Err() error { return e.err } -type shardedPostings struct { - //postings Postings - //labelsFn func(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error - series []storage.SeriesRef - cur storage.SeriesRef - initialized bool - - //bufChks []chunks.Meta - //bufLbls labels.Labels -} - -func NewShardedPosting(postings Postings, partitionCount uint64, partitionId uint64, labelsFn func(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error) *shardedPostings { - bufChks := make([]chunks.Meta, 0) - bufLbls := make(labels.Labels, 0) - series := make([]storage.SeriesRef, 0) - for postings.Next() { - err := labelsFn(postings.At(), &bufLbls, &bufChks) - if err != nil { - fmt.Printf("err: %v", err) - } - if bufLbls.Hash() % partitionCount == partitionId { - posting := postings.At() - series = append(series, posting) - } - } - return &shardedPostings{series: series, initialized: false} -} - -func (p *shardedPostings) Next() bool { - if len(p.series) > 0 { - p.cur, p.series = p.series[0], p.series[1:] - return true - } - return false -} - -func (p *shardedPostings) At() storage.SeriesRef { - return p.cur -} - -func (p *shardedPostings) Seek(v storage.SeriesRef) bool { - fmt.Println("ran seek") - return false -} - -func (p *shardedPostings) Err() error { - return nil -} - var emptyPostings = errPostings{} // EmptyPostings returns a postings list that's always empty. 
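The hunks above back out the experimental postings-sharding helper (shardedPostings / NewShardedPosting) that the first patch in this series had vendored into Prometheus' tsdb/index package, together with the goroutine-based population of chunk series sets in compact.go. The idea the helper captured, and which the partitioning compactor builds on, is that a series belongs to partition (hash of its label set) mod (partition count). The self-contained Go sketch below illustrates only that assignment; the series type, the FNV hash, and the function names are illustrative stand-ins rather than the vendored API (the removed code hashed labels.Labels via Hash() while walking a Postings iterator, using the index reader's Series callback to fetch each label set).

package main

import (
	"fmt"
	"hash/fnv"
)

// series stands in for one TSDB series: a reference plus a canonical,
// already-sorted label string such as `{__name__="up",job="api"}`.
type series struct {
	ref  uint64
	lbls string
}

// partitionOf assigns a series to a partition by hashing its label set and
// taking the remainder modulo the number of partitions.
func partitionOf(s series, partitionCount uint64) uint64 {
	h := fnv.New64a()
	_, _ = h.Write([]byte(s.lbls))
	return h.Sum64() % partitionCount
}

// shardSeries keeps only the series that fall into the given partition,
// mirroring what the removed shardedPostings helper did while iterating
// a postings list.
func shardSeries(all []series, partitionCount, partitionID uint64) []uint64 {
	var refs []uint64
	for _, s := range all {
		if partitionOf(s, partitionCount) == partitionID {
			refs = append(refs, s.ref)
		}
	}
	return refs
}

func main() {
	all := []series{
		{ref: 1, lbls: `{__name__="up",job="api"}`},
		{ref: 2, lbls: `{__name__="up",job="db"}`},
		{ref: 3, lbls: `{__name__="up",job="cache"}`},
	}
	// With two partitions, each compaction partition sees a disjoint subset.
	for pid := uint64(0); pid < 2; pid++ {
		fmt.Printf("partition %d -> series refs %v\n", pid, shardSeries(all, 2, pid))
	}
}

Because the assignment depends only on the series' labels and the partition count, every compactor that uses the same partition count places a given series in the same partition, which is what allows the partitions of one time range to be compacted independently.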
From 7000ee9eae438e17d44aae32dca4e64b0e75a642 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 10 Jan 2023 18:15:31 -0800 Subject: [PATCH 039/133] chore: Updating Prometheus to v0.40.7 and Thanos to 3327c510076a (#5048) * Updating Prometheus Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio * Breaking changes Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio * fix gogo proto Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio * Updating AM Signed-off-by: Alan Protasio * rebase Signed-off-by: Alan Protasio * Fix AM breaking change Signed-off-by: Alan Protasio * Make lint happy Signed-off-by: Alan Protasio * Update Doc Signed-off-by: Alan Protasio * Fix AM VictorOps test Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- docs/configuration/config-file-reference.md | 10 + go.mod | 82 +- go.sum | 204 +- pkg/alertmanager/alertmanager.go | 2 +- pkg/alertmanager/alertmanager_http_test.go | 2 +- pkg/alertmanager/api_test.go | 1 - pkg/alertmanager/multitenant.go | 2 +- pkg/chunk/encoding/prometheus_chunk.go | 6 +- pkg/compactor/compactor_test.go | 4 +- pkg/cortexpb/compat_test.go | 2 +- pkg/cortexpb/cortex.pb.go | 89 +- pkg/cortexpb/cortex.proto | 2 +- .../distributorpb/distributor.pb.go | 26 +- .../distributorpb/distributor.proto | 2 +- pkg/distributor/ha_tracker.pb.go | 29 +- pkg/distributor/ha_tracker.proto | 2 +- pkg/frontend/v1/frontendv1pb/frontend.pb.go | 67 +- pkg/frontend/v1/frontendv1pb/frontend.proto | 2 +- pkg/frontend/v2/frontendv2pb/frontend.pb.go | 48 +- pkg/frontend/v2/frontendv2pb/frontend.proto | 2 +- pkg/ingester/client/ingester.pb.go | 164 +- pkg/ingester/client/ingester.proto | 2 +- pkg/ingester/ingester.go | 2 +- pkg/querier/batch/batch.go | 11 +- pkg/querier/batch/batch_test.go | 7 +- pkg/querier/batch/chunk_test.go | 8 +- pkg/querier/block.go | 11 +- pkg/querier/block_test.go | 6 +- pkg/querier/blocks_store_queryable_test.go | 2 +- pkg/querier/distributor_queryable_test.go | 5 +- pkg/querier/iterators/chunk_merge_iterator.go | 8 +- .../iterators/chunk_merge_iterator_test.go | 11 +- .../iterators/compatibe_chunk_iterator.go | 77 + pkg/querier/remote_read.go | 3 +- pkg/querier/series/series_set.go | 23 +- pkg/querier/series/series_set_test.go | 5 +- pkg/querier/stats/stats.pb.go | 51 +- pkg/querier/stats/stats.proto | 2 +- pkg/querier/timeseries_series_set.go | 9 +- pkg/querier/timeseries_series_set_test.go | 19 +- .../instantquery/instantquery.pb.go | 85 +- .../instantquery/instantquery.proto | 2 +- pkg/querier/tripperware/query.pb.go | 65 +- pkg/querier/tripperware/query.proto | 2 +- .../tripperware/queryrange/queryrange.pb.go | 94 +- .../tripperware/queryrange/queryrange.proto | 2 +- .../tripperware/queryrange/series_test.go | 3 +- pkg/ring/kv/memberlist/kv.pb.go | 27 +- pkg/ring/kv/memberlist/kv.proto | 2 +- pkg/ring/ring.pb.go | 55 +- pkg/ring/ring.proto | 2 +- pkg/ruler/compat.go | 7 +- pkg/ruler/ruler.pb.go | 88 +- pkg/ruler/ruler.proto | 2 +- pkg/ruler/rulespb/rules.pb.go | 62 +- pkg/ruler/rulespb/rules.proto | 2 +- pkg/scheduler/schedulerpb/scheduler.pb.go | 84 +- pkg/scheduler/schedulerpb/scheduler.proto | 2 +- pkg/storage/tsdb/config.go | 4 +- pkg/storegateway/gateway_test.go | 2 +- vendor/cloud.google.com/go/.gitignore | 12 - .../.release-please-manifest-submodules.json | 111 - .../go/.release-please-manifest.json | 3 - vendor/cloud.google.com/go/CHANGES.md | 2424 -- vendor/cloud.google.com/go/CODE_OF_CONDUCT.md | 44 - vendor/cloud.google.com/go/CONTRIBUTING.md | 327 - 
vendor/cloud.google.com/go/README.md | 139 - vendor/cloud.google.com/go/RELEASING.md | 141 - vendor/cloud.google.com/go/SECURITY.md | 7 - .../go/compute/internal/version.go | 18 + .../go/compute/metadata/CHANGES.md | 5 + .../go/compute/metadata/LICENSE | 202 + .../go/compute/metadata/README.md | 27 + .../go/compute/metadata/metadata.go | 1 + .../metadata/tidyfix.go} | 6 +- vendor/cloud.google.com/go/doc.go | 228 - vendor/cloud.google.com/go/iam/CHANGES.md | 21 + .../go/internal/.repo-metadata-full.json | 119 +- ...elease-please-config-yoshi-submodules.json | 334 - .../go/release-please-config.json | 10 - vendor/cloud.google.com/go/testing.md | 236 - .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1787 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../internal/shareddefaults/shared_config.go | 18 +- .../shared_config_resolve_home.go | 18 + .../shared_config_resolve_home_go1.12.go | 13 + .../private/protocol/json/jsonutil/build.go | 19 +- .../protocol/json/jsonutil/unmarshal.go | 13 + .../protocol/query/queryutil/queryutil.go | 34 +- .../aws-sdk-go/private/protocol/rest/build.go | 18 +- .../private/protocol/rest/unmarshal.go | 18 +- .../private/protocol/xml/xmlutil/build.go | 32 +- .../private/protocol/xml/xmlutil/unmarshal.go | 18 +- .../aws/aws-sdk-go/service/sns/api.go | 72 +- .../aws/aws-sdk-go/service/sts/api.go | 213 +- vendor/github.com/benbjohnson/clock/LICENSE | 21 + vendor/github.com/benbjohnson/clock/README.md | 105 + vendor/github.com/benbjohnson/clock/clock.go | 378 + .../github.com/benbjohnson/clock/context.go | 86 + vendor/github.com/cespare/xxhash/v2/README.md | 31 +- .../github.com/cespare/xxhash/v2/testall.sh | 10 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +- .../cespare/xxhash/v2/xxhash_amd64.s | 336 +- .../cespare/xxhash/v2/xxhash_arm64.s | 183 + .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 + .../cespare/xxhash/v2/xxhash_other.go | 22 +- .../cespare/xxhash/v2/xxhash_safe.go | 1 + .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +- .../go-systemd/v22/activation/files_unix.go | 70 + .../v22/activation/files_windows.go | 21 + .../go-systemd/v22/activation/listeners.go | 103 + .../go-systemd/v22/activation/packetconns.go | 38 + .../go-systemd/v22/journal/journal_unix.go | 23 +- vendor/github.com/docker/go-units/size.go | 70 +- .../github.com/fsnotify/fsnotify/.gitignore | 10 +- vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 - .../github.com/fsnotify/fsnotify/CHANGELOG.md | 113 + .../fsnotify/fsnotify/CONTRIBUTING.md | 72 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +- vendor/github.com/fsnotify/fsnotify/README.md | 205 +- .../fsnotify/fsnotify/backend_fen.go | 162 + .../fsnotify/fsnotify/backend_inotify.go | 459 + .../fsnotify/fsnotify/backend_kqueue.go | 707 + .../fsnotify/fsnotify/backend_other.go | 66 + .../fsnotify/fsnotify/backend_windows.go | 746 + vendor/github.com/fsnotify/fsnotify/fen.go | 38 - .../github.com/fsnotify/fsnotify/fsnotify.go | 80 +- .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 - .../github.com/fsnotify/fsnotify/inotify.go | 351 - .../fsnotify/fsnotify/inotify_poller.go | 187 - vendor/github.com/fsnotify/fsnotify/kqueue.go | 535 - vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 + .../{open_mode_bsd.go => system_bsd.go} | 4 - .../{open_mode_darwin.go => system_darwin.go} | 4 - .../github.com/fsnotify/fsnotify/windows.go | 586 - .../go-openapi/analysis/.golangci.yml | 3 + .../go-openapi/errors/.golangci.yml | 2 + vendor/github.com/go-openapi/errors/api.go | 3 +- vendor/github.com/go-openapi/errors/doc.go | 2 - 
.../go-openapi/errors/middleware.go | 1 - .../go-openapi/runtime/client_request.go | 3 +- .../go-openapi/runtime/client_response.go | 18 +- .../go-openapi/runtime/constants.go | 2 + .../go-openapi/runtime/middleware/context.go | 15 +- .../runtime/middleware/parameter.go | 6 +- .../runtime/middleware/swaggerui.go | 10 +- .../runtime/middleware/swaggerui_oauth2.go | 122 + .../runtime/security/authenticator.go | 4 +- .../go-openapi/validate/default_validator.go | 2 +- .../go-openapi/validate/example_validator.go | 2 +- .../github.com/go-openapi/validate/helpers.go | 4 +- vendor/github.com/go-openapi/validate/spec.go | 11 +- vendor/github.com/gofrs/uuid/README.md | 10 +- vendor/github.com/gofrs/uuid/generator.go | 267 +- vendor/github.com/gofrs/uuid/uuid.go | 10 +- .../github.com/google/pprof/profile/encode.go | 76 +- .../github.com/google/pprof/profile/filter.go | 4 + .../github.com/google/pprof/profile/merge.go | 158 +- .../github.com/google/pprof/profile/proto.go | 19 +- .../github.com/google/pprof/profile/prune.go | 26 +- .../client/client.go | 27 +- .../client/util/util.go | 11 +- .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 7 + .../googleapis/gax-go/v2/content_type.go | 112 + .../googleapis/gax-go/v2/internal/version.go | 2 +- .../hashicorp/golang-lru/.golangci.yml | 30 + vendor/github.com/hashicorp/golang-lru/2q.go | 3 +- .../github.com/hashicorp/golang-lru/LICENSE | 2 + .../github.com/hashicorp/golang-lru/README.md | 2 +- vendor/github.com/hashicorp/golang-lru/arc.go | 1 - vendor/github.com/hashicorp/golang-lru/lru.go | 119 +- .../hashicorp/golang-lru/simplelru/lru.go | 6 +- .../golang-lru/simplelru/lru_interface.go | 5 +- .../hashicorp/golang-lru/testing.go | 16 + .../prometheus/alertmanager/api/api.go | 11 +- .../prometheus/alertmanager/api/v1/api.go | 8 +- .../prometheus/alertmanager/api/v2/api.go | 67 +- .../prometheus/alertmanager/api/v2/compat.go | 3 +- .../alertmanager/api/v2/models/alert.go | 41 +- .../alertmanager/api/v2/models/alert_group.go | 93 +- .../api/v2/models/alert_groups.go | 28 + .../api/v2/models/alert_status.go | 6 + .../api/v2/models/alertmanager_config.go | 7 + .../api/v2/models/alertmanager_status.go | 78 + .../api/v2/models/cluster_status.go | 38 +- .../api/v2/models/gettable_alert.go | 98 +- .../api/v2/models/gettable_alerts.go | 28 + .../api/v2/models/gettable_silence.go | 39 + .../api/v2/models/gettable_silences.go | 28 + .../alertmanager/api/v2/models/label_set.go | 7 + .../alertmanager/api/v2/models/matcher.go | 7 + .../alertmanager/api/v2/models/matchers.go | 28 + .../alertmanager/api/v2/models/peer_status.go | 7 + .../api/v2/models/postable_alert.go | 47 +- .../api/v2/models/postable_alerts.go | 28 + .../api/v2/models/postable_silence.go | 17 + .../alertmanager/api/v2/models/receiver.go | 7 + .../alertmanager/api/v2/models/silence.go | 32 + .../api/v2/models/silence_status.go | 6 + .../api/v2/models/version_info.go | 7 + .../api/v2/restapi/configure_alertmanager.go | 12 +- .../alertmanager/api/v2/restapi/doc.go | 22 +- .../v2/restapi/operations/alert/get_alerts.go | 8 +- .../operations/alert/get_alerts_parameters.go | 9 +- .../operations/alert/get_alerts_responses.go | 9 +- .../restapi/operations/alert/post_alerts.go | 8 +- .../alert/post_alerts_parameters.go | 9 +- .../operations/alert/post_alerts_responses.go | 9 +- .../operations/alertgroup/get_alert_groups.go | 8 +- .../alertgroup/get_alert_groups_parameters.go | 8 +- .../alertgroup/get_alert_groups_responses.go | 9 +- 
.../v2/restapi/operations/alertmanager_api.go | 18 + .../restapi/operations/general/get_status.go | 8 +- .../general/get_status_parameters.go | 3 +- .../general/get_status_responses.go | 3 +- .../operations/receiver/get_receivers.go | 8 +- .../receiver/get_receivers_parameters.go | 3 +- .../receiver/get_receivers_responses.go | 3 +- .../operations/silence/delete_silence.go | 8 +- .../silence/delete_silence_parameters.go | 4 +- .../silence/delete_silence_responses.go | 6 +- .../restapi/operations/silence/get_silence.go | 8 +- .../silence/get_silence_parameters.go | 4 +- .../silence/get_silence_responses.go | 9 +- .../operations/silence/get_silences.go | 8 +- .../silence/get_silences_parameters.go | 6 +- .../silence/get_silences_responses.go | 6 +- .../operations/silence/post_silences.go | 14 +- .../silence/post_silences_parameters.go | 9 +- .../silence/post_silences_responses.go | 9 +- .../alertmanager/api/v2/restapi/server.go | 6 +- .../prometheus/alertmanager/api/v2/testing.go | 70 + .../alertmanager/asset/assets_vfsdata.go | 16 +- .../alertmanager/cluster/channel.go | 3 +- .../alertmanager/cluster/cluster.go | 7 +- .../alertmanager/cluster/connection_pool.go | 2 +- .../alertmanager/cluster/delegate.go | 3 +- .../alertmanager/cluster/tls_config.go | 6 +- .../alertmanager/cluster/tls_connection.go | 1 - .../alertmanager/cluster/tls_transport.go | 2 +- .../prometheus/alertmanager/config/config.go | 94 +- .../alertmanager/config/coordinator.go | 2 +- .../alertmanager/config/notifiers.go | 167 +- .../alertmanager/dispatch/dispatch.go | 2 - .../prometheus/alertmanager/dispatch/route.go | 3 - .../prometheus/alertmanager/nflog/nflog.go | 12 +- .../alertmanager/notify/email/email.go | 23 +- .../prometheus/alertmanager/notify/notify.go | 14 +- .../alertmanager/notify/opsgenie/opsgenie.go | 27 +- .../notify/pagerduty/pagerduty.go | 43 +- .../alertmanager/notify/pushover/pushover.go | 23 +- .../alertmanager/notify/slack/slack.go | 28 +- .../prometheus/alertmanager/notify/sns/sns.go | 8 +- .../prometheus/alertmanager/notify/util.go | 53 +- .../notify/victorops/victorops.go | 27 +- .../alertmanager/notify/webhook/webhook.go | 20 +- .../alertmanager/notify/wechat/wechat.go | 4 +- .../alertmanager/pkg/labels/parse.go | 13 +- .../alertmanager/provider/mem/mem.go | 51 +- .../alertmanager/provider/provider.go | 6 +- .../alertmanager/silence/silence.go | 43 +- .../prometheus/alertmanager/store/store.go | 9 +- .../alertmanager/template/default.tmpl | 25 +- .../alertmanager/template/email.html | 6 +- .../alertmanager/template/email.tmpl | 8 +- .../alertmanager/template/template.go | 8 +- .../alertmanager/timeinterval/timeinterval.go | 70 +- .../prometheus/alertmanager/types/types.go | 31 +- .../prometheus/alertmanager/ui/web.go | 6 + .../prometheus/common/config/http_config.go | 150 +- .../prometheus/common/expfmt/text_create.go | 3 +- .../prometheus/common/version/info.go | 6 +- .../prometheus/common/version/info_default.go | 21 + .../prometheus/common/version/info_go118.go | 58 + .../exporter-toolkit/web/tls_config.go | 148 +- .../exporter-toolkit/web/web-config.yml | 1 - .../prometheus/prometheus/config/config.go | 13 +- .../prometheus/discovery/refresh/refresh.go | 5 +- .../model/histogram/float_histogram.go | 871 + .../prometheus/model/histogram/generic.go | 536 + .../prometheus/model/histogram/histogram.go | 448 + .../prometheus/model/textparse/interface.go | 35 +- .../model/textparse/openmetricsparse.go | 7 + .../prometheus/model/textparse/promparse.go | 7 + .../model/textparse/protobufparse.go | 518 + 
.../prometheus/prometheus/prompb/buf.yaml | 3 + .../prompb/io/prometheus/client/metrics.pb.go | 3994 ++ .../prompb/io/prometheus/client/metrics.proto | 146 + .../prometheus/prometheus/prompb/remote.pb.go | 6 +- .../prometheus/prometheus/prompb/remote.proto | 6 +- .../prometheus/prometheus/prompb/types.pb.go | 1642 +- .../prometheus/prometheus/prompb/types.proto | 77 +- .../prometheus/prometheus/promql/engine.go | 240 +- .../prometheus/prometheus/promql/functions.go | 200 +- .../prometheus/promql/parser/functions.go | 15 + .../prometheus/prometheus/promql/quantile.go | 171 + .../prometheus/prometheus/promql/value.go | 140 +- .../prometheus/prometheus/rules/manager.go | 5 +- .../prometheus/prometheus/scrape/manager.go | 3 + .../prometheus/prometheus/scrape/scrape.go | 78 +- .../prometheus/prometheus/storage/buffer.go | 185 +- .../prometheus/prometheus/storage/fanout.go | 15 + .../prometheus/storage/interface.go | 37 +- .../prometheus/storage/memoized_iterator.go | 94 +- .../prometheus/prometheus/storage/merge.go | 84 +- .../prometheus/storage/remote/codec.go | 100 +- .../storage/remote/queue_manager.go | 318 +- .../prometheus/storage/remote/write.go | 30 +- .../storage/remote/write_handler.go | 14 + .../prometheus/prometheus/storage/series.go | 131 +- .../prometheus/prometheus/tsdb/README.md | 2 +- .../prometheus/prometheus/tsdb/block.go | 6 +- .../prometheus/prometheus/tsdb/blockwriter.go | 1 + .../prometheus/tsdb/chunkenc/chunk.go | 163 +- .../prometheus/tsdb/chunkenc/histogram.go | 857 + .../tsdb/chunkenc/histogram_meta.go | 334 + .../prometheus/tsdb/chunkenc/varbit.go | 232 + .../prometheus/tsdb/chunkenc/xor.go | 239 +- .../prometheus/tsdb/chunks/head_chunks.go | 10 +- .../prometheus/prometheus/tsdb/compact.go | 3 +- .../prometheus/prometheus/tsdb/db.go | 63 +- .../prometheus/tsdb/encoding/encoding.go | 7 +- .../prometheus/prometheus/tsdb/head.go | 149 +- .../prometheus/prometheus/tsdb/head_append.go | 401 +- .../prometheus/prometheus/tsdb/head_read.go | 68 +- .../prometheus/prometheus/tsdb/head_wal.go | 173 +- .../prometheus/prometheus/tsdb/index/index.go | 9 +- .../prometheus/tsdb/index/postings.go | 14 +- .../prometheus/prometheus/tsdb/ooo_head.go | 4 +- .../prometheus/prometheus/tsdb/querier.go | 214 +- .../prometheus/tsdb/record/record.go | 172 +- .../prometheus/tsdb/tsdbblockutil.go | 28 +- .../prometheus/tsdb/tsdbutil/chunks.go | 57 +- .../prometheus/prometheus/tsdb/wal.go | 8 +- .../tsdb/{wal => wlog}/checkpoint.go | 47 +- .../tsdb/{wal => wlog}/live_reader.go | 2 +- .../prometheus/tsdb/{wal => wlog}/reader.go | 2 +- .../prometheus/tsdb/{wal => wlog}/watcher.go | 75 +- .../tsdb/{wal/wal.go => wlog/wlog.go} | 90 +- .../prometheus/util/jsonutil/marshal.go | 62 + .../prometheus/util/teststorage/storage.go | 1 + .../prometheus/prometheus/web/api/v1/api.go | 265 +- vendor/github.com/rueian/rueidis/.gitignore | 3 + .../rueian/rueidis/.goreleaser.yaml | 8 + vendor/github.com/rueian/rueidis/LICENSE | 201 + vendor/github.com/rueian/rueidis/NOTICE | 2 + vendor/github.com/rueian/rueidis/README.md | 667 + vendor/github.com/rueian/rueidis/binary.go | 26 + vendor/github.com/rueian/rueidis/client.go | 250 + vendor/github.com/rueian/rueidis/cluster.go | 764 + vendor/github.com/rueian/rueidis/cmds.go | 19 + vendor/github.com/rueian/rueidis/codecov.yml | 4 + .../rueian/rueidis/docker-compose.yml | 93 + .../github.com/rueian/rueidis/dockertest.sh | 22 + vendor/github.com/rueian/rueidis/helper.go | 70 + .../rueian/rueidis/internal/cmds/builder.go | 119 + .../rueian/rueidis/internal/cmds/cmds.go | 
263 + .../rueian/rueidis/internal/cmds/gen.go | 34266 ++++++++++++++++ .../rueian/rueidis/internal/cmds/slot.go | 109 + .../rueian/rueidis/internal/util/parallel.go | 45 + vendor/github.com/rueian/rueidis/lru.go | 260 + vendor/github.com/rueian/rueidis/lua.go | 85 + vendor/github.com/rueian/rueidis/message.go | 875 + vendor/github.com/rueian/rueidis/mux.go | 319 + vendor/github.com/rueian/rueidis/pipe.go | 1228 + vendor/github.com/rueian/rueidis/pool.go | 67 + vendor/github.com/rueian/rueidis/pubsub.go | 134 + vendor/github.com/rueian/rueidis/resp.go | 255 + vendor/github.com/rueian/rueidis/ring.go | 136 + vendor/github.com/rueian/rueidis/rueidis.go | 309 + vendor/github.com/rueian/rueidis/sentinel.go | 353 + .../github.com/rueian/rueidis/singleflight.go | 39 + .../thanos-io/thanos/pkg/block/block.go | 19 + .../thanos/pkg/cacheutil/redis_client.go | 160 +- .../pkg/compact/downsample/downsample.go | 66 +- .../thanos-io/thanos/pkg/dedup/chunk_iter.go | 2 +- .../thanos-io/thanos/pkg/dedup/iter.go | 98 +- .../thanos/pkg/dedup/pushdown_iter.go | 52 +- .../thanos-io/thanos/pkg/extprom/testing.go | 2 +- .../thanos-io/thanos/pkg/gate/gate.go | 41 +- .../thanos-io/thanos/pkg/httpconfig/http.go | 4 +- .../thanos-io/thanos/pkg/store/proxy.go | 2 +- .../thanos/pkg/testutil/e2eutil/copy.go | 55 + .../thanos/pkg/testutil/e2eutil/port.go | 20 + .../thanos/pkg/testutil/e2eutil/prometheus.go | 645 + .../pkg/testutil/e2eutil/sysprocattr.go | 13 + .../pkg/testutil/e2eutil/sysprocattr_linux.go | 13 + .../thanos/pkg/testutil/testorbench.go | 85 - .../thanos-io/thanos/pkg/testutil/testutil.go | 365 - .../thanos-io/thanos/pkg/tls/options.go | 16 +- .../thanos-io/thanos/pkg/tracing/testutil.go | 2 +- .../weaveworks/common/httpgrpc/httpgrpc.pb.go | 43 +- .../weaveworks/common/httpgrpc/httpgrpc.proto | 11 +- .../common/httpgrpc/server/server.go | 4 +- .../weaveworks/common/httpgrpc/tools.go | 6 + .../common/instrument/instrument.go | 9 +- .../weaveworks/common/middleware/logging.go | 2 +- .../weaveworks/common/middleware/response.go | 52 +- .../weaveworks/common/server/server.go | 80 +- .../weaveworks/common/server/tls_config.go | 55 + .../go.mongodb.org/mongo-driver/bson/bson.go | 6 +- .../mongo-driver/bson/bsoncodec/doc.go | 12 +- .../mongo-driver/bson/bsoncodec/registry.go | 1 + .../bson/bsoncodec/struct_tag_parser.go | 42 +- .../go.mongodb.org/mongo-driver/bson/doc.go | 109 +- .../mongo-driver/bson/primitive/decimal.go | 7 +- .../mongo-driver/bson/primitive/objectid.go | 4 +- .../mongo-driver/bson/primitive/primitive.go | 6 +- .../x/bsonx/bsoncore/document_sequence.go | 4 +- .../net/http/otelhttp/version.go | 2 +- .../instrument/asyncfloat64/asyncfloat64.go | 8 +- .../instrument/asyncint64/asyncint64.go | 8 +- vendor/golang.org/x/crypto/pkcs12/crypto.go | 2 +- vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/slices/slices.go | 248 + vendor/golang.org/x/exp/slices/sort.go | 127 + vendor/golang.org/x/exp/slices/zsortfunc.go | 479 + .../golang.org/x/exp/slices/zsortordered.go | 481 + vendor/golang.org/x/oauth2/google/doc.go | 12 +- .../externalaccount/basecredentials.go | 2 + vendor/golang.org/x/text/cases/cases.go | 162 + vendor/golang.org/x/text/cases/context.go | 376 + vendor/golang.org/x/text/cases/fold.go | 34 + vendor/golang.org/x/text/cases/icu.go | 62 + vendor/golang.org/x/text/cases/info.go | 82 + vendor/golang.org/x/text/cases/map.go | 816 + .../golang.org/x/text/cases/tables10.0.0.go | 2256 + 
.../golang.org/x/text/cases/tables11.0.0.go | 2317 ++ .../golang.org/x/text/cases/tables12.0.0.go | 2360 ++ .../golang.org/x/text/cases/tables13.0.0.go | 2400 ++ vendor/golang.org/x/text/cases/tables9.0.0.go | 2216 + vendor/golang.org/x/text/cases/trieval.go | 217 + vendor/golang.org/x/text/internal/internal.go | 49 + .../x/text/internal/language/common.go | 16 + .../x/text/internal/language/compact.go | 29 + .../text/internal/language/compact/compact.go | 61 + .../internal/language/compact/language.go | 260 + .../text/internal/language/compact/parents.go | 120 + .../text/internal/language/compact/tables.go | 1015 + .../x/text/internal/language/compact/tags.go | 91 + .../x/text/internal/language/compose.go | 167 + .../x/text/internal/language/coverage.go | 28 + .../x/text/internal/language/language.go | 627 + .../x/text/internal/language/lookup.go | 412 + .../x/text/internal/language/match.go | 226 + .../x/text/internal/language/parse.go | 608 + .../x/text/internal/language/tables.go | 3472 ++ .../x/text/internal/language/tags.go | 48 + vendor/golang.org/x/text/internal/match.go | 67 + vendor/golang.org/x/text/internal/tag/tag.go | 100 + vendor/golang.org/x/text/language/coverage.go | 187 + vendor/golang.org/x/text/language/doc.go | 98 + vendor/golang.org/x/text/language/language.go | 605 + vendor/golang.org/x/text/language/match.go | 735 + vendor/golang.org/x/text/language/parse.go | 256 + vendor/golang.org/x/text/language/tables.go | 298 + vendor/golang.org/x/text/language/tags.go | 145 + vendor/golang.org/x/time/rate/rate.go | 81 +- .../x/tools/go/gcexportdata/gcexportdata.go | 8 +- .../golang.org/x/tools/go/packages/golist.go | 22 +- .../x/tools/go/packages/packages.go | 48 +- .../{go => }/internal/gcimporter/bexport.go | 9 +- .../{go => }/internal/gcimporter/bimport.go | 0 .../internal/gcimporter/exportdata.go | 0 .../internal/gcimporter/gcimporter.go | 67 +- .../{go => }/internal/gcimporter/iexport.go | 70 +- .../{go => }/internal/gcimporter/iimport.go | 30 +- .../internal/gcimporter/newInterface10.go | 0 .../internal/gcimporter/newInterface11.go | 0 .../internal/gcimporter/support_go117.go | 0 .../internal/gcimporter/support_go118.go | 14 + .../internal/gcimporter/unified_no.go | 0 .../internal/gcimporter/unified_yes.go | 0 .../internal/gcimporter/ureader_no.go | 0 .../internal/gcimporter/ureader_yes.go | 201 +- .../x/tools/internal/gocommand/invoke.go | 83 +- .../x/tools/internal/gocommand/version.go | 34 +- .../tools/{go => }/internal/pkgbits/codes.go | 0 .../{go => }/internal/pkgbits/decoder.go | 106 +- .../x/tools/{go => }/internal/pkgbits/doc.go | 0 .../{go => }/internal/pkgbits/encoder.go | 18 +- .../tools/{go => }/internal/pkgbits/flags.go | 0 .../{go => }/internal/pkgbits/frames_go1.go | 0 .../{go => }/internal/pkgbits/frames_go17.go | 0 .../tools/{go => }/internal/pkgbits/reloc.go | 4 +- .../{go => }/internal/pkgbits/support.go | 0 .../x/tools/{go => }/internal/pkgbits/sync.go | 0 .../internal/pkgbits/syncmarker_string.go | 0 .../tools/internal/typesinternal/errorcode.go | 38 +- .../typesinternal/errorcode_string.go | 26 +- .../api/internal/gensupport/media.go | 95 +- .../google.golang.org/api/internal/version.go | 2 +- vendor/modules.txt | 134 +- 496 files changed, 90432 insertions(+), 10168 deletions(-) create mode 100644 pkg/querier/iterators/compatibe_chunk_iterator.go delete mode 100644 vendor/cloud.google.com/go/.gitignore delete mode 100644 vendor/cloud.google.com/go/.release-please-manifest-submodules.json delete mode 100644 
vendor/cloud.google.com/go/.release-please-manifest.json delete mode 100644 vendor/cloud.google.com/go/CHANGES.md delete mode 100644 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md delete mode 100644 vendor/cloud.google.com/go/CONTRIBUTING.md delete mode 100644 vendor/cloud.google.com/go/README.md delete mode 100644 vendor/cloud.google.com/go/RELEASING.md delete mode 100644 vendor/cloud.google.com/go/SECURITY.md create mode 100644 vendor/cloud.google.com/go/compute/internal/version.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/CHANGES.md create mode 100644 vendor/cloud.google.com/go/compute/metadata/LICENSE create mode 100644 vendor/cloud.google.com/go/compute/metadata/README.md rename vendor/cloud.google.com/go/{iam/go_mod_tidy_hack.go => compute/metadata/tidyfix.go} (85%) delete mode 100644 vendor/cloud.google.com/go/doc.go delete mode 100644 vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json delete mode 100644 vendor/cloud.google.com/go/release-please-config.json delete mode 100644 vendor/cloud.google.com/go/testing.md create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go create mode 100644 vendor/github.com/benbjohnson/clock/LICENSE create mode 100644 vendor/github.com/benbjohnson/clock/README.md create mode 100644 vendor/github.com/benbjohnson/clock/clock.go create mode 100644 vendor/github.com/benbjohnson/clock/context.go create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%) create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/listeners.go create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh rename vendor/github.com/fsnotify/fsnotify/{open_mode_bsd.go => system_bsd.go} (57%) rename vendor/github.com/fsnotify/fsnotify/{open_mode_darwin.go => system_darwin.go} (52%) delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/content_type.go create mode 100644 vendor/github.com/hashicorp/golang-lru/.golangci.yml create mode 100644 vendor/github.com/hashicorp/golang-lru/testing.go create mode 100644 
vendor/github.com/prometheus/alertmanager/api/v2/testing.go create mode 100644 vendor/github.com/prometheus/common/version/info_default.go create mode 100644 vendor/github.com/prometheus/common/version/info_go118.go create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/generic.go create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/histogram.go create mode 100644 vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go create mode 100644 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go create mode 100644 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/checkpoint.go (87%) rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/live_reader.go (99%) rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/reader.go (99%) rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/watcher.go (91%) rename vendor/github.com/prometheus/prometheus/tsdb/{wal/wal.go => wlog/wlog.go} (92%) create mode 100644 vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go create mode 100644 vendor/github.com/rueian/rueidis/.gitignore create mode 100644 vendor/github.com/rueian/rueidis/.goreleaser.yaml create mode 100644 vendor/github.com/rueian/rueidis/LICENSE create mode 100644 vendor/github.com/rueian/rueidis/NOTICE create mode 100644 vendor/github.com/rueian/rueidis/README.md create mode 100644 vendor/github.com/rueian/rueidis/binary.go create mode 100644 vendor/github.com/rueian/rueidis/client.go create mode 100644 vendor/github.com/rueian/rueidis/cluster.go create mode 100644 vendor/github.com/rueian/rueidis/cmds.go create mode 100644 vendor/github.com/rueian/rueidis/codecov.yml create mode 100644 vendor/github.com/rueian/rueidis/docker-compose.yml create mode 100644 vendor/github.com/rueian/rueidis/dockertest.sh create mode 100644 vendor/github.com/rueian/rueidis/helper.go create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/builder.go create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/cmds.go create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/gen.go create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/slot.go create mode 100644 vendor/github.com/rueian/rueidis/internal/util/parallel.go create mode 100644 vendor/github.com/rueian/rueidis/lru.go create mode 100644 vendor/github.com/rueian/rueidis/lua.go create mode 100644 vendor/github.com/rueian/rueidis/message.go create mode 100644 vendor/github.com/rueian/rueidis/mux.go create mode 100644 vendor/github.com/rueian/rueidis/pipe.go create mode 100644 vendor/github.com/rueian/rueidis/pool.go create mode 100644 vendor/github.com/rueian/rueidis/pubsub.go create mode 100644 vendor/github.com/rueian/rueidis/resp.go create mode 100644 vendor/github.com/rueian/rueidis/ring.go create mode 100644 vendor/github.com/rueian/rueidis/rueidis.go create mode 100644 vendor/github.com/rueian/rueidis/sentinel.go create mode 100644 vendor/github.com/rueian/rueidis/singleflight.go create mode 100644 
vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/copy.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/port.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr_linux.go delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go create mode 100644 vendor/github.com/weaveworks/common/httpgrpc/tools.go create mode 100644 vendor/github.com/weaveworks/common/server/tls_config.go create mode 100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go create mode 100644 vendor/golang.org/x/text/cases/cases.go create mode 100644 vendor/golang.org/x/text/cases/context.go create mode 100644 vendor/golang.org/x/text/cases/fold.go create mode 100644 vendor/golang.org/x/text/cases/icu.go create mode 100644 vendor/golang.org/x/text/cases/info.go create mode 100644 vendor/golang.org/x/text/cases/map.go create mode 100644 vendor/golang.org/x/text/cases/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/cases/trieval.go create mode 100644 vendor/golang.org/x/text/internal/internal.go create mode 100644 vendor/golang.org/x/text/internal/language/common.go create mode 100644 vendor/golang.org/x/text/internal/language/compact.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/compact.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/language.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/parents.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/tags.go create mode 100644 vendor/golang.org/x/text/internal/language/compose.go create mode 100644 vendor/golang.org/x/text/internal/language/coverage.go create mode 100644 vendor/golang.org/x/text/internal/language/language.go create mode 100644 vendor/golang.org/x/text/internal/language/lookup.go create mode 100644 vendor/golang.org/x/text/internal/language/match.go create mode 100644 vendor/golang.org/x/text/internal/language/parse.go create mode 100644 vendor/golang.org/x/text/internal/language/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/tags.go create mode 100644 vendor/golang.org/x/text/internal/match.go create mode 100644 vendor/golang.org/x/text/internal/tag/tag.go create mode 100644 vendor/golang.org/x/text/language/coverage.go create mode 100644 vendor/golang.org/x/text/language/doc.go create mode 100644 vendor/golang.org/x/text/language/language.go create mode 100644 vendor/golang.org/x/text/language/match.go create mode 100644 vendor/golang.org/x/text/language/parse.go create mode 100644 
vendor/golang.org/x/text/language/tables.go create mode 100644 vendor/golang.org/x/text/language/tags.go rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/bexport.go (99%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/bimport.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/exportdata.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/gcimporter.go (95%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/iexport.go (90%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/iimport.go (95%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/newInterface10.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/newInterface11.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/support_go117.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/support_go118.go (62%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/unified_no.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/unified_yes.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/ureader_no.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/ureader_yes.go (72%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/codes.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/decoder.go (83%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/doc.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/encoder.go (96%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/flags.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/frames_go1.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/frames_go17.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/reloc.go (95%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/support.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/sync.go (100%) rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/syncmarker_string.go (100%) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 705c84ad305..2bd0d36cc1a 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -297,6 +297,16 @@ The `server_config` configures the HTTP and gRPC server of the launched service( # CLI flag: -server.grpc-conn-limit [grpc_listen_conn_limit: | default = 0] +# Comma-separated list of cipher suites to use. If blank, the default Go cipher +# suites is used. +# CLI flag: -server.tls-cipher-suites +[tls_cipher_suites: | default = ""] + +# Minimum TLS version to use. Allowed values: VersionTLS10, VersionTLS11, +# VersionTLS12, VersionTLS13. If blank, the Go TLS minimum version is used. +# CLI flag: -server.tls-min-version +[tls_min_version: | default = ""] + http_tls_config: # HTTP server cert path. 
# CLI flag: -server.http-tls-cert-path diff --git a/go.mod b/go.mod index 545e4c73b64..c3e4661a64b 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.0 + github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/felixge/fgprof v0.9.3 github.com/go-kit/log v0.2.1 @@ -38,19 +39,19 @@ require ( github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/alertmanager v0.24.0 + github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.37.0 - github.com/prometheus/prometheus v0.39.1 + github.com/prometheus/common v0.38.0 + github.com/prometheus/prometheus v0.40.7 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 - github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 + github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a github.com/uber/jaeger-client-go v2.30.0+incompatible - github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae + github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.6 go.etcd.io/etcd/client/pkg/v3 v3.5.6 go.etcd.io/etcd/client/v3 v3.5.6 @@ -63,8 +64,8 @@ require ( go.opentelemetry.io/otel/trace v1.11.2 go.uber.org/atomic v1.10.0 golang.org/x/net v0.4.0 - golang.org/x/sync v0.0.0-20220907140024-f12130a52804 - golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 + golang.org/x/sync v0.1.0 + golang.org/x/time v0.1.0 google.golang.org/grpc v1.51.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -72,9 +73,10 @@ require ( ) require ( - cloud.google.com/go v0.104.0 // indirect - cloud.google.com/go/compute v1.7.0 // indirect - cloud.google.com/go/iam v0.3.0 // indirect + cloud.google.com/go v0.105.0 // indirect + cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute/metadata v0.2.1 // indirect + cloud.google.com/go/iam v0.6.0 // indirect cloud.google.com/go/storage v1.27.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect @@ -85,7 +87,7 @@ require ( github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/aws/aws-sdk-go v1.44.109 // indirect + github.com/aws/aws-sdk-go v1.44.156 // indirect github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect @@ -97,42 +99,42 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 // indirect github.com/aws/smithy-go v1.11.1 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 
// indirect github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/docker/go-units v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/runtime v0.23.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/validate v0.21.0 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/runtime v0.25.0 // indirect + github.com/go-openapi/spec v0.20.7 // indirect + github.com/go-openapi/validate v0.22.0 // indirect + github.com/gofrs/uuid v4.3.1+incompatible // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/golang-jwt/jwt v3.2.1+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 // indirect + github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.5.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.6.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -141,7 +143,7 @@ require ( github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/jessevdk/go-flags v1.5.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -156,7 +158,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.0 // 
indirect @@ -170,11 +172,12 @@ require ( github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.7.3 // indirect + github.com/prometheus/exporter-toolkit v0.8.2 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/rs/xid v1.4.0 // indirect + github.com/rueian/rueidis v0.0.90 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect @@ -185,29 +188,30 @@ require ( github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a // indirect github.com/weaveworks/promrus v1.2.0 // indirect github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect - go.mongodb.org/mongo-driver v1.10.2 // indirect + go.mongodb.org/mongo-driver v1.11.0 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.32.0 // indirect + go.opentelemetry.io/otel/metric v0.33.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect + golang.org/x/crypto v0.1.0 // indirect + golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/oauth2 v0.1.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect - golang.org/x/tools v0.1.12 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect - google.golang.org/api v0.97.0 // indirect + golang.org/x/tools v0.4.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.102.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect + google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.66.6 // indirect diff --git a/go.sum b/go.sum index f77076f2f35..9b7c122d0b0 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0c cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.104.0 
h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -45,13 +45,18 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -195,9 +200,8 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.109 h1:+Na5JPeS0kiEHoBp5Umcuuf+IDqXqD0lXnM920E31YI= -github.com/aws/aws-sdk-go v1.44.109/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.156 h1:3RhbBTZ87HoI5OP2JjcKdd5qOnyo9YOAW8+Bb/h0vSE= +github.com/aws/aws-sdk-go v1.44.156/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -249,8 +253,9 
@@ github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/baidubce/bce-sdk-go v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW87MDV4= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -287,8 +292,9 @@ github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -432,8 +438,9 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -462,7 +469,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= github.com/dhui/dktest v0.3.10/go.mod 
h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/digitalocean/godo v1.84.1 h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo= +github.com/digitalocean/godo v1.88.0 h1:SAEdw63xOMmzlwCeCWjLH1GcyDPUjbSAR1Bh7VELxzc= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -473,7 +480,7 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= +github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -481,8 +488,9 @@ github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6Uezg github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -509,14 +517,13 @@ github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPO github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8= +github.com/envoyproxy/protoc-gen-validate v0.6.13 h1:TvDcILLkjuZV3ER58VkBmncKsLUBqBDxra/XctCzuMM= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod 
h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= @@ -535,8 +542,8 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= @@ -556,7 +563,6 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= @@ -578,12 +584,14 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= 
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= @@ -593,15 +601,15 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9 github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/runtime v0.23.1 h1:/Drg9R96eMmgKJHVWZADz78XbE39/6QiIiB45mc+epo= -github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= +github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= github.com/go-openapi/spec v0.20.6 h1:ich1RQ3WDbfoeTqTAb+5EIxNmpKVJZWBNah9RAT0jIQ= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= @@ -611,19 +619,14 @@ github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= -github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= +github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 
h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -651,7 +654,6 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -661,8 +663,8 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= @@ -784,8 +786,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 h1:ykKxL12NZd3JmWZnyqarJGsF73M9Xhtrik/FEtEeFRE= -github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io= +github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -794,8 +796,8 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -803,8 +805,8 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= @@ -894,13 +896,14 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= 
-github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU= +github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005 h1:jKwXhVS4F7qk0g8laz+Anz0g/6yaSJ3HqmSAuSNLUcA= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= @@ -1025,7 +1028,7 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1048,7 +1051,6 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtB github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -1059,7 +1061,7 @@ github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34= +github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1080,7 +1082,6 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable 
v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -1104,8 +1105,9 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1137,7 +1139,6 @@ github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= @@ -1221,7 +1222,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1269,6 +1270,7 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oracle/oci-go-sdk/v65 v65.13.0 h1:0+9ea5goYfhI3/MPfbIQU6yzHYWE6sCk6VuUepxk5Nk= +github.com/ovh/go-ovh v1.1.0 
h1:bHXZmw8nTgZin4Nv7JuaLs0KG5x54EQR7migYTd1zrk= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1303,8 +1305,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw= -github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= +github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg= +github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -1318,6 +1320,7 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1342,14 +1345,13 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.38.0 h1:VTQitp6mXTdUoCmDMugDVOJ1opi6ADftKfp/yeqTR/E= +github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.3 h1:IYBn0CTGi/nYxstdTUKysuSofUNJ3DQW3FmZ/Ub6rgU= 
-github.com/prometheus/exporter-toolkit v0.7.3/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= +github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1366,8 +1368,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.39.1 h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0= -github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= +github.com/prometheus/prometheus v0.40.7 h1:cYtp4YrR9M99YpTUfXbei/HjIJJ+En23NKsTCeZ2U2w= +github.com/prometheus/prometheus v0.40.7/go.mod h1:nO+vI0cJo1ezp2DPGw5NEnTlYHGRpBFrqE4zb9O0g0U= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1386,6 +1388,8 @@ github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rueian/rueidis v0.0.90 h1:zuTFPvouPmO4FojrWg2p47XhiFcJ5Ux/cQu37ZRIdVs= +github.com/rueian/rueidis v0.0.90/go.mod h1:LiKWMM/QnILwRfDZIhSIXi4vQqZ/UZy4+/aNkSCt8XA= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1442,7 +1446,6 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1489,8 +1492,8 @@ github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1 github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= 
github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= -github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 h1:NRu4DwMAHgI+AJ0eevwQ8mVgrT15KskHOIKtQ0T3/EE= -github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5/go.mod h1:2bDlu7xW2TXrSt/TGBozMMVECjjrnqoLJKc/ZIiPngA= +github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a h1:oN3VupYNkavPRvdXwq71p54SAFSbOGvL0qL7CeKFrJ0= +github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a/go.mod h1:xZKr/xpbijYM8EqTMpurqgKItyIPFy1wqUd/DMTDu4M= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1520,8 +1523,8 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= -github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae h1:Z8YibUpdBEdCq8nwrYXJQ8vYooevbmEBIdFpseXK3/8= -github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae/go.mod h1:YfOOLoW1Q/jIIu0WLeSwgStmrKjuJEZSKTAUc+0KFvE= +github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d h1:9Z/HiqeGN+LOnmotAMpFEQjuXZ4AGAVFG0rC1laP5Go= +github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d/go.mod h1:Fnq3+U51tMkPRMC6Wr7zKGUeFFYX4YjNrNK50iU0fcE= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1537,7 +1540,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1545,7 +1547,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark 
v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ= github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= @@ -1575,10 +1577,9 @@ go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVd go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= -go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.mongodb.org/mongo-driver v1.11.0 h1:FZKhBSTydeuffHj9CBjXlR8vQLee1cQyTWYPA6/tqiE= +go.mongodb.org/mongo-driver v1.11.0/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1594,8 +1595,8 @@ go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUz go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4qPzLBDtQyPk4ydjlg8zvXbNysnFHaVMKJbVo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 h1:aUEBEdCa6iamGzg6fuYxDA8ThxvOG240mAvWDU+XLio= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= go.opentelemetry.io/contrib/propagators/aws v1.12.0 h1:n3lNyZs2ytVEfFhcn2QO5Z80lgrMXyP1Ncx0C7rUU8A= @@ -1624,8 +1625,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKP go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k= -go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY= 
+go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E= +go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= @@ -1647,7 +1648,6 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -1688,17 +1688,16 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1713,6 +1712,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE= +golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1746,9 +1747,9 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1816,7 +1817,6 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1828,7 +1828,9 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= 
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1853,9 +1855,9 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1869,8 +1871,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1994,7 +1997,6 @@ golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2015,7 +2017,10 @@ golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2023,6 +2028,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2033,6 +2039,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2044,8 +2051,8 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 h1:yuLAip3bfURHClMG9VBdzPrQvCWjWiWUTBGV+/fCbUs= -golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2126,9 +2133,9 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2137,8 +2144,9 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= @@ -2187,8 +2195,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.97.0 h1:x/vEL1XDF/2V4xzdNgFPaKHluRESo2aTsL7QzHnBtGQ= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2293,9 +2301,8 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto 
v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2341,7 +2348,6 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -2380,13 +2386,13 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M= +k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI= +k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2395,7 +2401,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58= +k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= 
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= diff --git a/pkg/alertmanager/alertmanager.go b/pkg/alertmanager/alertmanager.go index 5917d4c8c89..5cb46ca5927 100644 --- a/pkg/alertmanager/alertmanager.go +++ b/pkg/alertmanager/alertmanager.go @@ -248,7 +248,7 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { callback = newAlertsLimiter(am.cfg.UserID, am.cfg.Limits, reg) } - am.alerts, err = mem.NewAlerts(context.Background(), am.marker, 30*time.Minute, callback, am.logger) + am.alerts, err = mem.NewAlerts(context.Background(), am.marker, 30*time.Minute, callback, am.logger, am.registry) if err != nil { return nil, fmt.Errorf("failed to create alerts: %v", err) } diff --git a/pkg/alertmanager/alertmanager_http_test.go b/pkg/alertmanager/alertmanager_http_test.go index d7bdb508058..d34049a6b21 100644 --- a/pkg/alertmanager/alertmanager_http_test.go +++ b/pkg/alertmanager/alertmanager_http_test.go @@ -29,7 +29,7 @@ func TestMultitenantAlertmanager_GetStatusHandler(t *testing.T) { true, cluster.DefaultPushPullInterval, cluster.DefaultGossipInterval, - cluster.DefaultTcpTimeout, + cluster.DefaultTCPTimeout, cluster.DefaultProbeTimeout, cluster.DefaultProbeInterval, nil, diff --git a/pkg/alertmanager/api_test.go b/pkg/alertmanager/api_test.go index 981f81b3101..04784c1b5e2 100644 --- a/pkg/alertmanager/api_test.go +++ b/pkg/alertmanager/api_test.go @@ -441,7 +441,6 @@ alertmanager_config: | - name: default-receiver victorops_configs: - api_key_file: /secrets - api_key: my-key routing_key: test route: diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index bb5d1d54977..4a5c2b7ab55 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -312,7 +312,7 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, store alerts true, cfg.Cluster.PushPullInterval, cfg.Cluster.GossipInterval, - cluster.DefaultTcpTimeout, + cluster.DefaultTCPTimeout, cluster.DefaultProbeTimeout, cluster.DefaultProbeInterval, nil, diff --git a/pkg/chunk/encoding/prometheus_chunk.go b/pkg/chunk/encoding/prometheus_chunk.go index 3bab45b1742..bbc4d43660a 100644 --- a/pkg/chunk/encoding/prometheus_chunk.go +++ b/pkg/chunk/encoding/prometheus_chunk.go @@ -84,14 +84,14 @@ type prometheusChunkIterator struct { } func (p *prometheusChunkIterator) Scan() bool { - return p.it.Next() + return p.it.Next() != chunkenc.ValNone } func (p *prometheusChunkIterator) FindAtOrAfter(time model.Time) bool { // FindAtOrAfter must return OLDEST value at given time. That means we need to start with a fresh iterator, // otherwise we cannot guarantee OLDEST. 
p.it = p.c.Iterator(p.it) - return p.it.Seek(int64(time)) + return p.it.Seek(int64(time)) != chunkenc.ValNone } func (p *prometheusChunkIterator) Value() model.SamplePair { @@ -110,7 +110,7 @@ func (p *prometheusChunkIterator) Batch(size int) Batch { batch.Timestamps[j] = t batch.Values[j] = v j++ - if j < size && !p.it.Next() { + if j < size && p.it.Next() == chunkenc.ValNone { break } } diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index f08e19a9283..6ba27589476 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -29,7 +29,7 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" - "github.com/thanos-io/thanos/pkg/testutil" + thanos_testutil "github.com/thanos-io/thanos/pkg/testutil/e2eutil" "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/ring" @@ -974,7 +974,7 @@ func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { b1 := createTSDBBlock(t, bucketClient, "user-1", 10, 20, map[string]string{"__name__": "Teste"}) b2 := createTSDBBlock(t, bucketClient, "user-1", 20, 30, map[string]string{"__name__": "Teste"}) - err := testutil.PutOutOfOrderIndex(path.Join(tmpDir, "user-1", b1.String()), 10, 20) + err := thanos_testutil.PutOutOfOrderIndex(path.Join(tmpDir, "user-1", b1.String()), 10, 20) require.NoError(t, err) cfg := prepareConfig() diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index 450689911c1..662ad0800ce 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -7,12 +7,12 @@ import ( "testing" "unsafe" + "github.com/efficientgo/core/testutil" jsoniter "github.com/json-iterator/go" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/thanos-io/thanos/pkg/testutil" ) // This test verifies that jsoninter uses our custom method for marshalling. 
diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go index 4a88a7e7c39..311430e1944 100644 --- a/pkg/cortexpb/cortex.pb.go +++ b/pkg/cortexpb/cortex.pb.go @@ -510,51 +510,50 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 697 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4a, - 0x10, 0xf7, 0xe6, 0x7f, 0x86, 0x90, 0x67, 0xed, 0x43, 0x7a, 0x16, 0x07, 0x27, 0xf8, 0x5d, 0x72, - 0x78, 0x2f, 0x54, 0x54, 0x6d, 0xd5, 0xaa, 0xaa, 0xe4, 0x54, 0x81, 0x22, 0xc8, 0x1f, 0x6d, 0x9c, - 0xa2, 0xf6, 0x12, 0x6d, 0xc2, 0x02, 0x56, 0xed, 0xd8, 0xb5, 0xd7, 0x88, 0xdc, 0x7a, 0xea, 0xb9, - 0xe7, 0x7e, 0x82, 0x7e, 0x82, 0x4a, 0xfd, 0x06, 0x1c, 0x39, 0xa2, 0x1e, 0x50, 0x31, 0x17, 0x8e, - 0x7c, 0x84, 0xca, 0x6b, 0x27, 0x06, 0x55, 0xdc, 0xb8, 0xcd, 0xcc, 0x6f, 0x7e, 0x33, 0xb3, 0xf3, - 0x1b, 0x2d, 0x54, 0x26, 0x8e, 0xc7, 0xd9, 0x49, 0xd3, 0xf5, 0x1c, 0xee, 0xe0, 0x52, 0xec, 0xb9, - 0xe3, 0xd5, 0xff, 0x0f, 0x4d, 0x7e, 0x14, 0x8c, 0x9b, 0x13, 0xc7, 0x5e, 0x3f, 0x74, 0x0e, 0x9d, - 0x75, 0x91, 0x30, 0x0e, 0x0e, 0x84, 0x27, 0x1c, 0x61, 0xc5, 0x44, 0xed, 0x7b, 0x06, 0x2a, 0x7b, - 0x9e, 0xc9, 0x19, 0x61, 0x1f, 0x03, 0xe6, 0x73, 0xdc, 0x07, 0xe0, 0xa6, 0xcd, 0x7c, 0xe6, 0x99, - 0xcc, 0x57, 0x50, 0x3d, 0xdb, 0x58, 0xda, 0x58, 0x69, 0xce, 0xcb, 0x37, 0x0d, 0xd3, 0x66, 0x03, - 0x81, 0xb5, 0x56, 0x4f, 0x2f, 0x6a, 0xd2, 0xcf, 0x8b, 0x1a, 0xee, 0x7b, 0x8c, 0x5a, 0x96, 0x33, - 0x31, 0x16, 0x3c, 0x72, 0xab, 0x06, 0x7e, 0x0e, 0x85, 0x81, 0x13, 0x78, 0x13, 0xa6, 0x64, 0xea, - 0xa8, 0x51, 0xdd, 0x58, 0x4b, 0xab, 0xdd, 0xee, 0xdc, 0x8c, 0x93, 0xda, 0xd3, 0xc0, 0x26, 0x09, - 0x01, 0xbf, 0x80, 0x92, 0xcd, 0x38, 0xdd, 0xa7, 0x9c, 0x2a, 0x59, 0x31, 0x8a, 0x92, 0x92, 0x3b, - 0x8c, 0x7b, 0xe6, 0xa4, 0x93, 0xe0, 0xad, 0xdc, 0xe9, 0x45, 0x0d, 0x91, 0x45, 0x3e, 0x7e, 0x09, - 0xab, 0xfe, 0x07, 0xd3, 0x1d, 0x59, 0x74, 0xcc, 0xac, 0xd1, 0x94, 0xda, 0x6c, 0x74, 0x4c, 0x2d, - 0x73, 0x9f, 0x72, 0xd3, 0x99, 0x2a, 0xd7, 0xc5, 0x3a, 0x6a, 0x94, 0xc8, 0x3f, 0x51, 0xca, 0x6e, - 0x94, 0xd1, 0xa5, 0x36, 0x7b, 0xbb, 0xc0, 0xb5, 0x1a, 0x40, 0x3a, 0x0f, 0x2e, 0x42, 0x56, 0xef, - 0x6f, 0xcb, 0x12, 0x2e, 0x41, 0x8e, 0x0c, 0x77, 0xdb, 0x32, 0xd2, 0xfe, 0x82, 0xe5, 0x64, 0x7a, - 0xdf, 0x75, 0xa6, 0x3e, 0xd3, 0x7e, 0x20, 0x80, 0x74, 0x3b, 0x58, 0x87, 0x82, 0xe8, 0x3c, 0xdf, - 0xe1, 0xdf, 0xe9, 0xe0, 0xa2, 0x5f, 0x9f, 0x9a, 0x5e, 0x6b, 0x25, 0x59, 0x61, 0x45, 0x84, 0xf4, - 0x7d, 0xea, 0x72, 0xe6, 0x91, 0x84, 0x88, 0x1f, 0x41, 0xd1, 0xa7, 0xb6, 0x6b, 0x31, 0x5f, 0xc9, - 0x88, 0x1a, 0x72, 0x5a, 0x63, 0x20, 0x00, 0xf1, 0x68, 0x89, 0xcc, 0xd3, 0xf0, 0x53, 0x28, 0xb3, - 0x13, 0x66, 0xbb, 0x16, 0xf5, 0xfc, 0x64, 0x61, 0x38, 0xe5, 0xb4, 0x13, 0x28, 0x61, 0xa5, 0xa9, - 0xda, 0x13, 0x28, 0x2f, 0x86, 0xc2, 0x18, 0x72, 0xd1, 0xb6, 0x14, 0x54, 0x47, 0x8d, 0x0a, 0x11, - 0x36, 0x5e, 0x81, 0xfc, 0x31, 0xb5, 0x82, 0x58, 0xc2, 0x0a, 0x89, 0x1d, 0x4d, 0x87, 0x42, 0x3c, - 0x47, 0x8a, 0x47, 0x24, 0x94, 0xe0, 0x78, 0x0d, 0x2a, 0xe2, 0x0e, 0x38, 0xb5, 0xdd, 0x91, 0xed, - 0x0b, 0x72, 0x96, 0x2c, 0x2d, 0x62, 0x1d, 0x5f, 0xfb, 0x9a, 0x81, 0xea, 0x5d, 0x21, 0xf1, 0x33, - 0xc8, 0xf1, 0x99, 0x1b, 0x97, 0xaa, 0x6e, 0xfc, 0x7b, 0x9f, 0xe0, 0x89, 0x6b, 0xcc, 0x5c, 0x46, - 0x04, 0x01, 0xff, 0x07, 0xd8, 0x16, 0xb1, 0xd1, 0x01, 0xb5, 0x4d, 0x6b, 0x26, 0x44, 0x17, 0x4d, - 0xcb, 0x44, 0x8e, 0x91, 0x4d, 0x01, 0x44, 0x5a, 0x47, 0xcf, 0x3c, 0x62, 0x96, 0xab, 0xe4, 0x04, - 0x2e, 0xec, 0x28, 0x16, 0x4c, 
0x4d, 0xae, 0xe4, 0xe3, 0x58, 0x64, 0x6b, 0x33, 0x80, 0xb4, 0x13, - 0x5e, 0x82, 0xe2, 0xb0, 0xbb, 0xd3, 0xed, 0xed, 0x75, 0x65, 0x29, 0x72, 0x5e, 0xf7, 0x86, 0x5d, - 0xa3, 0x4d, 0x64, 0x84, 0xcb, 0x90, 0xdf, 0xd2, 0x87, 0x5b, 0x6d, 0x39, 0x83, 0x97, 0xa1, 0xfc, - 0x66, 0x7b, 0x60, 0xf4, 0xb6, 0x88, 0xde, 0x91, 0xb3, 0x18, 0x43, 0x55, 0x20, 0x69, 0x2c, 0x17, - 0x51, 0x07, 0xc3, 0x4e, 0x47, 0x27, 0xef, 0xe4, 0x7c, 0x74, 0x55, 0xdb, 0xdd, 0xcd, 0x9e, 0x5c, - 0xc0, 0x15, 0x28, 0x0d, 0x0c, 0xdd, 0x68, 0x0f, 0xda, 0x86, 0x5c, 0xd4, 0x76, 0xa0, 0x10, 0xb7, - 0x7e, 0x80, 0x6b, 0xd2, 0x3e, 0x23, 0x28, 0xcd, 0x2f, 0xe0, 0x21, 0xae, 0xf3, 0xce, 0x49, 0xdc, - 0x2b, 0x79, 0xf6, 0x0f, 0xc9, 0x5b, 0xaf, 0xce, 0x2e, 0x55, 0xe9, 0xfc, 0x52, 0x95, 0x6e, 0x2e, - 0x55, 0xf4, 0x29, 0x54, 0xd1, 0xb7, 0x50, 0x45, 0xa7, 0xa1, 0x8a, 0xce, 0x42, 0x15, 0xfd, 0x0a, - 0x55, 0x74, 0x1d, 0xaa, 0xd2, 0x4d, 0xa8, 0xa2, 0x2f, 0x57, 0xaa, 0x74, 0x76, 0xa5, 0x4a, 0xe7, - 0x57, 0xaa, 0xf4, 0x7e, 0xf1, 0xc3, 0x8d, 0x0b, 0xe2, 0xe7, 0x7a, 0xfc, 0x3b, 0x00, 0x00, 0xff, - 0xff, 0x04, 0x70, 0x7e, 0x13, 0x02, 0x05, 0x00, 0x00, + // 680 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xf6, 0xe6, 0x7f, 0xa6, 0x6e, 0x7e, 0xd6, 0xfe, 0x22, 0x61, 0xf5, 0xe0, 0xa4, 0xe6, 0x92, + 0x03, 0x0a, 0xa8, 0x08, 0x10, 0x08, 0x21, 0x39, 0xc8, 0x2d, 0x55, 0x9b, 0x3f, 0x5a, 0x3b, 0x54, + 0x70, 0x89, 0xb6, 0xe9, 0x52, 0x2c, 0xec, 0xd8, 0xd8, 0x4e, 0xd5, 0xdc, 0x38, 0x71, 0xe6, 0xcc, + 0x13, 0xf0, 0x04, 0x48, 0xbc, 0x41, 0x8f, 0x3d, 0x56, 0x1c, 0x2a, 0xea, 0x5c, 0x7a, 0xec, 0x23, + 0x20, 0xaf, 0x9d, 0xb8, 0x15, 0xea, 0xad, 0xb7, 0x99, 0xf9, 0xe6, 0x9b, 0x99, 0x9d, 0x6f, 0xb4, + 0x20, 0x8e, 0x5d, 0x3f, 0x64, 0xc7, 0x6d, 0xcf, 0x77, 0x43, 0x17, 0x57, 0x12, 0xcf, 0xdb, 0x5f, + 0xab, 0x1f, 0xba, 0x87, 0x2e, 0x0f, 0x3e, 0x8c, 0xad, 0x04, 0x57, 0x7f, 0xe6, 0x40, 0xdc, 0xf3, + 0xad, 0x90, 0x11, 0xf6, 0x79, 0xca, 0x82, 0x10, 0x0f, 0x00, 0x42, 0xcb, 0x61, 0x01, 0xf3, 0x2d, + 0x16, 0xc8, 0xa8, 0x99, 0x6f, 0xad, 0x6c, 0xd4, 0xdb, 0x8b, 0x2a, 0x6d, 0xd3, 0x72, 0x98, 0xc1, + 0xb1, 0xce, 0xda, 0xc9, 0x79, 0x43, 0xf8, 0x7d, 0xde, 0xc0, 0x03, 0x9f, 0x51, 0xdb, 0x76, 0xc7, + 0xe6, 0x92, 0x47, 0xae, 0xd5, 0xc0, 0xcf, 0xa1, 0x64, 0xb8, 0x53, 0x7f, 0xcc, 0xe4, 0x5c, 0x13, + 0xb5, 0x6a, 0x1b, 0xeb, 0x59, 0xb5, 0xeb, 0x9d, 0xdb, 0x49, 0x92, 0x3e, 0x99, 0x3a, 0x24, 0x25, + 0xe0, 0x17, 0x50, 0x71, 0x58, 0x48, 0x0f, 0x68, 0x48, 0xe5, 0x3c, 0x1f, 0x45, 0xce, 0xc8, 0x5d, + 0x16, 0xfa, 0xd6, 0xb8, 0x9b, 0xe2, 0x9d, 0xc2, 0xc9, 0x79, 0x03, 0x91, 0x65, 0x3e, 0x7e, 0x09, + 0x6b, 0xc1, 0x27, 0xcb, 0x1b, 0xd9, 0x74, 0x9f, 0xd9, 0xa3, 0x09, 0x75, 0xd8, 0xe8, 0x88, 0xda, + 0xd6, 0x01, 0x0d, 0x2d, 0x77, 0x22, 0x5f, 0x96, 0x9b, 0xa8, 0x55, 0x21, 0xf7, 0xe2, 0x94, 0xdd, + 0x38, 0xa3, 0x47, 0x1d, 0xf6, 0x76, 0x89, 0xab, 0x0d, 0x80, 0x6c, 0x1e, 0x5c, 0x86, 0xbc, 0x36, + 0xd8, 0x96, 0x04, 0x5c, 0x81, 0x02, 0x19, 0xee, 0xea, 0x12, 0x52, 0xff, 0x83, 0xd5, 0x74, 0xfa, + 0xc0, 0x73, 0x27, 0x01, 0x53, 0x7f, 0x21, 0x80, 0x6c, 0x3b, 0x58, 0x83, 0x12, 0xef, 0xbc, 0xd8, + 0xe1, 0xff, 0xd9, 0xe0, 0xbc, 0xdf, 0x80, 0x5a, 0x7e, 0xa7, 0x9e, 0xae, 0x50, 0xe4, 0x21, 0xed, + 0x80, 0x7a, 0x21, 0xf3, 0x49, 0x4a, 0xc4, 0x8f, 0xa0, 0x1c, 0x50, 0xc7, 0xb3, 0x59, 0x20, 0xe7, + 0x78, 0x0d, 0x29, 0xab, 0x61, 0x70, 0x80, 0x3f, 0x5a, 0x20, 0x8b, 0x34, 0xfc, 0x14, 0xaa, 0xec, + 0x98, 0x39, 0x9e, 0x4d, 0xfd, 0x20, 0x5d, 0x18, 0xce, 0x38, 0x7a, 0x0a, 0xa5, 0xac, 0x2c, 0x55, + 0x7d, 0x02, 0xd5, 0xe5, 0x50, 0x18, 0x43, 0x21, 
0xde, 0x96, 0x8c, 0x9a, 0xa8, 0x25, 0x12, 0x6e, + 0xe3, 0x3a, 0x14, 0x8f, 0xa8, 0x3d, 0x4d, 0x24, 0x14, 0x49, 0xe2, 0xa8, 0x1a, 0x94, 0x92, 0x39, + 0x32, 0x3c, 0x26, 0xa1, 0x14, 0xc7, 0xeb, 0x20, 0xf2, 0x3b, 0x08, 0xa9, 0xe3, 0x8d, 0x9c, 0x80, + 0x93, 0xf3, 0x64, 0x65, 0x19, 0xeb, 0x06, 0xea, 0xf7, 0x1c, 0xd4, 0x6e, 0x0a, 0x89, 0x9f, 0x41, + 0x21, 0x9c, 0x79, 0x49, 0xa9, 0xda, 0xc6, 0xfd, 0xdb, 0x04, 0x4f, 0x5d, 0x73, 0xe6, 0x31, 0xc2, + 0x09, 0xf8, 0x01, 0x60, 0x87, 0xc7, 0x46, 0x1f, 0xa8, 0x63, 0xd9, 0x33, 0x2e, 0x3a, 0x6f, 0x5a, + 0x25, 0x52, 0x82, 0x6c, 0x72, 0x20, 0xd6, 0x3a, 0x7e, 0xe6, 0x47, 0x66, 0x7b, 0x72, 0x81, 0xe3, + 0xdc, 0x8e, 0x63, 0xd3, 0x89, 0x15, 0xca, 0xc5, 0x24, 0x16, 0xdb, 0xea, 0x0c, 0x20, 0xeb, 0x84, + 0x57, 0xa0, 0x3c, 0xec, 0xed, 0xf4, 0xfa, 0x7b, 0x3d, 0x49, 0x88, 0x9d, 0xd7, 0xfd, 0x61, 0xcf, + 0xd4, 0x89, 0x84, 0x70, 0x15, 0x8a, 0x5b, 0xda, 0x70, 0x4b, 0x97, 0x72, 0x78, 0x15, 0xaa, 0x6f, + 0xb6, 0x0d, 0xb3, 0xbf, 0x45, 0xb4, 0xae, 0x94, 0xc7, 0x18, 0x6a, 0x1c, 0xc9, 0x62, 0x85, 0x98, + 0x6a, 0x0c, 0xbb, 0x5d, 0x8d, 0xbc, 0x93, 0x8a, 0xf1, 0x55, 0x6d, 0xf7, 0x36, 0xfb, 0x52, 0x09, + 0x8b, 0x50, 0x31, 0x4c, 0xcd, 0xd4, 0x0d, 0xdd, 0x94, 0xca, 0xea, 0x0e, 0x94, 0x92, 0xd6, 0x77, + 0x70, 0x4d, 0xea, 0x57, 0x04, 0x95, 0xc5, 0x05, 0xdc, 0xc5, 0x75, 0xde, 0x38, 0x89, 0x5b, 0x25, + 0xcf, 0xff, 0x23, 0x79, 0xe7, 0xd5, 0xe9, 0x85, 0x22, 0x9c, 0x5d, 0x28, 0xc2, 0xd5, 0x85, 0x82, + 0xbe, 0x44, 0x0a, 0xfa, 0x11, 0x29, 0xe8, 0x24, 0x52, 0xd0, 0x69, 0xa4, 0xa0, 0x3f, 0x91, 0x82, + 0x2e, 0x23, 0x45, 0xb8, 0x8a, 0x14, 0xf4, 0x6d, 0xae, 0x08, 0xa7, 0x73, 0x45, 0x38, 0x9b, 0x2b, + 0xc2, 0xfb, 0xe5, 0x47, 0xb6, 0x5f, 0xe2, 0x3f, 0xd7, 0xe3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x5e, 0x23, 0x91, 0x5d, 0xe9, 0x04, 0x00, 0x00, } func (x WriteRequest_SourceEnum) String() string { diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index 632422d0337..4d683fbcc0e 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -4,7 +4,7 @@ package cortexpb; option go_package = "cortexpb"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/pkg/distributor/distributorpb/distributor.pb.go b/pkg/distributor/distributorpb/distributor.pb.go index 8fe9bcf02cf..9711c9efe66 100644 --- a/pkg/distributor/distributorpb/distributor.pb.go +++ b/pkg/distributor/distributorpb/distributor.pb.go @@ -29,21 +29,21 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("distributor.proto", fileDescriptor_c518e33639ca565d) } var fileDescriptor_c518e33639ca565d = []byte{ - // 221 bytes of a gzipped FileDescriptorProto + // 212 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xc9, 0x2c, 0x2e, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46, - 0x12, 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, - 0x4f, 0xcf, 0xd7, 0x07, 0xab, 0x49, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x57, - 0xca, 0x12, 0x49, 0x79, 0x72, 0x7e, 0x51, 0x49, 0x6a, 0x45, 0x41, 0x51, 0x7e, 0x56, 0x6a, 0x72, - 0x09, 0x94, 0xa7, 0x5f, 0x90, 0x9d, 0x0e, 0x93, 0x48, 0x82, 0x32, 0x20, 0x5a, 0x8d, 0x3c, 0xb8, - 0xb8, 0x5d, 0x10, 0x16, 0x0b, 0x59, 0x72, 0xb1, 0x04, 0x94, 0x16, 0x67, 0x08, 0x89, 0xe9, 0xc1, - 0x94, 0xeb, 0x85, 0x17, 0x65, 0x96, 0xa4, 0x06, 0xa5, 
0x16, 0x96, 0xa6, 0x16, 0x97, 0x48, 0x89, - 0x63, 0x88, 0x17, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x2a, 0x31, 0x38, 0x39, 0x5f, 0x78, 0x28, 0xc7, - 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, - 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, - 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, - 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x45, 0xf2, 0x76, 0x41, 0x52, 0x12, 0x1b, 0xd8, 0x55, 0xc6, 0x80, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0xcd, 0x1b, 0x85, 0x21, 0x01, 0x00, 0x00, + 0x12, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xeb, 0x83, 0x58, 0x10, 0x25, 0x52, 0x96, + 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xc9, 0xf9, 0x45, 0x25, 0xa9, + 0x15, 0x05, 0x45, 0xf9, 0x59, 0xa9, 0xc9, 0x25, 0x50, 0x9e, 0x7e, 0x41, 0x76, 0x3a, 0x4c, 0x22, + 0x09, 0xca, 0x80, 0x68, 0x35, 0xf2, 0xe0, 0xe2, 0x76, 0x41, 0x98, 0x2f, 0x64, 0xc9, 0xc5, 0x12, + 0x50, 0x5a, 0x9c, 0x21, 0x24, 0xa6, 0x07, 0x53, 0xae, 0x17, 0x5e, 0x94, 0x59, 0x92, 0x1a, 0x94, + 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, 0x8e, 0x21, 0x5e, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0xaa, + 0xc4, 0xe0, 0xe4, 0x7c, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, + 0x36, 0x3c, 0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, + 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x09, 0x8f, + 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x8a, 0x17, 0xc9, 0x77, 0x05, + 0x49, 0x49, 0x6c, 0x60, 0x57, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x5b, 0x37, 0x6f, + 0x08, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/pkg/distributor/distributorpb/distributor.proto b/pkg/distributor/distributorpb/distributor.proto index d473c38be54..5319ba44a18 100644 --- a/pkg/distributor/distributorpb/distributor.proto +++ b/pkg/distributor/distributorpb/distributor.proto @@ -4,7 +4,7 @@ package distributor; option go_package = "distributorpb"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; option (gogoproto.marshaler_all) = true; diff --git a/pkg/distributor/ha_tracker.pb.go b/pkg/distributor/ha_tracker.pb.go index 0e9d494eb14..730d66d1708 100644 --- a/pkg/distributor/ha_tracker.pb.go +++ b/pkg/distributor/ha_tracker.pb.go @@ -96,21 +96,20 @@ func init() { func init() { proto.RegisterFile("ha_tracker.proto", fileDescriptor_86f0e7bcf71d860b) } var fileDescriptor_86f0e7bcf71d860b = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0x8c, 0x2f, - 0x29, 0x4a, 0x4c, 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0xc9, - 0x2c, 0x2e, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, - 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0xab, 0x49, 0x2a, 0x4d, - 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x57, 0x29, 0x9d, 0x8b, 0x3b, 0x28, 0xb5, 0x20, 0x27, - 0x33, 0x39, 0xd1, 0x25, 0xb5, 0x38, 0x59, 0x48, 0x82, 0x8b, 0xbd, 0x08, 0xc2, 0x95, 0x60, 0x54, - 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x71, 0x85, 0xe4, 0xb9, 0xb8, 0x8b, 0x52, 0x93, 0x53, 0x33, 0xcb, - 0x52, 0x53, 0xe2, 0x13, 0x4b, 0x24, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0xb8, 0x60, 0x42, 0x8e, - 0x25, 0x42, 0xb2, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0xa9, 0x25, 0x10, 0x79, 0x66, 0xb0, 0x3c, 0x27, - 0x54, 0xc4, 0xb1, 0xc4, 0xc9, 0xe4, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, - 0x94, 0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, - 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x87, 0x47, 0x72, 0x8c, - 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0xd8, - 0x95, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xd1, 0xdd, 0x8d, 0xf5, 0x00, 0x00, 0x00, + // 206 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0x31, 0x4e, 0x80, 0x40, + 0x10, 0x45, 0x77, 0x24, 0xd1, 0xb0, 0x34, 0x66, 0x63, 0x41, 0x4c, 0x1c, 0x89, 0x15, 0x95, 0x16, + 0x7a, 0x01, 0x8c, 0x27, 0xe0, 0x02, 0x64, 0x59, 0x26, 0xb8, 0x91, 0x64, 0xc9, 0x32, 0x5a, 0x7b, + 0x04, 0x8f, 0xe1, 0x51, 0x2c, 0x29, 0x29, 0x65, 0x69, 0x2c, 0x39, 0x82, 0x11, 0xa4, 0x9b, 0xf7, + 0xde, 0x14, 0x5f, 0x9e, 0x3f, 0xeb, 0x8a, 0xbd, 0x36, 0x2f, 0xe4, 0x6f, 0x7b, 0xef, 0xd8, 0xa9, + 0xa4, 0xb1, 0x03, 0x7b, 0x5b, 0xbf, 0xb2, 0xf3, 0x97, 0x17, 0xad, 0x6b, 0xdd, 0xe6, 0xef, 0xfe, + 0xae, 0xfd, 0xe5, 0xa6, 0x95, 0x49, 0x49, 0x7d, 0x67, 0x8d, 0x7e, 0xa2, 0xc1, 0xa8, 0x54, 0x9e, + 0xf9, 0x1d, 0x53, 0xc8, 0x20, 0x8f, 0xcb, 0x03, 0xd5, 0xb5, 0x4c, 0x3c, 0x19, 0xb2, 0x6f, 0xd4, + 0x54, 0x9a, 0xd3, 0x93, 0x0c, 0xf2, 0xa8, 0x94, 0x87, 0x2a, 0x58, 0x5d, 0x49, 0xd9, 0x50, 0x47, + 0xbc, 0xf7, 0x68, 0xeb, 0xf1, 0xbf, 0x29, 0xf8, 0xf1, 0x61, 0x9c, 0x51, 0x4c, 0x33, 0x8a, 0x75, + 0x46, 0x78, 0x0f, 0x08, 0x9f, 0x01, 0xe1, 0x2b, 0x20, 0x8c, 0x01, 0xe1, 0x3b, 0x20, 0xfc, 0x04, + 0x14, 0x6b, 0x40, 0xf8, 0x58, 0x50, 0x8c, 0x0b, 0x8a, 0x69, 0x41, 0x51, 0x9f, 0x6e, 0x2b, 0xef, + 0x7f, 0x03, 
0x00, 0x00, 0xff, 0xff, 0xeb, 0x41, 0x50, 0x11, 0xdc, 0x00, 0x00, 0x00, } func (this *ReplicaDesc) Equal(that interface{}) bool { diff --git a/pkg/distributor/ha_tracker.proto b/pkg/distributor/ha_tracker.proto index f58758ee554..98001795785 100644 --- a/pkg/distributor/ha_tracker.proto +++ b/pkg/distributor/ha_tracker.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package distributor; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/pkg/frontend/v1/frontendv1pb/frontend.pb.go b/pkg/frontend/v1/frontendv1pb/frontend.pb.go index 6e8a9be191b..59a3faf3654 100644 --- a/pkg/frontend/v1/frontendv1pb/frontend.pb.go +++ b/pkg/frontend/v1/frontendv1pb/frontend.pb.go @@ -259,40 +259,39 @@ func init() { func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } var fileDescriptor_eca3873955a29cfe = []byte{ - // 515 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x33, 0xba, 0xae, 0xf5, 0xb5, 0x94, 0x30, 0xa8, 0x94, 0x28, 0x63, 0x09, 0x2a, 0x45, - 0x30, 0xd1, 0x2a, 0x88, 0xa2, 0x20, 0x75, 0xeb, 0xba, 0x17, 0xd9, 0x4d, 0xe3, 0xc5, 0xcb, 0xd2, - 0xa4, 0xd3, 0xb4, 0xee, 0x36, 0x93, 0x9d, 0x4c, 0xb6, 0xf6, 0xe6, 0x47, 0x10, 0xfc, 0x12, 0x7e, - 0x06, 0x3f, 0x81, 0xc7, 0x1e, 0x17, 0x0f, 0x62, 0xd3, 0x8b, 0xc7, 0xfd, 0x08, 0xd2, 0x99, 0x34, - 0x9b, 0x96, 0x45, 0xf1, 0x32, 0xcc, 0xcb, 0x7b, 0xff, 0xf7, 0x7e, 0xef, 0x9f, 0x81, 0x6a, 0x9f, - 0xb3, 0x50, 0xd0, 0xb0, 0x67, 0x45, 0x9c, 0x09, 0x86, 0x4b, 0xcb, 0xd8, 0xb8, 0x1f, 0x0c, 0xc5, - 0x20, 0xf1, 0x2c, 0x9f, 0x8d, 0xec, 0x80, 0x05, 0xcc, 0x96, 0x05, 0x5e, 0xd2, 0x97, 0x91, 0x0c, - 0xe4, 0x4d, 0x09, 0x8d, 0xc7, 0x85, 0xf2, 0x31, 0xed, 0x1e, 0xd3, 0x31, 0xe3, 0x07, 0xb1, 0xed, - 0xb3, 0xd1, 0x88, 0x85, 0xf6, 0x40, 0x88, 0x28, 0xe0, 0x91, 0x9f, 0x5f, 0x32, 0xd5, 0x8b, 0x82, - 0xca, 0x67, 0x5c, 0xd0, 0x8f, 0x11, 0x67, 0x1f, 0xa8, 0x2f, 0xb2, 0xc8, 0x8e, 0x0e, 0x02, 0xfb, - 0x28, 0xa1, 0x7c, 0x48, 0xb9, 0x1d, 0x8b, 0xae, 0x88, 0xd5, 0xa9, 0xe4, 0xe6, 0x17, 0x04, 0xfa, - 0xeb, 0x0c, 0xd8, 0x65, 0xaf, 0x0e, 0x87, 0x34, 0x14, 0xf8, 0x09, 0x94, 0x17, 0x53, 0x1c, 0x7a, - 0x94, 0xd0, 0x58, 0xd4, 0x50, 0x1d, 0x35, 0xca, 0xcd, 0x6b, 0x56, 0x3e, 0xf9, 0x8d, 0xeb, 0xee, - 0x66, 0x49, 0xa7, 0x58, 0x89, 0x4d, 0xd8, 0x10, 0x93, 0x88, 0xd6, 0x2e, 0xd4, 0x51, 0xa3, 0xda, - 0xac, 0x5a, 0xb9, 0x35, 0xee, 0x24, 0xa2, 0x8e, 0xcc, 0x61, 0x13, 0x2a, 0x12, 0xa0, 0x1d, 0x76, - 0xbd, 0x43, 0xda, 0xab, 0x5d, 0xac, 0xa3, 0x46, 0xc9, 0x59, 0xf9, 0x66, 0x4e, 0x11, 0xe8, 0x8a, - 0xc5, 0x65, 0x4b, 0x3a, 0xfc, 0x0c, 0x2a, 0x6a, 0x56, 0x1c, 0xb1, 0x30, 0xa6, 0x19, 0xd6, 0xf5, - 0x75, 0x2c, 0x95, 0x75, 0x56, 0x6a, 0xb1, 0x01, 0x25, 0x5f, 0xf6, 0xdb, 0xd9, 0x92, 0x70, 0x57, - 0x9c, 0x3c, 0xc6, 0x3d, 0xb8, 0x24, 0x87, 0x4b, 0x92, 0x72, 0xb3, 0x62, 0x29, 0x7f, 0x3a, 0x8b, - 0xb3, 0xf5, 0xf2, 0xc7, 0xcf, 0x5b, 0xcf, 0xff, 0xdb, 0x62, 0x6b, 0x2f, 0xa1, 0x7c, 0x22, 0x3b, - 0x38, 0xaa, 0xb9, 0xf9, 0x14, 0x6e, 0xbc, 0x65, 0x62, 0xd8, 0x9f, 0xa8, 0xbd, 0x3a, 0x83, 0x44, - 0xf4, 0xd8, 0x38, 0x5c, 0x3a, 0x57, 0x04, 0x44, 0xab, 0x80, 0x26, 0x81, 0x9b, 0xe7, 0x4b, 0xd5, - 0x72, 0xf7, 0x6e, 0xc3, 0xc6, 0xc2, 0x5f, 0xac, 0x43, 0x65, 0x61, 0xc1, 0xbe, 0xd3, 0xde, 0x7b, - 0xd7, 0xee, 0xb8, 0xba, 0x86, 0x01, 0x36, 0xb7, 0xdb, 0xee, 0xfe, 0xce, 0x96, 0x8e, 0x9a, 0xdf, - 0x10, 0x94, 0x72, 0x2f, 0xb7, 0xe1, 0xf2, 0x2e, 0x67, 0x3e, 0x8d, 0x63, 0x6c, 0x9c, 0xfd, 
0xa5, - 0x75, 0xcb, 0x8d, 0x42, 0x6e, 0xfd, 0x91, 0x98, 0x5a, 0x03, 0x3d, 0x40, 0x98, 0xc2, 0xd5, 0xf3, - 0xd8, 0xf0, 0x9d, 0x33, 0xe5, 0x5f, 0xd6, 0x36, 0xee, 0xfe, 0xab, 0x4c, 0xad, 0xd8, 0x6a, 0x4d, - 0x67, 0x44, 0x3b, 0x99, 0x11, 0xed, 0x74, 0x46, 0xd0, 0xa7, 0x94, 0xa0, 0xaf, 0x29, 0x41, 0xdf, - 0x53, 0x82, 0xa6, 0x29, 0x41, 0xbf, 0x52, 0x82, 0x7e, 0xa7, 0x44, 0x3b, 0x4d, 0x09, 0xfa, 0x3c, - 0x27, 0xda, 0x74, 0x4e, 0xb4, 0x93, 0x39, 0xd1, 0xde, 0x57, 0x96, 0xcd, 0x8f, 0x1f, 0x46, 0x9e, - 0xb7, 0x29, 0x5f, 0xfc, 0xa3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0x79, 0xcf, 0x36, 0xb1, - 0x03, 0x00, 0x00, + // 508 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xbd, 0x50, 0x4a, 0x98, 0x58, 0x91, 0xb5, 0x2a, 0x28, 0x32, 0x68, 0x89, 0x2c, 0x40, + 0x11, 0x07, 0x1b, 0x02, 0x12, 0x02, 0x81, 0x84, 0x42, 0x43, 0xe9, 0x05, 0xb5, 0x8e, 0xb9, 0x70, + 0xa9, 0x12, 0x67, 0xeb, 0x84, 0x36, 0x5e, 0x77, 0xbd, 0x6e, 0xc8, 0x8d, 0x47, 0x40, 0xe2, 0x25, + 0x78, 0x06, 0x9e, 0x80, 0x63, 0x8e, 0x15, 0x07, 0x44, 0x9c, 0x0b, 0xc7, 0x3e, 0x02, 0xf2, 0xae, + 0xe3, 0x26, 0x51, 0x04, 0xe2, 0x62, 0xed, 0xec, 0xcc, 0x3f, 0xf3, 0xcd, 0xef, 0x85, 0xca, 0x21, + 0x67, 0xa1, 0xa0, 0x61, 0xcf, 0x8e, 0x38, 0x13, 0x0c, 0x97, 0xe6, 0xb1, 0xb9, 0x15, 0xb0, 0x80, + 0xc9, 0x4b, 0x27, 0x3b, 0xa9, 0xbc, 0xf9, 0x38, 0x18, 0x88, 0x7e, 0xd2, 0xb5, 0x7d, 0x36, 0x74, + 0x46, 0xb4, 0x73, 0x4a, 0x47, 0x8c, 0x1f, 0xc5, 0x8e, 0xcf, 0x86, 0x43, 0x16, 0x3a, 0x7d, 0x21, + 0xa2, 0x80, 0x47, 0x7e, 0x71, 0xc8, 0x55, 0x2f, 0x16, 0x54, 0x3e, 0xe3, 0x82, 0x7e, 0x8c, 0x38, + 0xfb, 0x40, 0x7d, 0x91, 0x47, 0x4e, 0x74, 0x14, 0x38, 0x27, 0x09, 0xe5, 0x03, 0xca, 0x9d, 0x58, + 0x74, 0x44, 0xac, 0xbe, 0x4a, 0x6e, 0x7d, 0x41, 0x60, 0xbc, 0xce, 0xb9, 0x3c, 0xf6, 0xea, 0x78, + 0x40, 0x43, 0x81, 0x9f, 0x40, 0x39, 0x9b, 0xe2, 0xd2, 0x93, 0x84, 0xc6, 0xa2, 0x8a, 0x6a, 0xa8, + 0x5e, 0x6e, 0x5c, 0xb7, 0x8b, 0xc9, 0x6f, 0x3c, 0x6f, 0x2f, 0x4f, 0xba, 0x8b, 0x95, 0xd8, 0x82, + 0x0d, 0x31, 0x8e, 0x68, 0xf5, 0x52, 0x0d, 0xd5, 0x2b, 0x8d, 0x8a, 0x5d, 0x38, 0xe0, 0x8d, 0x23, + 0xea, 0xca, 0x1c, 0xb6, 0x40, 0x97, 0x00, 0xad, 0xb0, 0xd3, 0x3d, 0xa6, 0xbd, 0xea, 0xe5, 0x1a, + 0xaa, 0x97, 0xdc, 0xa5, 0x3b, 0x6b, 0x82, 0xc0, 0x50, 0x2c, 0x1e, 0x9b, 0xd3, 0xe1, 0x67, 0xa0, + 0xab, 0x59, 0x71, 0xc4, 0xc2, 0x98, 0xe6, 0x58, 0x37, 0x56, 0xb1, 0x54, 0xd6, 0x5d, 0xaa, 0xc5, + 0x26, 0x94, 0x7c, 0xd9, 0x6f, 0x77, 0x5b, 0xc2, 0x5d, 0x73, 0x8b, 0x18, 0xf7, 0xe0, 0x8a, 0x1c, + 0x2e, 0x49, 0xca, 0x0d, 0xdd, 0x56, 0xfe, 0xb4, 0xb3, 0x6f, 0xf3, 0xe5, 0x8f, 0x9f, 0xb7, 0x9f, + 0xff, 0xb7, 0xc5, 0xf6, 0x7e, 0x42, 0xf9, 0x58, 0x76, 0x70, 0x55, 0x73, 0xeb, 0x29, 0xdc, 0x7c, + 0xcb, 0xc4, 0xe0, 0x70, 0xac, 0xf6, 0x6a, 0xf7, 0x13, 0xd1, 0x63, 0xa3, 0x70, 0xee, 0xdc, 0x22, + 0x20, 0x5a, 0x06, 0xb4, 0x08, 0xdc, 0x5a, 0x2f, 0x55, 0xcb, 0xdd, 0xbf, 0x03, 0x1b, 0x99, 0xbf, + 0xd8, 0x00, 0x3d, 0xb3, 0xe0, 0xc0, 0x6d, 0xed, 0xbf, 0x6b, 0xb5, 0x3d, 0x43, 0xc3, 0x00, 0x9b, + 0x3b, 0x2d, 0xef, 0x60, 0x77, 0xdb, 0x40, 0x8d, 0x6f, 0x08, 0x4a, 0x85, 0x97, 0x3b, 0x70, 0x75, + 0x8f, 0x33, 0x9f, 0xc6, 0x31, 0x36, 0x2f, 0xfe, 0xd2, 0xaa, 0xe5, 0xe6, 0x42, 0x6e, 0xf5, 0x91, + 0x58, 0x5a, 0x1d, 0x3d, 0x40, 0x98, 0xc2, 0xd6, 0x3a, 0x36, 0x7c, 0xf7, 0x42, 0xf9, 0x97, 0xb5, + 0xcd, 0x7b, 0xff, 0x2a, 0x53, 0x2b, 0x36, 0x9b, 0x93, 0x29, 0xd1, 0xce, 0xa6, 0x44, 0x3b, 0x9f, + 0x12, 0xf4, 0x29, 0x25, 0xe8, 0x6b, 0x4a, 0xd0, 0xf7, 0x94, 0xa0, 0x49, 0x4a, 0xd0, 0xaf, 0x94, + 0xa0, 0xdf, 0x29, 0xd1, 0xce, 0x53, 0x82, 0x3e, 
0xcf, 0x88, 0x36, 0x99, 0x11, 0xed, 0x6c, 0x46, + 0xb4, 0xf7, 0xfa, 0xbc, 0xf9, 0xe9, 0xc3, 0xa8, 0xdb, 0xdd, 0x94, 0x2f, 0xfe, 0xd1, 0x9f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x34, 0x74, 0x3e, 0x36, 0x98, 0x03, 0x00, 0x00, } func (x Type) String() string { diff --git a/pkg/frontend/v1/frontendv1pb/frontend.proto b/pkg/frontend/v1/frontendv1pb/frontend.proto index 56bc4d52706..603aeafef97 100644 --- a/pkg/frontend/v1/frontendv1pb/frontend.proto +++ b/pkg/frontend/v1/frontendv1pb/frontend.proto @@ -6,7 +6,7 @@ package frontend; option go_package = "frontendv1pb"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; import "github.com/cortexproject/cortex/pkg/querier/stats/stats.proto"; diff --git a/pkg/frontend/v2/frontendv2pb/frontend.pb.go b/pkg/frontend/v2/frontendv2pb/frontend.pb.go index b5ab1a79350..451729cc6eb 100644 --- a/pkg/frontend/v2/frontendv2pb/frontend.pb.go +++ b/pkg/frontend/v2/frontendv2pb/frontend.pb.go @@ -127,30 +127,30 @@ func init() { func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } var fileDescriptor_eca3873955a29cfe = []byte{ - // 367 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xbd, 0x4e, 0x32, 0x41, - 0x14, 0xdd, 0xf9, 0x7e, 0x93, 0x81, 0x58, 0x8c, 0xd1, 0x6c, 0x28, 0x46, 0xa4, 0xa2, 0x71, 0x27, - 0x41, 0x2b, 0xa3, 0x89, 0x21, 0x86, 0x68, 0x27, 0x2b, 0x95, 0x1d, 0xbb, 0x0c, 0xcb, 0x8f, 0xec, - 0x0c, 0x33, 0xb3, 0x20, 0x9d, 0x8f, 0xe0, 0x63, 0xf8, 0x28, 0x96, 0x5b, 0x12, 0x0b, 0x23, 0x43, - 0x63, 0xc9, 0x23, 0x18, 0x66, 0x80, 0x2c, 0x31, 0x31, 0xb1, 0xb9, 0xb9, 0x27, 0x7b, 0xce, 0xbd, - 0xe7, 0xee, 0x19, 0xb8, 0xd3, 0x16, 0x2c, 0x56, 0x34, 0x6e, 0x79, 0x5c, 0x30, 0xc5, 0x50, 0x7e, - 0x8d, 0x47, 0x15, 0x1e, 0x14, 0x8e, 0xa2, 0xae, 0xea, 0x24, 0x81, 0x17, 0xb2, 0x01, 0x89, 0x58, - 0xc4, 0x88, 0x21, 0x05, 0x49, 0xdb, 0x20, 0x03, 0x4c, 0x67, 0xc5, 0x85, 0x93, 0x0c, 0x7d, 0x4c, - 0x9b, 0x23, 0x3a, 0x66, 0xa2, 0x2f, 0x49, 0xc8, 0x06, 0x03, 0x16, 0x93, 0x8e, 0x52, 0x3c, 0x12, - 0x3c, 0xdc, 0x34, 0x2b, 0xd5, 0x79, 0x46, 0x15, 0x32, 0xa1, 0xe8, 0x03, 0x17, 0xac, 0x47, 0x43, - 0xb5, 0x42, 0x84, 0xf7, 0x23, 0x32, 0x4c, 0xa8, 0xe8, 0x52, 0x41, 0xa4, 0x6a, 0x2a, 0x69, 0xab, - 0x95, 0x97, 0x52, 0x00, 0x51, 0x3d, 0xa1, 0x62, 0xe2, 0x53, 0x99, 0xdc, 0x2b, 0x9f, 0x0e, 0x13, - 0x2a, 0x15, 0x72, 0xe1, 0xff, 0xa5, 0x66, 0x72, 0x7d, 0xe9, 0x82, 0x22, 0x28, 0xff, 0xf1, 0xd7, - 0x10, 0x9d, 0xc2, 0xfc, 0xd2, 0x81, 0x4f, 0x25, 0x67, 0xb1, 0xa4, 0xee, 0xaf, 0x22, 0x28, 0xe7, - 0x2a, 0xfb, 0xde, 0xc6, 0xd6, 0x55, 0xa3, 0x71, 0xb3, 0xfe, 0xea, 0x6f, 0x71, 0x51, 0x0b, 0xfe, - 0x35, 0xbb, 0xdd, 0xdf, 0x46, 0x94, 0xf7, 0xac, 0x93, 0xdb, 0x65, 0xad, 0x5e, 0xbc, 0xbe, 0x1d, - 0x9c, 0xfd, 0xf8, 0x18, 0xcf, 0x98, 0x37, 0x13, 0x7c, 0x3b, 0xbc, 0xb4, 0x07, 0x77, 0xb7, 0x2e, - 0xb2, 0xcb, 0x2b, 0x3d, 0x88, 0x6a, 0xab, 0x74, 0x6a, 0x4c, 0xd4, 0xed, 0x10, 0xd4, 0x80, 0xb9, - 0x0c, 0x19, 0x15, 0xbd, 0x6c, 0x82, 0xde, 0xd7, 0x3f, 0x53, 0x38, 0xfc, 0x86, 0x61, 0x37, 0x95, - 0x9c, 0x6a, 0x35, 0x9d, 0x61, 0x67, 0x3a, 0xc3, 0xce, 0x62, 0x86, 0xc1, 0xa3, 0xc6, 0xe0, 0x59, - 0x63, 0xf0, 0xa2, 0x31, 0x48, 0x35, 0x06, 0xef, 0x1a, 0x83, 0x0f, 0x8d, 0x9d, 0x85, 0xc6, 0xe0, - 0x69, 0x8e, 0x9d, 0x74, 0x8e, 0x9d, 0xe9, 0x1c, 0x3b, 0x77, 0x5b, 0xaf, 0x27, 0xf8, 0x67, 0x02, - 0x3a, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x31, 0xd0, 0x9a, 0x67, 0x64, 0x02, 0x00, 0x00, + // 359 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xcd, 0x4e, 0x02, 0x31, + 0x10, 0xde, 0xfa, 0x9b, 0x14, 0xe2, 0xa1, 0xfe, 0x64, 0xc3, 0xa1, 0x22, 0x27, 0x4e, 0x6d, 0x82, + 0x9e, 0x8c, 0x26, 0x86, 0x18, 0xa2, 0x37, 0x59, 0x39, 0x79, 0x83, 0xa5, 0x2e, 0x3f, 0xb2, 0x2d, + 0x6d, 0x17, 0xe4, 0xe6, 0x23, 0xf8, 0x18, 0x3e, 0x8a, 0xc7, 0x3d, 0x12, 0x0f, 0x46, 0xca, 0xc5, + 0x23, 0x8f, 0x60, 0x76, 0x0b, 0x04, 0x62, 0x62, 0xe2, 0x65, 0x32, 0x93, 0xf9, 0xbe, 0x99, 0x6f, + 0xfa, 0x15, 0xee, 0x3d, 0x4a, 0x1e, 0x6a, 0x16, 0x36, 0x89, 0x90, 0x5c, 0x73, 0x94, 0x5d, 0xd4, + 0x83, 0x92, 0x68, 0xe4, 0x0e, 0x02, 0x1e, 0xf0, 0xb4, 0x41, 0x93, 0xcc, 0x62, 0x72, 0x67, 0x41, + 0x5b, 0xb7, 0xa2, 0x06, 0xf1, 0x79, 0x8f, 0x0e, 0x59, 0x7d, 0xc0, 0x86, 0x5c, 0x76, 0x15, 0xf5, + 0x79, 0xaf, 0xc7, 0x43, 0xda, 0xd2, 0x5a, 0x04, 0x52, 0xf8, 0xcb, 0x64, 0xce, 0xba, 0x5c, 0x61, + 0xf9, 0x5c, 0x6a, 0xf6, 0x2c, 0x24, 0xef, 0x30, 0x5f, 0xcf, 0x2b, 0x2a, 0xba, 0x01, 0xed, 0x47, + 0x4c, 0xb6, 0x99, 0xa4, 0x4a, 0xd7, 0xb5, 0xb2, 0xd1, 0xd2, 0x0b, 0x31, 0x80, 0xa8, 0x1a, 0x31, + 0x39, 0xf2, 0x98, 0x8a, 0x9e, 0xb4, 0xc7, 0xfa, 0x11, 0x53, 0x1a, 0xb9, 0x70, 0x37, 0xe1, 0x8c, + 0x6e, 0xaf, 0x5d, 0x90, 0x07, 0xc5, 0x2d, 0x6f, 0x51, 0xa2, 0x73, 0x98, 0x4d, 0x14, 0x78, 0x4c, + 0x09, 0x1e, 0x2a, 0xe6, 0x6e, 0xe4, 0x41, 0x31, 0x53, 0x3a, 0x22, 0x4b, 0x59, 0x37, 0xb5, 0xda, + 0xdd, 0xa2, 0xeb, 0xad, 0x61, 0x51, 0x13, 0x6e, 0xa7, 0xbb, 0xdd, 0xcd, 0x94, 0x94, 0x25, 0x56, + 0xc9, 0x7d, 0x12, 0xcb, 0x57, 0x1f, 0x9f, 0xc7, 0x17, 0xff, 0x3e, 0x86, 0xa4, 0xe2, 0xd3, 0x09, + 0x9e, 0x1d, 0x5e, 0x38, 0x84, 0xfb, 0x6b, 0x17, 0xd9, 0xe5, 0xa5, 0x0e, 0x44, 0x95, 0xb9, 0x09, + 0x15, 0x2e, 0xab, 0x76, 0x08, 0xaa, 0xc1, 0xcc, 0x0a, 0x18, 0xe5, 0xc9, 0xaa, 0x51, 0xe4, 0xf7, + 0xcb, 0xe4, 0x4e, 0xfe, 0x40, 0xd8, 0x4d, 0x05, 0xa7, 0x5c, 0x8e, 0x27, 0xd8, 0x19, 0x4f, 0xb0, + 0x33, 0x9b, 0x60, 0xf0, 0x62, 0x30, 0x78, 0x33, 0x18, 0xbc, 0x1b, 0x0c, 0x62, 0x83, 0xc1, 0x97, + 0xc1, 0xe0, 0xdb, 0x60, 0x67, 0x66, 0x30, 0x78, 0x9d, 0x62, 0x27, 0x9e, 0x62, 0x67, 0x3c, 0xc5, + 0xce, 0xc3, 0xda, 0x27, 0x69, 0xec, 0xa4, 0x06, 0x9d, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa4, + 0xdf, 0xd6, 0xe9, 0x4b, 0x02, 0x00, 0x00, } func (this *QueryResultRequest) Equal(that interface{}) bool { diff --git a/pkg/frontend/v2/frontendv2pb/frontend.proto b/pkg/frontend/v2/frontendv2pb/frontend.proto index 555c249d405..738497e4f73 100644 --- a/pkg/frontend/v2/frontendv2pb/frontend.proto +++ b/pkg/frontend/v2/frontendv2pb/frontend.proto @@ -4,7 +4,7 @@ package frontendv2pb; option go_package = "frontendv2pb"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; import "github.com/cortexproject/cortex/pkg/querier/stats/stats.proto"; diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go index 67ebbd01ab5..29e03f9009c 100644 --- a/pkg/ingester/client/ingester.pb.go +++ b/pkg/ingester/client/ingester.pb.go @@ -1452,89 +1452,89 @@ func init() { func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1312 bytes of a gzipped FileDescriptorProto + // 1304 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0xd4, 0xd6, - 0x13, 0xf7, 0x4b, 0x76, 0x97, 0xec, 0xec, 0x66, 0xd9, 0xbc, 0x00, 0x59, 0xcc, 0x17, 0x07, 0xfc, - 0x15, 0x6d, 0xd4, 0x96, 0x04, 0xd2, 0x1f, 0x82, 0xfe, 0x42, 0x09, 0x04, 0x48, 0x21, 0x04, 
0x9c, - 0x40, 0xab, 0x4a, 0x95, 0xe5, 0xec, 0x3e, 0x12, 0x17, 0xff, 0xc2, 0x7e, 0x46, 0xd0, 0x53, 0xa5, - 0xfe, 0x01, 0xad, 0x7a, 0xea, 0xa9, 0x52, 0x6f, 0x3d, 0xf7, 0x0f, 0xe8, 0x99, 0x23, 0x47, 0x54, - 0x55, 0xa8, 0x2c, 0x97, 0x1e, 0xe9, 0x7f, 0x50, 0xf9, 0xfd, 0xf0, 0xda, 0x8e, 0x37, 0x09, 0x12, - 0xe9, 0x6d, 0x3d, 0xf3, 0x99, 0xcf, 0xcc, 0x9b, 0x99, 0xf7, 0x66, 0x16, 0x5a, 0xb6, 0xb7, 0x49, - 0x22, 0x4a, 0xc2, 0xd9, 0x20, 0xf4, 0xa9, 0x8f, 0x6b, 0x5d, 0x3f, 0xa4, 0xe4, 0xa1, 0x7a, 0x7a, - 0xd3, 0xa6, 0x5b, 0xf1, 0xc6, 0x6c, 0xd7, 0x77, 0xe7, 0x36, 0xfd, 0x4d, 0x7f, 0x8e, 0xa9, 0x37, - 0xe2, 0xbb, 0xec, 0x8b, 0x7d, 0xb0, 0x5f, 0xdc, 0x4c, 0x3d, 0x9f, 0x81, 0x73, 0x86, 0x20, 0xf4, - 0xbf, 0x26, 0x5d, 0x2a, 0xbe, 0xe6, 0x82, 0x7b, 0x9b, 0x52, 0xb1, 0x21, 0x7e, 0x70, 0x53, 0xfd, - 0x13, 0x68, 0x18, 0xc4, 0xea, 0x19, 0xe4, 0x7e, 0x4c, 0x22, 0x8a, 0x67, 0xe1, 0xc0, 0xfd, 0x98, - 0x84, 0x36, 0x89, 0x3a, 0xe8, 0xc4, 0xe8, 0x4c, 0x63, 0xfe, 0xd0, 0xac, 0x80, 0xdf, 0x8a, 0x49, - 0xf8, 0x48, 0xc0, 0x0c, 0x09, 0xd2, 0x2f, 0x40, 0x93, 0x9b, 0x47, 0x81, 0xef, 0x45, 0x04, 0xcf, - 0xc1, 0x81, 0x90, 0x44, 0xb1, 0x43, 0xa5, 0xfd, 0xe1, 0x82, 0x3d, 0xc7, 0x19, 0x12, 0xa5, 0xff, - 0x84, 0xa0, 0x99, 0xa5, 0xc6, 0xef, 0x00, 0x8e, 0xa8, 0x15, 0x52, 0x93, 0xda, 0x2e, 0x89, 0xa8, - 0xe5, 0x06, 0xa6, 0x9b, 0x90, 0xa1, 0x99, 0x51, 0xa3, 0xcd, 0x34, 0xeb, 0x52, 0xb1, 0x12, 0xe1, - 0x19, 0x68, 0x13, 0xaf, 0x97, 0xc7, 0x8e, 0x30, 0x6c, 0x8b, 0x78, 0xbd, 0x2c, 0xf2, 0x0c, 0x8c, - 0xb9, 0x16, 0xed, 0x6e, 0x91, 0x30, 0xea, 0x8c, 0xe6, 0x8f, 0x76, 0xdd, 0xda, 0x20, 0xce, 0x0a, - 0x57, 0x1a, 0x29, 0x4a, 0xff, 0x05, 0xc1, 0xa1, 0xa5, 0x87, 0xc4, 0x0d, 0x1c, 0x2b, 0xfc, 0x4f, - 0x42, 0x3c, 0xbb, 0x2d, 0xc4, 0xc3, 0x65, 0x21, 0x46, 0x99, 0x18, 0xaf, 0xc1, 0x78, 0x2e, 0xb1, - 0xf8, 0x43, 0x00, 0xe6, 0xa9, 0xac, 0x86, 0xc1, 0xc6, 0x6c, 0xe2, 0x6e, 0x8d, 0xe9, 0x16, 0x2b, - 0x8f, 0x9f, 0x4d, 0x2b, 0x46, 0x06, 0xad, 0xff, 0x88, 0x60, 0x92, 0xb1, 0xad, 0xd1, 0x90, 0x58, - 0x6e, 0xca, 0x79, 0x01, 0x1a, 0xdd, 0xad, 0xd8, 0xbb, 0x97, 0x23, 0x9d, 0x92, 0xa1, 0x0d, 0x28, - 0x2f, 0x26, 0x20, 0xc1, 0x9b, 0xb5, 0x28, 0x04, 0x35, 0xf2, 0x4a, 0x41, 0xad, 0xc1, 0xe1, 0x42, - 0x11, 0x5e, 0xc3, 0x49, 0x7f, 0x47, 0x80, 0x59, 0x4a, 0xef, 0x58, 0x4e, 0x4c, 0x22, 0x59, 0xd8, - 0xe3, 0x00, 0x4e, 0x22, 0x35, 0x3d, 0xcb, 0x25, 0xac, 0xa0, 0x75, 0xa3, 0xce, 0x24, 0x37, 0x2c, - 0x97, 0x0c, 0xa9, 0xfb, 0xc8, 0x2b, 0xd4, 0x7d, 0x74, 0xd7, 0xba, 0x57, 0x4e, 0xa0, 0xbd, 0xd4, - 0xfd, 0x1c, 0x4c, 0xe6, 0xe2, 0x17, 0x39, 0x39, 0x09, 0x4d, 0x7e, 0x80, 0x07, 0x4c, 0xce, 0xb2, - 0x52, 0x37, 0x1a, 0xce, 0x00, 0xaa, 0x7f, 0x0a, 0x47, 0x33, 0x96, 0x85, 0x4a, 0xef, 0xc1, 0xfe, - 0x1e, 0x4c, 0x5c, 0x97, 0x19, 0x89, 0xf6, 0xf9, 0x46, 0xe8, 0xef, 0x8b, 0x32, 0x09, 0x67, 0x22, - 0xca, 0x69, 0x68, 0x0c, 0xca, 0x24, 0x83, 0x84, 0xb4, 0x4e, 0x91, 0xfe, 0x11, 0x74, 0x06, 0x66, - 0x85, 0x23, 0xee, 0x6a, 0x8c, 0xa1, 0x7d, 0x3b, 0x22, 0xe1, 0x1a, 0xb5, 0xa8, 0x3c, 0x9f, 0xfe, - 0x27, 0x82, 0x89, 0x8c, 0x50, 0x50, 0x9d, 0x92, 0xef, 0xb7, 0xed, 0x7b, 0x66, 0x68, 0x51, 0xde, - 0x32, 0xc8, 0x18, 0x4f, 0xa5, 0x86, 0x45, 0x49, 0xd2, 0x55, 0x5e, 0xec, 0x9a, 0x69, 0xf7, 0xa3, - 0x99, 0x8a, 0x51, 0xf7, 0x62, 0x97, 0x77, 0x67, 0x92, 0x3b, 0x2b, 0xb0, 0xcd, 0x02, 0xd3, 0x28, - 0x63, 0x6a, 0x5b, 0x81, 0xbd, 0x9c, 0x23, 0x9b, 0x85, 0xc9, 0x30, 0x76, 0x48, 0x11, 0x5e, 0x61, - 0xf0, 0x89, 0x44, 0x95, 0xc7, 0xff, 0x1f, 0xc6, 0xad, 0x2e, 0xb5, 0x1f, 0x10, 0xe9, 0xbf, 0xca, - 0xfc, 0x37, 0xb9, 0x90, 0x87, 0xa0, 0x7f, 0x05, 0x93, 0xc9, 0xe9, 0x96, 0x2f, 0xe5, 0xcf, 0x37, - 0x05, 0x07, 0xe2, 
0x88, 0x84, 0xa6, 0xdd, 0x13, 0x77, 0xa1, 0x96, 0x7c, 0x2e, 0xf7, 0xf0, 0x69, - 0xa8, 0xf4, 0x2c, 0x6a, 0xb1, 0xb3, 0x34, 0xe6, 0x8f, 0xca, 0x66, 0xdd, 0x96, 0x21, 0x83, 0xc1, - 0xf4, 0x2b, 0x80, 0x13, 0x55, 0x94, 0x67, 0x3f, 0x0b, 0xd5, 0x28, 0x11, 0x88, 0xab, 0x7b, 0x2c, - 0xcb, 0x52, 0x88, 0xc4, 0xe0, 0x48, 0xfd, 0x37, 0x04, 0xda, 0x0a, 0xa1, 0xa1, 0xdd, 0x8d, 0x2e, - 0xfb, 0x61, 0xfe, 0x6e, 0xec, 0xf3, 0xdb, 0x7c, 0x0e, 0x9a, 0xf2, 0xf2, 0x99, 0x11, 0xa1, 0x3b, - 0xbf, 0xcf, 0x0d, 0x09, 0x5d, 0x23, 0x54, 0xbf, 0x06, 0xd3, 0x43, 0x63, 0x16, 0xa9, 0x98, 0x81, - 0x9a, 0xcb, 0x20, 0x22, 0x17, 0xed, 0xc1, 0x33, 0xc6, 0x4d, 0x0d, 0xa1, 0xd7, 0x6f, 0xc1, 0xa9, - 0x21, 0x64, 0x85, 0x36, 0xdf, 0x3b, 0x65, 0x07, 0x8e, 0x08, 0xca, 0x15, 0x42, 0xad, 0xa4, 0x60, - 0xb2, 0xeb, 0x57, 0x61, 0x6a, 0x9b, 0x46, 0xd0, 0xbf, 0x07, 0x63, 0xae, 0x90, 0x09, 0x07, 0x9d, - 0xa2, 0x83, 0xd4, 0x26, 0x45, 0xea, 0xff, 0x20, 0x38, 0x58, 0x18, 0x17, 0x49, 0x09, 0xee, 0x86, - 0xbe, 0x6b, 0xca, 0x4d, 0x68, 0xd0, 0x6d, 0xad, 0x44, 0xbe, 0x2c, 0xc4, 0xcb, 0xbd, 0x6c, 0x3b, - 0x8e, 0xe4, 0xda, 0xd1, 0x83, 0x1a, 0xbb, 0xbf, 0x72, 0x6a, 0x4e, 0x0e, 0x42, 0x61, 0x29, 0xba, - 0x69, 0xd9, 0xe1, 0xe2, 0x42, 0x32, 0x04, 0xfe, 0x78, 0x36, 0xfd, 0x4a, 0xbb, 0x12, 0xb7, 0x5f, - 0xe8, 0x59, 0x01, 0x25, 0xa1, 0x21, 0xbc, 0xe0, 0xb7, 0xa1, 0xc6, 0xa7, 0x5b, 0xa7, 0xc2, 0xfc, - 0x8d, 0xcb, 0x2e, 0xc8, 0x0e, 0x40, 0x01, 0xd1, 0xbf, 0x47, 0x50, 0xe5, 0x27, 0xdd, 0xaf, 0xd6, - 0x54, 0x61, 0x8c, 0x78, 0x5d, 0xbf, 0x67, 0x7b, 0x9b, 0xec, 0xd9, 0xa8, 0x1a, 0xe9, 0x37, 0xc6, - 0xe2, 0xa6, 0x26, 0xef, 0x43, 0x53, 0x5c, 0xc7, 0x05, 0x18, 0xcf, 0x75, 0x4e, 0x6e, 0x35, 0x42, - 0x7b, 0x5a, 0x8d, 0x4c, 0x68, 0x66, 0x35, 0xf8, 0x14, 0x54, 0xe8, 0xa3, 0x80, 0xbf, 0x7f, 0xad, - 0xf9, 0x09, 0x69, 0xcd, 0xd4, 0xeb, 0x8f, 0x02, 0x62, 0x30, 0x75, 0x12, 0x0d, 0x9b, 0xac, 0xbc, - 0x7c, 0xec, 0x37, 0x3e, 0x04, 0x55, 0x36, 0x6c, 0x58, 0xe8, 0x75, 0x83, 0x7f, 0xe8, 0xdf, 0x21, - 0x68, 0x0d, 0x3a, 0xe5, 0xb2, 0xed, 0x90, 0xd7, 0xd1, 0x28, 0x2a, 0x8c, 0xdd, 0xb5, 0x1d, 0xc2, - 0x62, 0xe0, 0xee, 0xd2, 0xef, 0xb2, 0x4c, 0xbd, 0xf5, 0x19, 0xd4, 0xd3, 0x23, 0xe0, 0x3a, 0x54, - 0x97, 0x6e, 0xdd, 0x5e, 0xb8, 0xde, 0x56, 0xf0, 0x38, 0xd4, 0x6f, 0xac, 0xae, 0x9b, 0xfc, 0x13, - 0xe1, 0x83, 0xd0, 0x30, 0x96, 0xae, 0x2c, 0x7d, 0x61, 0xae, 0x2c, 0xac, 0x5f, 0xbc, 0xda, 0x1e, - 0xc1, 0x18, 0x5a, 0x5c, 0x70, 0x63, 0x55, 0xc8, 0x46, 0xe7, 0x7f, 0x1e, 0x83, 0x31, 0x19, 0x23, - 0x3e, 0x0f, 0x95, 0x9b, 0x71, 0xb4, 0x85, 0x8f, 0x0c, 0x3a, 0xf5, 0xf3, 0xd0, 0xa6, 0x44, 0xdc, - 0x3c, 0x75, 0x6a, 0x9b, 0x9c, 0xdf, 0x3b, 0x5d, 0xc1, 0x1f, 0x40, 0x95, 0xed, 0x41, 0xb8, 0x74, - 0x33, 0x57, 0xcb, 0xf7, 0x6d, 0x5d, 0xc1, 0x97, 0xa0, 0x91, 0xd9, 0xed, 0x86, 0x58, 0x1f, 0xcb, - 0x49, 0xf3, 0x4f, 0x8a, 0xae, 0x9c, 0x41, 0x78, 0x15, 0x5a, 0x4c, 0x25, 0x57, 0xb2, 0x08, 0xff, - 0x4f, 0x9a, 0x94, 0xad, 0xca, 0xea, 0xf1, 0x21, 0xda, 0x34, 0xac, 0xab, 0xd0, 0xc8, 0xac, 0x23, - 0x58, 0xcd, 0x35, 0x5e, 0x6e, 0x3b, 0x1b, 0x04, 0x57, 0xb2, 0xf9, 0xe8, 0x0a, 0xbe, 0x23, 0x16, - 0x93, 0xec, 0x62, 0xb3, 0x23, 0xdf, 0xc9, 0x12, 0x5d, 0xc9, 0x91, 0x97, 0x00, 0x06, 0xcb, 0x04, - 0x3e, 0x9a, 0x33, 0xca, 0x2e, 0x41, 0xaa, 0x5a, 0xa6, 0x4a, 0xc3, 0x5b, 0x83, 0x76, 0x71, 0x27, - 0xd9, 0x89, 0xec, 0xc4, 0x76, 0x55, 0x49, 0x6c, 0x8b, 0x50, 0x4f, 0x87, 0x2e, 0xee, 0x94, 0xcc, - 0x61, 0x4e, 0x36, 0x7c, 0x42, 0xeb, 0x0a, 0xbe, 0x0c, 0xcd, 0x05, 0xc7, 0xd9, 0x0b, 0x8d, 0x9a, - 0xd5, 0x44, 0x45, 0x1e, 0x27, 0x9d, 0x16, 0xc5, 0xd1, 0x84, 0xdf, 0x48, 0x1f, 0x84, 0x1d, 0x87, - 0xb7, 0xfa, 0xe6, 0xae, 0xb8, 0xd4, 0xdb, 
0x37, 0x70, 0x7c, 0xc7, 0x41, 0xb8, 0x67, 0x9f, 0xa7, - 0x77, 0xc1, 0x95, 0x64, 0x7d, 0x1d, 0x0e, 0x16, 0xe6, 0x22, 0xd6, 0x0a, 0x2c, 0x85, 0x51, 0xaa, - 0x4e, 0x0f, 0xd5, 0x4b, 0xde, 0xc5, 0x8f, 0x9f, 0x3c, 0xd7, 0x94, 0xa7, 0xcf, 0x35, 0xe5, 0xe5, - 0x73, 0x0d, 0x7d, 0xdb, 0xd7, 0xd0, 0xaf, 0x7d, 0x0d, 0x3d, 0xee, 0x6b, 0xe8, 0x49, 0x5f, 0x43, - 0x7f, 0xf5, 0x35, 0xf4, 0x77, 0x5f, 0x53, 0x5e, 0xf6, 0x35, 0xf4, 0xc3, 0x0b, 0x4d, 0x79, 0xf2, - 0x42, 0x53, 0x9e, 0xbe, 0xd0, 0x94, 0x2f, 0x6b, 0x5d, 0xc7, 0x26, 0x1e, 0xdd, 0xa8, 0xb1, 0xbf, - 0xf3, 0xef, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x47, 0x07, 0x0a, 0x45, 0x52, 0x10, 0x00, 0x00, + 0x13, 0xf7, 0x4b, 0x76, 0x97, 0xec, 0xec, 0x66, 0xd9, 0xbc, 0x04, 0xb2, 0x98, 0x2f, 0x0e, 0xf8, + 0x2b, 0xda, 0xa8, 0x2d, 0x09, 0xa4, 0x3f, 0x04, 0xfd, 0x85, 0x12, 0x08, 0x90, 0x42, 0x08, 0x38, + 0x81, 0x56, 0x95, 0x2a, 0xcb, 0xd9, 0x7d, 0x24, 0x2e, 0xfe, 0x85, 0xfd, 0x8c, 0xa0, 0xa7, 0x4a, + 0xfd, 0x03, 0x5a, 0xf5, 0xd4, 0x53, 0xa5, 0xde, 0x7a, 0xee, 0x1f, 0xd0, 0x33, 0x47, 0x8e, 0xa8, + 0xaa, 0x50, 0x59, 0x2e, 0x3d, 0xd2, 0xff, 0xa0, 0xf2, 0xfb, 0xe1, 0xb5, 0x1d, 0x6f, 0x12, 0x24, + 0xe8, 0x6d, 0x3d, 0xf3, 0x99, 0xcf, 0xcc, 0x9b, 0x99, 0xf7, 0x66, 0x16, 0x5a, 0xb6, 0xb7, 0x45, + 0x22, 0x4a, 0xc2, 0xb9, 0x20, 0xf4, 0xa9, 0x8f, 0x6b, 0x5d, 0x3f, 0xa4, 0xe4, 0x81, 0x3a, 0xb5, + 0xe5, 0x6f, 0xf9, 0x4c, 0x34, 0x9f, 0xfc, 0xe2, 0x5a, 0xf5, 0xdc, 0x96, 0x4d, 0xb7, 0xe3, 0xcd, + 0xb9, 0xae, 0xef, 0xce, 0x73, 0x60, 0x10, 0xfa, 0x5f, 0x93, 0x2e, 0x15, 0x5f, 0xf3, 0xc1, 0xdd, + 0x2d, 0xa9, 0xd8, 0x14, 0x3f, 0xb8, 0xa9, 0xfe, 0x09, 0x34, 0x0c, 0x62, 0xf5, 0x0c, 0x72, 0x2f, + 0x26, 0x11, 0xc5, 0x73, 0x70, 0xe0, 0x5e, 0x4c, 0x42, 0x9b, 0x44, 0x1d, 0x74, 0x7c, 0x74, 0xb6, + 0xb1, 0x30, 0x35, 0x27, 0xe0, 0x37, 0x63, 0x12, 0x3e, 0x14, 0x30, 0x43, 0x82, 0xf4, 0xf3, 0xd0, + 0xe4, 0xe6, 0x51, 0xe0, 0x7b, 0x11, 0xc1, 0xf3, 0x70, 0x20, 0x24, 0x51, 0xec, 0x50, 0x69, 0x7f, + 0xa8, 0x60, 0xcf, 0x71, 0x86, 0x44, 0xe9, 0x3f, 0x21, 0x68, 0x66, 0xa9, 0xf1, 0x3b, 0x80, 0x23, + 0x6a, 0x85, 0xd4, 0xa4, 0xb6, 0x4b, 0x22, 0x6a, 0xb9, 0x81, 0xe9, 0x26, 0x64, 0x68, 0x76, 0xd4, + 0x68, 0x33, 0xcd, 0x86, 0x54, 0xac, 0x46, 0x78, 0x16, 0xda, 0xc4, 0xeb, 0xe5, 0xb1, 0x23, 0x0c, + 0xdb, 0x22, 0x5e, 0x2f, 0x8b, 0x3c, 0x0d, 0x63, 0xae, 0x45, 0xbb, 0xdb, 0x24, 0x8c, 0x3a, 0xa3, + 0xf9, 0xa3, 0x5d, 0xb3, 0x36, 0x89, 0xb3, 0xca, 0x95, 0x46, 0x8a, 0xd2, 0x7f, 0x41, 0x30, 0xb5, + 0xfc, 0x80, 0xb8, 0x81, 0x63, 0x85, 0xff, 0x49, 0x88, 0x67, 0x76, 0x84, 0x78, 0xa8, 0x2c, 0xc4, + 0x28, 0x13, 0xe3, 0x55, 0x18, 0xcf, 0x25, 0x16, 0x7f, 0x08, 0xc0, 0x3c, 0x95, 0xd5, 0x30, 0xd8, + 0x9c, 0x4b, 0xdc, 0xad, 0x33, 0xdd, 0x52, 0xe5, 0xd1, 0xd3, 0x19, 0xc5, 0xc8, 0xa0, 0xf5, 0x1f, + 0x11, 0x4c, 0x32, 0xb6, 0x75, 0x1a, 0x12, 0xcb, 0x4d, 0x39, 0xcf, 0x43, 0xa3, 0xbb, 0x1d, 0x7b, + 0x77, 0x73, 0xa4, 0xd3, 0x32, 0xb4, 0x01, 0xe5, 0x85, 0x04, 0x24, 0x78, 0xb3, 0x16, 0x85, 0xa0, + 0x46, 0x5e, 0x2a, 0xa8, 0x75, 0x38, 0x54, 0x28, 0xc2, 0x2b, 0x38, 0xe9, 0xef, 0x08, 0x30, 0x4b, + 0xe9, 0x6d, 0xcb, 0x89, 0x49, 0x24, 0x0b, 0x7b, 0x0c, 0xc0, 0x49, 0xa4, 0xa6, 0x67, 0xb9, 0x84, + 0x15, 0xb4, 0x6e, 0xd4, 0x99, 0xe4, 0xba, 0xe5, 0x92, 0x21, 0x75, 0x1f, 0x79, 0x89, 0xba, 0x8f, + 0xee, 0x59, 0xf7, 0xca, 0x71, 0xb4, 0x9f, 0xba, 0x9f, 0x85, 0xc9, 0x5c, 0xfc, 0x22, 0x27, 0x27, + 0xa0, 0xc9, 0x0f, 0x70, 0x9f, 0xc9, 0x59, 0x56, 0xea, 0x46, 0xc3, 0x19, 0x40, 0xf5, 0x4f, 0xe1, + 0x48, 0xc6, 0xb2, 0x50, 0xe9, 0x7d, 0xd8, 0xdf, 0x85, 0x89, 0x6b, 0x32, 0x23, 0xd1, 0x6b, 0xbe, + 0x11, 0xfa, 0xfb, 0xa2, 0x4c, 0xc2, 0x99, 0x88, 0x72, 0x06, 0x1a, 
0x83, 0x32, 0xc9, 0x20, 0x21, + 0xad, 0x53, 0xa4, 0x7f, 0x04, 0x9d, 0x81, 0x59, 0xe1, 0x88, 0x7b, 0x1a, 0x63, 0x68, 0xdf, 0x8a, + 0x48, 0xb8, 0x4e, 0x2d, 0x2a, 0xcf, 0xa7, 0xff, 0x89, 0x60, 0x22, 0x23, 0x14, 0x54, 0x27, 0xe5, + 0x33, 0x6d, 0xfb, 0x9e, 0x19, 0x5a, 0x94, 0xb7, 0x0c, 0x32, 0xc6, 0x53, 0xa9, 0x61, 0x51, 0x92, + 0x74, 0x95, 0x17, 0xbb, 0x66, 0xda, 0xfd, 0x68, 0xb6, 0x62, 0xd4, 0xbd, 0xd8, 0xe5, 0xdd, 0x99, + 0xe4, 0xce, 0x0a, 0x6c, 0xb3, 0xc0, 0x34, 0xca, 0x98, 0xda, 0x56, 0x60, 0xaf, 0xe4, 0xc8, 0xe6, + 0x60, 0x32, 0x8c, 0x1d, 0x52, 0x84, 0x57, 0x18, 0x7c, 0x22, 0x51, 0xe5, 0xf1, 0xff, 0x87, 0x71, + 0xab, 0x4b, 0xed, 0xfb, 0x44, 0xfa, 0xaf, 0x32, 0xff, 0x4d, 0x2e, 0xe4, 0x21, 0xe8, 0x5f, 0xc1, + 0x64, 0x72, 0xba, 0x95, 0x8b, 0xf9, 0xf3, 0x4d, 0xc3, 0x81, 0x38, 0x22, 0xa1, 0x69, 0xf7, 0xc4, + 0x5d, 0xa8, 0x25, 0x9f, 0x2b, 0x3d, 0x7c, 0x0a, 0x2a, 0x3d, 0x8b, 0x5a, 0xec, 0x2c, 0x8d, 0x85, + 0x23, 0xb2, 0x59, 0x77, 0x64, 0xc8, 0x60, 0x30, 0xfd, 0x32, 0xe0, 0x44, 0x15, 0xe5, 0xd9, 0xcf, + 0x40, 0x35, 0x4a, 0x04, 0xe2, 0xea, 0x1e, 0xcd, 0xb2, 0x14, 0x22, 0x31, 0x38, 0x52, 0xff, 0x0d, + 0x81, 0xb6, 0x4a, 0x68, 0x68, 0x77, 0xa3, 0x4b, 0x7e, 0x98, 0xbf, 0x1b, 0xaf, 0xf9, 0x6d, 0x3e, + 0x0b, 0x4d, 0x79, 0xf9, 0xcc, 0x88, 0xd0, 0xdd, 0xdf, 0xe7, 0x86, 0x84, 0xae, 0x13, 0xaa, 0x5f, + 0x85, 0x99, 0xa1, 0x31, 0x8b, 0x54, 0xcc, 0x42, 0xcd, 0x65, 0x10, 0x91, 0x8b, 0xf6, 0xe0, 0x19, + 0xe3, 0xa6, 0x86, 0xd0, 0xeb, 0x37, 0xe1, 0xe4, 0x10, 0xb2, 0x42, 0x9b, 0xef, 0x9f, 0xb2, 0x03, + 0x87, 0x05, 0xe5, 0x2a, 0xa1, 0x56, 0x52, 0x30, 0xd9, 0xf5, 0x6b, 0x30, 0xbd, 0x43, 0x23, 0xe8, + 0xdf, 0x83, 0x31, 0x57, 0xc8, 0x84, 0x83, 0x4e, 0xd1, 0x41, 0x6a, 0x93, 0x22, 0xf5, 0x7f, 0x10, + 0x1c, 0x2c, 0x8c, 0x8b, 0xa4, 0x04, 0x77, 0x42, 0xdf, 0x35, 0xe5, 0xc2, 0x33, 0xe8, 0xb6, 0x56, + 0x22, 0x5f, 0x11, 0xe2, 0x95, 0x5e, 0xb6, 0x1d, 0x47, 0x72, 0xed, 0xe8, 0x41, 0x8d, 0xdd, 0x5f, + 0x39, 0x35, 0x27, 0x07, 0xa1, 0xb0, 0x14, 0xdd, 0xb0, 0xec, 0x70, 0x69, 0x31, 0x19, 0x02, 0x7f, + 0x3c, 0x9d, 0x79, 0xa9, 0x5d, 0x89, 0xdb, 0x2f, 0xf6, 0xac, 0x80, 0x92, 0xd0, 0x10, 0x5e, 0xf0, + 0xdb, 0x50, 0xe3, 0xd3, 0xad, 0x53, 0x61, 0xfe, 0xc6, 0x65, 0x17, 0x64, 0x07, 0xa0, 0x80, 0xe8, + 0xdf, 0x23, 0xa8, 0xf2, 0x93, 0xbe, 0xae, 0xd6, 0x54, 0x61, 0x8c, 0x78, 0x5d, 0xbf, 0x67, 0x7b, + 0x5b, 0xec, 0xd9, 0xa8, 0x1a, 0xe9, 0x37, 0xc6, 0xe2, 0xa6, 0x26, 0xef, 0x43, 0x53, 0x5c, 0xc7, + 0x45, 0x18, 0xcf, 0x75, 0x4e, 0x6e, 0x35, 0x42, 0xfb, 0x5a, 0x8d, 0x4c, 0x68, 0x66, 0x35, 0xf8, + 0x24, 0x54, 0xe8, 0xc3, 0x80, 0xbf, 0x7f, 0xad, 0x85, 0x09, 0x69, 0xcd, 0xd4, 0x1b, 0x0f, 0x03, + 0x62, 0x30, 0x75, 0x12, 0x0d, 0x9b, 0xac, 0xbc, 0x7c, 0xec, 0x37, 0x9e, 0x82, 0x2a, 0x1b, 0x36, + 0x2c, 0xf4, 0xba, 0xc1, 0x3f, 0xf4, 0xef, 0x10, 0xb4, 0x06, 0x9d, 0x72, 0xc9, 0x76, 0xc8, 0xab, + 0x68, 0x14, 0x15, 0xc6, 0xee, 0xd8, 0x0e, 0x61, 0x31, 0x70, 0x77, 0xe9, 0x77, 0x59, 0xa6, 0xde, + 0xfa, 0x0c, 0xea, 0xe9, 0x11, 0x70, 0x1d, 0xaa, 0xcb, 0x37, 0x6f, 0x2d, 0x5e, 0x6b, 0x2b, 0x78, + 0x1c, 0xea, 0xd7, 0xd7, 0x36, 0x4c, 0xfe, 0x89, 0xf0, 0x41, 0x68, 0x18, 0xcb, 0x97, 0x97, 0xbf, + 0x30, 0x57, 0x17, 0x37, 0x2e, 0x5c, 0x69, 0x8f, 0x60, 0x0c, 0x2d, 0x2e, 0xb8, 0xbe, 0x26, 0x64, + 0xa3, 0x0b, 0x3f, 0x8f, 0xc1, 0x98, 0x8c, 0x11, 0x9f, 0x83, 0xca, 0x8d, 0x38, 0xda, 0xc6, 0x87, + 0x07, 0x9d, 0xfa, 0x79, 0x68, 0x53, 0x22, 0x6e, 0x9e, 0x3a, 0xbd, 0x43, 0xce, 0xef, 0x9d, 0xae, + 0xe0, 0x0f, 0xa0, 0xca, 0xf6, 0x20, 0x5c, 0xba, 0x99, 0xab, 0xe5, 0xfb, 0xb6, 0xae, 0xe0, 0x8b, + 0xd0, 0xc8, 0xec, 0x76, 0x43, 0xac, 0x8f, 0xe6, 0xa4, 0xf9, 0x27, 0x45, 0x57, 0x4e, 0x23, 
0xbc, + 0x06, 0x2d, 0xa6, 0x92, 0x2b, 0x59, 0x84, 0xff, 0x27, 0x4d, 0xca, 0x56, 0x65, 0xf5, 0xd8, 0x10, + 0x6d, 0x1a, 0xd6, 0x15, 0x68, 0x64, 0xd6, 0x11, 0xac, 0xe6, 0x1a, 0x2f, 0xb7, 0x9d, 0x0d, 0x82, + 0x2b, 0xd9, 0x7c, 0x74, 0x05, 0xdf, 0x16, 0x8b, 0x49, 0x76, 0xb1, 0xd9, 0x95, 0xef, 0x44, 0x89, + 0xae, 0xe4, 0xc8, 0xcb, 0x00, 0x83, 0x65, 0x02, 0x1f, 0xc9, 0x19, 0x65, 0x97, 0x20, 0x55, 0x2d, + 0x53, 0xa5, 0xe1, 0xad, 0x43, 0xbb, 0xb8, 0x93, 0xec, 0x46, 0x76, 0x7c, 0xa7, 0xaa, 0x24, 0xb6, + 0x25, 0xa8, 0xa7, 0x43, 0x17, 0x77, 0x4a, 0xe6, 0x30, 0x27, 0x1b, 0x3e, 0xa1, 0x75, 0x05, 0x5f, + 0x82, 0xe6, 0xa2, 0xe3, 0xec, 0x87, 0x46, 0xcd, 0x6a, 0xa2, 0x22, 0x8f, 0x93, 0x4e, 0x8b, 0xe2, + 0x68, 0xc2, 0x6f, 0xa4, 0x0f, 0xc2, 0xae, 0xc3, 0x5b, 0x7d, 0x73, 0x4f, 0x5c, 0xea, 0xed, 0x1b, + 0x38, 0xb6, 0xeb, 0x20, 0xdc, 0xb7, 0xcf, 0x53, 0x7b, 0xe0, 0x4a, 0xb2, 0xbe, 0x01, 0x07, 0x0b, + 0x73, 0x11, 0x6b, 0x05, 0x96, 0xc2, 0x28, 0x55, 0x67, 0x86, 0xea, 0x25, 0xef, 0xd2, 0xc7, 0x8f, + 0x9f, 0x69, 0xca, 0x93, 0x67, 0x9a, 0xf2, 0xe2, 0x99, 0x86, 0xbe, 0xed, 0x6b, 0xe8, 0xd7, 0xbe, + 0x86, 0x1e, 0xf5, 0x35, 0xf4, 0xb8, 0xaf, 0xa1, 0xbf, 0xfa, 0x1a, 0xfa, 0xbb, 0xaf, 0x29, 0x2f, + 0xfa, 0x1a, 0xfa, 0xe1, 0xb9, 0xa6, 0x3c, 0x7e, 0xae, 0x29, 0x4f, 0x9e, 0x6b, 0xca, 0x97, 0xb5, + 0xae, 0x63, 0x13, 0x8f, 0x6e, 0xd6, 0xd8, 0xdf, 0xf9, 0x77, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, + 0x0a, 0x84, 0x23, 0x5a, 0x39, 0x10, 0x00, 0x00, } func (x MatchType) String() string { diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index 8e75129ed97..131c07e09fc 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -5,7 +5,7 @@ package cortex; option go_package = "client"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; option (gogoproto.marshaler_all) = true; diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index b8f8ac23456..d032117b4d2 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1219,7 +1219,7 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client } it := series.Iterator() - for it.Next() { + for it.Next() != chunkenc.ValNone { t, v := it.At() ts.Samples = append(ts.Samples, cortexpb.Sample{Value: v, TimestampMs: t}) } diff --git a/pkg/querier/batch/batch.go b/pkg/querier/batch/batch.go index 051dd9bc98e..35092e3a604 100644 --- a/pkg/querier/batch/batch.go +++ b/pkg/querier/batch/batch.go @@ -1,12 +1,13 @@ package batch import ( - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/encoding" promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/querier/iterators" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) // GenericChunk is a generic chunk used by the batch iterator, in order to make the batch @@ -75,10 +76,10 @@ type iteratorAdapter struct { } func newIteratorAdapter(underlying iterator) chunkenc.Iterator { - return &iteratorAdapter{ + return iterators.NewCompatibleChunksIterator(&iteratorAdapter{ batchSize: 1, underlying: underlying, - } + }) } // Seek implements chunkenc.Iterator. 
diff --git a/pkg/querier/batch/batch_test.go b/pkg/querier/batch/batch_test.go index bc1a33ed654..af82b5bc36b 100644 --- a/pkg/querier/batch/batch_test.go +++ b/pkg/querier/batch/batch_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/chunk" @@ -41,7 +42,7 @@ func BenchmarkNewChunkMergeIterator_CreateAndIterate(b *testing.B) { for n := 0; n < b.N; n++ { it := NewChunkMergeIterator(chunks, 0, 0) - for it.Next() { + for it.Next() != chunkenc.ValNone { it.At() } @@ -62,8 +63,8 @@ func TestSeekCorrectlyDealWithSinglePointChunks(t *testing.T) { sut := NewChunkMergeIterator(chunks, 0, 0) // Following calls mimics Prometheus's query engine behaviour for VectorSelector. - require.True(t, sut.Next()) - require.True(t, sut.Seek(0)) + require.True(t, sut.Next() != chunkenc.ValNone) + require.True(t, sut.Seek(0) != chunkenc.ValNone) actual, val := sut.At() require.Equal(t, float64(1*time.Second/time.Millisecond), val) // since mkChunk use ts as value. diff --git a/pkg/querier/batch/chunk_test.go b/pkg/querier/batch/chunk_test.go index 0b60e15ee18..797dc529fd4 100644 --- a/pkg/querier/batch/chunk_test.go +++ b/pkg/querier/batch/chunk_test.go @@ -69,20 +69,20 @@ func mkGenericChunk(t require.TestingT, from model.Time, points int, enc promchu func testIter(t require.TestingT, points int, iter chunkenc.Iterator) { ets := model.TimeFromUnix(0) for i := 0; i < points; i++ { - require.True(t, iter.Next(), strconv.Itoa(i)) + require.NotEqual(t, iter.Next(), chunkenc.ValNone, strconv.Itoa(i)) ts, v := iter.At() require.EqualValues(t, int64(ets), ts, strconv.Itoa(i)) require.EqualValues(t, float64(ets), v, strconv.Itoa(i)) ets = ets.Add(step) } - require.False(t, iter.Next()) + require.Equal(t, iter.Next(), chunkenc.ValNone) } func testSeek(t require.TestingT, points int, iter chunkenc.Iterator) { for i := 0; i < points; i += points / 10 { ets := int64(i * int(step/time.Millisecond)) - require.True(t, iter.Seek(ets)) + require.NotEqual(t, iter.Seek(ets), chunkenc.ValNone) ts, v := iter.At() require.EqualValues(t, ets, ts) require.EqualValues(t, v, float64(ets)) @@ -90,7 +90,7 @@ func testSeek(t require.TestingT, points int, iter chunkenc.Iterator) { for j := i + 1; j < i+points/10; j++ { ets := int64(j * int(step/time.Millisecond)) - require.True(t, iter.Next()) + require.NotEqual(t, iter.Next(), chunkenc.ValNone) ts, v := iter.At() require.EqualValues(t, ets, ts) require.EqualValues(t, float64(ets), v) diff --git a/pkg/querier/block.go b/pkg/querier/block.go index 15b6541948e..5ab049f3d1a 100644 --- a/pkg/querier/block.go +++ b/pkg/querier/block.go @@ -4,14 +4,15 @@ import ( "math" "sort" + "github.com/cortexproject/cortex/pkg/querier/iterators" + "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" - - "github.com/cortexproject/cortex/pkg/querier/series" ) func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMatcher { @@ -125,7 +126,7 @@ func (bqs *blockQuerierSeries) Iterator() chunkenc.Iterator { its = append(its, it) } - return newBlockQuerierSeriesIterator(bqs.Labels(), its) + return iterators.NewCompatibleChunksIterator(newBlockQuerierSeriesIterator(bqs.Labels(), its)) } 
func newBlockQuerierSeriesIterator(labels labels.Labels, its []chunkenc.Iterator) *blockQuerierSeriesIterator { @@ -173,7 +174,7 @@ func (it *blockQuerierSeriesIterator) Next() bool { return false } - if it.iterators[it.i].Next() { + if it.iterators[it.i].Next() != chunkenc.ValNone { return true } if it.iterators[it.i].Err() != nil { @@ -189,7 +190,7 @@ func (it *blockQuerierSeriesIterator) Next() bool { // we must advance iterator first, to see if it has any samples. // Seek will call At() as its first operation. - if !it.iterators[it.i].Next() { + if it.iterators[it.i].Next() == chunkenc.ValNone { if it.iterators[it.i].Err() != nil { return false } diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go index 01b1dbc4f02..055af0b0725 100644 --- a/pkg/querier/block_test.go +++ b/pkg/querier/block_test.go @@ -78,7 +78,7 @@ func TestBlockQuerierSeries(t *testing.T) { sampleIx := 0 it := series.Iterator() - for it.Next() { + for it.Next() != chunkenc.ValNone { ts, val := it.At() require.True(t, sampleIx < len(testData.expectedSamples)) assert.Equal(t, int64(testData.expectedSamples[sampleIx].Timestamp), ts) @@ -208,7 +208,7 @@ func verifyNextSeries(t *testing.T, ss storage.SeriesSet, labels labels.Labels, prevTS := int64(0) count := 0 - for it := s.Iterator(); it.Next(); { + for it := s.Iterator(); it.Next() != chunkenc.ValNone; { count++ ts, v := it.At() require.Equal(t, math.Sin(float64(ts)), v) @@ -333,7 +333,7 @@ func Benchmark_blockQuerierSeriesSet_iteration(b *testing.B) { set := blockQuerierSeriesSet{series: series} for set.Next() { - for t := set.At().Iterator(); t.Next(); { + for t := set.At().Iterator(); t.Next() != chunkenc.ValNone; { t.At() } } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 5d7f3923528..67f258baed6 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -711,7 +711,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { var actualValues []valueResult it := set.At().Iterator() - for it.Next() { + for it.Next() != chunkenc.ValNone { t, v := it.At() actualValues = append(actualValues, valueResult{ t: t, diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index 410de35421c..dec55e368bc 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -320,13 +321,13 @@ func verifySeries(t *testing.T, series storage.Series, l labels.Labels, samples it := series.Iterator() for _, s := range samples { - require.True(t, it.Next()) + require.NotEqual(t, it.Next(), chunkenc.ValNone) require.Nil(t, it.Err()) ts, v := it.At() require.Equal(t, s.Value, v) require.Equal(t, s.TimestampMs, ts) } - require.False(t, it.Next()) + require.Equal(t, it.Next(), chunkenc.ValNone) require.Nil(t, it.Err()) } func TestDistributorQuerier_LabelNames(t *testing.T) { diff --git a/pkg/querier/iterators/chunk_merge_iterator.go b/pkg/querier/iterators/chunk_merge_iterator.go index c056fa20930..c0b48d717cf 100644 --- a/pkg/querier/iterators/chunk_merge_iterator.go +++ b/pkg/querier/iterators/chunk_merge_iterator.go @@ -30,7 +30,7 @@ func NewChunkMergeIterator(cs []chunk.Chunk, _, _ 
model.Time) chunkenc.Iterator for _, iter := range c.its { if iter.Next() { - c.h = append(c.h, iter) + c.h = append(c.h, NewCompatibleChunksIterator(iter)) continue } @@ -40,7 +40,7 @@ func NewChunkMergeIterator(cs []chunk.Chunk, _, _ model.Time) chunkenc.Iterator } heap.Init(&c.h) - return c + return NewCompatibleChunksIterator(c) } // Build a list of lists of non-overlapping chunk iterators. @@ -78,7 +78,7 @@ func (c *chunkMergeIterator) Seek(t int64) bool { for _, iter := range c.its { if iter.Seek(t) { - c.h = append(c.h, iter) + c.h = append(c.h, NewCompatibleChunksIterator(iter)) continue } @@ -107,7 +107,7 @@ func (c *chunkMergeIterator) Next() bool { for c.currTime == lastTime && len(c.h) > 0 { c.currTime, c.currValue = c.h[0].At() - if c.h[0].Next() { + if c.h[0].Next() != chunkenc.ValNone { heap.Fix(&c.h, 0) continue } diff --git a/pkg/querier/iterators/chunk_merge_iterator_test.go b/pkg/querier/iterators/chunk_merge_iterator_test.go index 51e5671e8a7..4ed6e46737e 100644 --- a/pkg/querier/iterators/chunk_merge_iterator_test.go +++ b/pkg/querier/iterators/chunk_merge_iterator_test.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -54,13 +55,13 @@ func TestChunkMergeIterator(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) { iter := NewChunkMergeIterator(tc.chunks, 0, 0) for i := tc.mint; i < tc.maxt; i++ { - require.True(t, iter.Next()) + require.NotEqual(t, iter.Next(), chunkenc.ValNone) ts, s := iter.At() assert.Equal(t, i, ts) assert.Equal(t, float64(i), s) assert.NoError(t, iter.Err()) } - assert.False(t, iter.Next()) + assert.Equal(t, iter.Next(), chunkenc.ValNone) }) } } @@ -73,20 +74,20 @@ func TestChunkMergeIteratorSeek(t *testing.T) { }, 0, 0) for i := int64(0); i < 10; i += 20 { - require.True(t, iter.Seek(i)) + require.NotEqual(t, iter.Seek(i), chunkenc.ValNone) ts, s := iter.At() assert.Equal(t, i, ts) assert.Equal(t, float64(i), s) assert.NoError(t, iter.Err()) for j := i + 1; j < 200; j++ { - require.True(t, iter.Next()) + require.NotEqual(t, iter.Next(), chunkenc.ValNone) ts, s := iter.At() assert.Equal(t, j, ts) assert.Equal(t, float64(j), s) assert.NoError(t, iter.Err()) } - assert.False(t, iter.Next()) + assert.Equal(t, iter.Next(), chunkenc.ValNone) } } diff --git a/pkg/querier/iterators/compatibe_chunk_iterator.go b/pkg/querier/iterators/compatibe_chunk_iterator.go new file mode 100644 index 00000000000..288b1a8a6d4 --- /dev/null +++ b/pkg/querier/iterators/compatibe_chunk_iterator.go @@ -0,0 +1,77 @@ +package iterators + +import ( + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +var ErrNativeHistogramsUnsupported = errors.Newf("querying native histograms is not supported") + +// compChunksIterator is a compatible interface from prometheus before the native histograms implementation. +// As for now cortex does not support this feature, this interface was create as a bridge and should be deleted +// together with the compatibleChunksIterator when cortex supports native histograms. 
+// See: https://github.com/prometheus/prometheus/blob/5ddfba78936b7ca7ed3600869f623359e6b5ecb0/tsdb/chunkenc/chunk.go#L89-L105 +type compChunksIterator interface { + Next() bool + Seek(t int64) bool + At() (int64, float64) + Err() error +} + +type compatibleChunksIterator struct { + chunkenc.Iterator + it compChunksIterator + err error +} + +func NewCompatibleChunksIterator(i compChunksIterator) *compatibleChunksIterator { + return &compatibleChunksIterator{it: i} +} + +func (c *compatibleChunksIterator) Next() chunkenc.ValueType { + if c.it.Next() { + return chunkenc.ValFloat + } + + return chunkenc.ValNone +} + +func (c *compatibleChunksIterator) Seek(t int64) chunkenc.ValueType { + if c.it.Seek(t) { + return chunkenc.ValFloat + } + + return chunkenc.ValNone +} + +func (c *compatibleChunksIterator) At() (int64, float64) { + return c.it.At() +} + +func (c *compatibleChunksIterator) AtHistogram() (int64, *histogram.Histogram) { + c.err = ErrNativeHistogramsUnsupported + return 0, nil +} + +func (c *compatibleChunksIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + c.err = ErrNativeHistogramsUnsupported + return 0, nil +} + +func (c *compatibleChunksIterator) AtT() int64 { + t, _ := c.it.At() + return t +} + +func (c *compatibleChunksIterator) Err() error { + if c.err != nil { + return c.err + } + + return c.it.Err() +} + +func (c *compatibleChunksIterator) AtTime() int64 { + return c.AtT() +} diff --git a/pkg/querier/remote_read.go b/pkg/querier/remote_read.go index 65dbc96e93e..5c943741e39 100644 --- a/pkg/querier/remote_read.go +++ b/pkg/querier/remote_read.go @@ -6,6 +6,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ingester/client" @@ -82,7 +83,7 @@ func seriesSetToQueryResponse(s storage.SeriesSet) (*client.QueryResponse, error series := s.At() samples := []cortexpb.Sample{} it := series.Iterator() - for it.Next() { + for it.Next() != chunkenc.ValNone { t, v := it.At() samples = append(samples, cortexpb.Sample{ TimestampMs: t, diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index 1b6cdc52229..9d30b69c8b4 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -19,13 +19,14 @@ package series import ( "sort" + "github.com/cortexproject/cortex/pkg/prom1/storage/metric" + "github.com/cortexproject/cortex/pkg/purger" + "github.com/cortexproject/cortex/pkg/querier/iterators" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" - "github.com/cortexproject/cortex/pkg/purger" ) // ConcreteSeriesSet implements storage.SeriesSet. 
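The new compatibleChunksIterator introduced above is the bridge applied in batch.go, block.go and series_set.go: Cortex's internal iterators keep their legacy bool-based Next/Seek contract and float-only At(), and a single NewCompatibleChunksIterator call at the constructor boundary converts them into the new chunkenc.Iterator, recording ErrNativeHistogramsUnsupported if a caller asks for histogram samples. A rough usage sketch of that layering; the sliceIterator and Demo names are invented for illustration and are not part of the patch:

package example

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"

	"github.com/cortexproject/cortex/pkg/querier/iterators"
)

// sliceIterator stands in for any legacy Cortex iterator that still uses the
// old bool-based Next/Seek contract and only knows about float samples.
type sliceIterator struct {
	ts  []int64
	vs  []float64
	cur int
}

func (s *sliceIterator) Next() bool {
	s.cur++
	return s.cur < len(s.ts)
}

func (s *sliceIterator) Seek(t int64) bool {
	for s.cur < 0 || (s.cur < len(s.ts) && s.ts[s.cur] < t) {
		s.cur++
	}
	return s.cur < len(s.ts)
}

func (s *sliceIterator) At() (int64, float64) { return s.ts[s.cur], s.vs[s.cur] }
func (s *sliceIterator) Err() error           { return nil }

func Demo() {
	// Wrap once at the boundary; callers only ever see chunkenc.Iterator.
	var it chunkenc.Iterator = iterators.NewCompatibleChunksIterator(&sliceIterator{
		ts:  []int64{10, 20, 30},
		vs:  []float64{1, 2, 3},
		cur: -1,
	})
	for it.Next() != chunkenc.ValNone {
		t, v := it.At()
		fmt.Println(t, v)
	}

	// Native histograms are not supported by the bridge: asking for one records
	// ErrNativeHistogramsUnsupported, which surfaces through Err().
	_, _ = it.AtHistogram()
	fmt.Println(errors.Is(it.Err(), iterators.ErrNativeHistogramsUnsupported)) // true
}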
@@ -99,10 +100,10 @@ type concreteSeriesIterator struct { // NewConcreteSeriesIterator instaniates an in memory chunkenc.Iterator func NewConcreteSeriesIterator(series *ConcreteSeries) chunkenc.Iterator { - return &concreteSeriesIterator{ + return iterators.NewCompatibleChunksIterator(&concreteSeriesIterator{ cur: -1, series: series, - } + }) } func (c *concreteSeriesIterator) Seek(t int64) bool { @@ -128,7 +129,7 @@ func (c *concreteSeriesIterator) Err() error { // NewErrIterator instantiates an errIterator func NewErrIterator(err error) chunkenc.Iterator { - return errIterator{err} + return iterators.NewCompatibleChunksIterator(errIterator{err}) } // errIterator implements chunkenc.Iterator, just returning an error. @@ -261,14 +262,14 @@ type DeletedSeriesIterator struct { } func NewDeletedSeriesIterator(itr chunkenc.Iterator, deletedIntervals []model.Interval) chunkenc.Iterator { - return &DeletedSeriesIterator{ + return iterators.NewCompatibleChunksIterator(&DeletedSeriesIterator{ itr: itr, deletedIntervals: deletedIntervals, - } + }) } func (d DeletedSeriesIterator) Seek(t int64) bool { - if found := d.itr.Seek(t); !found { + if found := d.itr.Seek(t); found == chunkenc.ValNone { return false } @@ -286,7 +287,7 @@ func (d DeletedSeriesIterator) At() (t int64, v float64) { } func (d DeletedSeriesIterator) Next() bool { - for d.itr.Next() { + for d.itr.Next() != chunkenc.ValNone { ts, _ := d.itr.At() if d.isDeleted(ts) { @@ -339,7 +340,7 @@ type emptySeriesIterator struct { } func NewEmptySeriesIterator() chunkenc.Iterator { - return emptySeriesIterator{} + return iterators.NewCompatibleChunksIterator(emptySeriesIterator{}) } func (emptySeriesIterator) Seek(t int64) bool { diff --git a/pkg/querier/series/series_set_test.go b/pkg/querier/series/series_set_test.go index 1813af3ec2c..0ba6f32ab9b 100644 --- a/pkg/querier/series/series_set_test.go +++ b/pkg/querier/series/series_set_test.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" ) @@ -81,7 +82,7 @@ func TestDeletedSeriesIterator(t *testing.T) { it := NewDeletedSeriesIterator(NewConcreteSeriesIterator(&cs), c.r) ranges := c.r[:] - for it.Next() { + for it.Next() != chunkenc.ValNone { i++ for _, tr := range ranges { if inbound(model.Time(i), tr) { @@ -139,7 +140,7 @@ func TestDeletedIterator_WithSeek(t *testing.T) { for _, c := range cases { it := NewDeletedSeriesIterator(NewConcreteSeriesIterator(&cs), c.r) - require.Equal(t, c.ok, it.Seek(c.seek)) + require.NotEqual(t, c.ok, it.Seek(c.seek), chunkenc.ValNone) if c.ok { ts, _ := it.At() require.Equal(t, c.seekedTs, ts) diff --git a/pkg/querier/stats/stats.pb.go b/pkg/querier/stats/stats.pb.go index d0a0653cfe3..39a1775ad6c 100644 --- a/pkg/querier/stats/stats.pb.go +++ b/pkg/querier/stats/stats.pb.go @@ -119,32 +119,31 @@ func init() { func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } var fileDescriptor_b4756a0aec8b9d44 = []byte{ - // 388 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0x3f, 0x6e, 0xe2, 0x40, - 0x18, 0xc5, 0x3d, 0x80, 0x57, 0x30, 0xde, 0x82, 0x9d, 0xa5, 0x30, 0x48, 0x3b, 0xa0, 0xad, 0x28, - 0x76, 0x87, 0x15, 0xdb, 0x44, 0x89, 0x14, 0x21, 0xfe, 0xe4, 0x00, 0x26, 0x55, 0x1a, 0x6b, 0x6c, - 0x0f, 0xc6, 0xc2, 0x78, 0x22, 0x7b, 0x9c, 0xc4, 0x5d, 0x8e, 0x90, 0x32, 0x47, 0xc8, 0x51, 0x28, 
- 0x29, 0x91, 0x22, 0x25, 0xc1, 0x34, 0x29, 0x39, 0x42, 0xe4, 0xb1, 0x11, 0x12, 0xdd, 0xf7, 0xfc, - 0x7b, 0xef, 0xb3, 0xbe, 0xa7, 0x81, 0x5a, 0x24, 0xa8, 0x88, 0xc8, 0x6d, 0xc8, 0x05, 0x47, 0xaa, - 0x14, 0xad, 0xbf, 0xae, 0x27, 0xe6, 0xb1, 0x45, 0x6c, 0xbe, 0xec, 0xb9, 0xdc, 0xe5, 0x3d, 0x49, - 0xad, 0x78, 0x26, 0x95, 0x14, 0x72, 0xca, 0x53, 0x2d, 0xec, 0x72, 0xee, 0xfa, 0xec, 0xe8, 0x72, - 0xe2, 0x90, 0x0a, 0x8f, 0x07, 0x05, 0x6f, 0x9e, 0x72, 0x1a, 0x24, 0x39, 0xfa, 0xfd, 0x5a, 0x82, - 0xea, 0x34, 0xfb, 0x27, 0x1a, 0xc0, 0xda, 0x3d, 0xf5, 0x7d, 0x53, 0x78, 0x4b, 0xa6, 0x83, 0x0e, - 0xe8, 0x6a, 0xfd, 0x26, 0xc9, 0x83, 0xe4, 0x10, 0x24, 0xe3, 0x62, 0xf1, 0xb0, 0xba, 0x7a, 0x6b, - 0x2b, 0xcf, 0xef, 0x6d, 0x60, 0x54, 0xb3, 0xd4, 0xb5, 0xb7, 0x64, 0xe8, 0x1f, 0x6c, 0xcc, 0x98, - 0xb0, 0xe7, 0xcc, 0x31, 0x23, 0x16, 0x7a, 0x2c, 0x32, 0x6d, 0x1e, 0x07, 0x42, 0x2f, 0x75, 0x40, - 0xb7, 0x62, 0xa0, 0x82, 0x4d, 0x25, 0x1a, 0x65, 0x04, 0x11, 0xf8, 0xf3, 0x90, 0xb0, 0xe7, 0x71, - 0xb0, 0x30, 0xad, 0x44, 0xb0, 0x48, 0x2f, 0xcb, 0xc0, 0x8f, 0x02, 0x8d, 0x32, 0x32, 0xcc, 0x00, - 0xfa, 0x03, 0x0f, 0x5b, 0x4c, 0x87, 0x0a, 0x5a, 0xd8, 0x2b, 0xd2, 0x5e, 0x2f, 0xc8, 0x98, 0x0a, - 0x9a, 0xbb, 0x07, 0xf0, 0x3b, 0x7b, 0x10, 0x21, 0x35, 0x67, 0x1e, 0xf3, 0x9d, 0x48, 0x57, 0x3b, - 0xe5, 0xae, 0xd6, 0xff, 0x45, 0xf2, 0xc2, 0xe5, 0xd5, 0x64, 0x92, 0x19, 0xae, 0x24, 0x9f, 0x04, - 0x22, 0x4c, 0x0c, 0x8d, 0x1d, 0xbf, 0xb4, 0x2e, 0x61, 0xfd, 0xd4, 0x80, 0xea, 0xb0, 0xbc, 0x60, - 0x89, 0x6c, 0xa8, 0x66, 0x64, 0x23, 0x6a, 0x40, 0xf5, 0x8e, 0xfa, 0x31, 0x93, 0x87, 0xd6, 0x8c, - 0x5c, 0x9c, 0x97, 0xce, 0xc0, 0xf0, 0x62, 0xbd, 0xc5, 0xca, 0x66, 0x8b, 0x95, 0xfd, 0x16, 0x83, - 0xc7, 0x14, 0x83, 0x97, 0x14, 0x83, 0x55, 0x8a, 0xc1, 0x3a, 0xc5, 0xe0, 0x23, 0xc5, 0xe0, 0x33, - 0xc5, 0xca, 0x3e, 0xc5, 0xe0, 0x69, 0x87, 0x95, 0xf5, 0x0e, 0x2b, 0x9b, 0x1d, 0x56, 0x6e, 0xf2, - 0x47, 0x60, 0x7d, 0x93, 0xad, 0xff, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x25, 0x6d, 0xec, 0xdd, - 0x21, 0x02, 0x00, 0x00, + // 377 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xbf, 0x6e, 0xea, 0x30, + 0x18, 0xc5, 0x63, 0x20, 0x57, 0xe0, 0xdc, 0x81, 0xeb, 0xcb, 0x10, 0x90, 0xae, 0x41, 0x77, 0x62, + 0xa8, 0x42, 0x45, 0x97, 0xaa, 0x95, 0x2a, 0xc4, 0x9f, 0x3e, 0x40, 0xe8, 0xd4, 0x25, 0x32, 0xc4, + 0x84, 0x88, 0x10, 0x57, 0x89, 0xd3, 0x36, 0x5b, 0x1f, 0xa1, 0x63, 0x1f, 0xa1, 0x8f, 0xc2, 0xc8, + 0x88, 0x54, 0xa9, 0x2d, 0x61, 0xe9, 0xc8, 0x23, 0x54, 0xb6, 0x83, 0x2a, 0xb1, 0xf9, 0xfb, 0x7e, + 0xe7, 0x1c, 0xe9, 0x3b, 0x86, 0x46, 0xcc, 0x09, 0x8f, 0xad, 0xbb, 0x88, 0x71, 0x86, 0x74, 0x39, + 0x34, 0x6a, 0x1e, 0xf3, 0x98, 0xdc, 0x74, 0xc4, 0x4b, 0xc1, 0x06, 0xf6, 0x18, 0xf3, 0x02, 0xda, + 0x91, 0xd3, 0x24, 0x99, 0x75, 0xdc, 0x24, 0x22, 0xdc, 0x67, 0x61, 0xce, 0xeb, 0xc7, 0x9c, 0x84, + 0xa9, 0x42, 0xff, 0xdf, 0x0a, 0x50, 0x1f, 0x8b, 0x68, 0xd4, 0x83, 0x95, 0x07, 0x12, 0x04, 0x0e, + 0xf7, 0x97, 0xd4, 0x04, 0x2d, 0xd0, 0x36, 0xba, 0x75, 0x4b, 0x19, 0xad, 0x83, 0xd1, 0x1a, 0xe6, + 0xc1, 0xfd, 0xf2, 0xea, 0xbd, 0xa9, 0xbd, 0x7c, 0x34, 0x81, 0x5d, 0x16, 0xae, 0x1b, 0x7f, 0x49, + 0xd1, 0x29, 0xac, 0xcd, 0x28, 0x9f, 0xce, 0xa9, 0xeb, 0xc4, 0x34, 0xf2, 0x69, 0xec, 0x4c, 0x59, + 0x12, 0x72, 0xb3, 0xd0, 0x02, 0xed, 0x92, 0x8d, 0x72, 0x36, 0x96, 0x68, 0x20, 0x08, 0xb2, 0xe0, + 0xdf, 0x83, 0x63, 0x3a, 0x4f, 0xc2, 0x85, 0x33, 0x49, 0x39, 0x8d, 0xcd, 0xa2, 0x34, 0xfc, 0xc9, + 0xd1, 0x40, 0x90, 0xbe, 0x00, 0xe8, 0x04, 0x1e, 0x52, 0x1c, 0x97, 0x70, 0x92, 0xcb, 0x4b, 0x52, + 0x5e, 0xcd, 0xc9, 0x90, 0x70, 0xa2, 0xd4, 0x3d, 
0xf8, 0x9b, 0x3e, 0xf2, 0x88, 0x38, 0x33, 0x9f, + 0x06, 0x6e, 0x6c, 0xea, 0xad, 0x62, 0xdb, 0xe8, 0xfe, 0xb3, 0x54, 0xaf, 0xf2, 0x6a, 0x6b, 0x24, + 0x04, 0xd7, 0x92, 0x8f, 0x42, 0x1e, 0xa5, 0xb6, 0x41, 0x7f, 0x36, 0x8d, 0x2b, 0x58, 0x3d, 0x16, + 0xa0, 0x2a, 0x2c, 0x2e, 0x68, 0x2a, 0x1b, 0xaa, 0xd8, 0xe2, 0x89, 0x6a, 0x50, 0xbf, 0x27, 0x41, + 0x42, 0xe5, 0xa1, 0x15, 0x5b, 0x0d, 0x17, 0x85, 0x73, 0xd0, 0xbf, 0x5c, 0x6f, 0xb1, 0xb6, 0xd9, + 0x62, 0x6d, 0xbf, 0xc5, 0xe0, 0x29, 0xc3, 0xe0, 0x35, 0xc3, 0x60, 0x95, 0x61, 0xb0, 0xce, 0x30, + 0xf8, 0xcc, 0x30, 0xf8, 0xca, 0xb0, 0xb6, 0xcf, 0x30, 0x78, 0xde, 0x61, 0x6d, 0xbd, 0xc3, 0xda, + 0x66, 0x87, 0xb5, 0x5b, 0xf5, 0xd7, 0x93, 0x5f, 0xb2, 0xf5, 0xb3, 0xef, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xe2, 0xdd, 0x06, 0xc8, 0x08, 0x02, 0x00, 0x00, } func (this *Stats) Equal(that interface{}) bool { diff --git a/pkg/querier/stats/stats.proto b/pkg/querier/stats/stats.proto index c0e509d9a17..d61d00f7573 100644 --- a/pkg/querier/stats/stats.proto +++ b/pkg/querier/stats/stats.proto @@ -4,7 +4,7 @@ package stats; option go_package = "stats"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/any.proto"; diff --git a/pkg/querier/timeseries_series_set.go b/pkg/querier/timeseries_series_set.go index bcc30867f9c..c42efa764cb 100644 --- a/pkg/querier/timeseries_series_set.go +++ b/pkg/querier/timeseries_series_set.go @@ -3,11 +3,12 @@ package querier import ( "sort" + "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/querier/iterators" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/cortexpb" ) // timeSeriesSeriesSet is a wrapper around a cortexpb.TimeSeries slice to implement to SeriesSet interface @@ -71,10 +72,10 @@ func (t *timeseries) Labels() labels.Labels { // Iterator implements the storage.Series interface func (t *timeseries) Iterator() chunkenc.Iterator { - return &timeSeriesSeriesIterator{ + return iterators.NewCompatibleChunksIterator(&timeSeriesSeriesIterator{ ts: t, i: -1, - } + }) } // Seek implements SeriesIterator interface diff --git a/pkg/querier/timeseries_series_set_test.go b/pkg/querier/timeseries_series_set_test.go index 17d09210114..99f7636dbf4 100644 --- a/pkg/querier/timeseries_series_set_test.go +++ b/pkg/querier/timeseries_series_set_test.go @@ -3,6 +3,7 @@ package querier import ( "testing" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -36,7 +37,7 @@ func TestTimeSeriesSeriesSet(t *testing.T) { require.Equal(t, ss.ts[0].Labels[0].Value, series.Labels()[0].Value) it := series.Iterator() - require.True(t, it.Next()) + require.NotEqual(t, it.Next(), chunkenc.ValNone) ts, v := it.At() require.Equal(t, 3.14, v) require.Equal(t, int64(1234), ts) @@ -51,7 +52,7 @@ func TestTimeSeriesSeriesSet(t *testing.T) { require.True(t, ss.Next()) it = ss.At().Iterator() - require.True(t, it.Seek(2000)) + require.NotEqual(t, it.Seek(2000), chunkenc.ValNone) ts, v = it.At() require.Equal(t, 1.618, v) require.Equal(t, int64(2345), ts) @@ -84,25 +85,25 @@ func TestTimeSeriesIterator(t *testing.T) { } it := ts.Iterator() - require.True(t, it.Seek(1235)) // Seek to middle + require.NotEqual(t, it.Seek(1235), chunkenc.ValNone) // Seek to middle i, _ := it.At() require.EqualValues(t, 1235, i) - 
require.True(t, it.Seek(1236)) // Seek to end + require.NotEqual(t, it.Seek(1236), chunkenc.ValNone) // Seek to end i, _ = it.At() require.EqualValues(t, 1236, i) - require.False(t, it.Seek(1238)) // Seek past end + require.Equal(t, it.Seek(1238), chunkenc.ValNone) // Seek past end it = ts.Iterator() - require.True(t, it.Next()) - require.True(t, it.Next()) + require.NotEqual(t, it.Next(), chunkenc.ValNone) + require.NotEqual(t, it.Next(), chunkenc.ValNone) i, _ = it.At() require.EqualValues(t, 1235, i) - require.True(t, it.Seek(1234)) // Ensure seek doesn't do anything if already past seek target. + require.NotEqual(t, it.Seek(1234), chunkenc.ValNone) // Ensure seek doesn't do anything if already past seek target. i, _ = it.At() require.EqualValues(t, 1235, i) it = ts.Iterator() - for i := 0; it.Next(); { + for i := 0; it.Next() != chunkenc.ValNone; { j, _ := it.At() switch i { case 0: diff --git a/pkg/querier/tripperware/instantquery/instantquery.pb.go b/pkg/querier/tripperware/instantquery/instantquery.pb.go index b878aa24d8d..ae1ff344ed4 100644 --- a/pkg/querier/tripperware/instantquery/instantquery.pb.go +++ b/pkg/querier/tripperware/instantquery/instantquery.pb.go @@ -404,49 +404,48 @@ func init() { func init() { proto.RegisterFile("instantquery.proto", fileDescriptor_d2ce36475a368033) } var fileDescriptor_d2ce36475a368033 = []byte{ - // 661 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x4f, 0xd4, 0x4e, - 0x18, 0xee, 0x00, 0x5b, 0xe0, 0x5d, 0xe0, 0xf7, 0xcb, 0x60, 0x74, 0x25, 0x64, 0x4a, 0x36, 0x9a, - 0xac, 0x46, 0xba, 0xc9, 0x1a, 0xa3, 0x1e, 0xad, 0x62, 0x30, 0x6a, 0xc0, 0x59, 0xe2, 0xc1, 0xdb, - 0xec, 0x32, 0x2e, 0xd5, 0x2d, 0xad, 0xd3, 0x29, 0xb0, 0x37, 0x3f, 0x82, 0x9f, 0xc2, 0xf8, 0x1d, - 0xbc, 0x7a, 0xe0, 0xc8, 0x91, 0x78, 0x68, 0xa4, 0x5c, 0xcc, 0x9e, 0xf0, 0x1b, 0x98, 0xce, 0x4c, - 0xa1, 0xeb, 0x5f, 0xbc, 0xf5, 0x7d, 0xdf, 0xe7, 0x79, 0xe6, 0x7d, 0x9e, 0x69, 0x0b, 0xd8, 0xdf, - 0x8e, 0x25, 0xdb, 0x96, 0x6f, 0x12, 0x2e, 0x06, 0x6e, 0x24, 0x42, 0x19, 0xe2, 0x99, 0x72, 0x6f, - 0x61, 0xb9, 0xe7, 0xcb, 0xad, 0xa4, 0xe3, 0x76, 0xc3, 0xa0, 0xd9, 0x0b, 0x7b, 0x61, 0x53, 0x81, - 0x3a, 0xc9, 0x4b, 0x55, 0xa9, 0x42, 0x3d, 0x69, 0xf2, 0xc2, 0xdd, 0x12, 0xbc, 0x1b, 0x0a, 0xc9, - 0xf7, 0x22, 0x11, 0xbe, 0xe2, 0x5d, 0x69, 0xaa, 0x66, 0xf4, 0xba, 0x57, 0x0c, 0x3a, 0xe6, 0xc1, - 0x50, 0xef, 0x9f, 0x87, 0x9a, 0x2f, 0xe5, 0x73, 0xd1, 0x94, 0xc2, 0x8f, 0x22, 0x2e, 0x76, 0x99, - 0xe0, 0xcd, 0xd2, 0xf2, 0xf5, 0x4f, 0x63, 0x40, 0xd6, 0x45, 0x18, 0x70, 0xb9, 0xc5, 0x93, 0xf8, - 0x91, 0x76, 0xf2, 0x2c, 0x07, 0x50, 0x1e, 0x47, 0xe1, 0x76, 0xcc, 0x71, 0x1d, 0xec, 0xb6, 0x64, - 0x32, 0x89, 0x6b, 0x68, 0x09, 0x35, 0xa6, 0x3d, 0x18, 0xa6, 0x8e, 0x1d, 0xab, 0x0e, 0x35, 0x13, - 0xbc, 0x01, 0x13, 0x0f, 0x98, 0x64, 0xb5, 0xb1, 0x25, 0xd4, 0xa8, 0xb6, 0x1a, 0xee, 0x48, 0x4c, - 0xbf, 0xd6, 0xcf, 0xf1, 0xde, 0xc5, 0xfd, 0xd4, 0xb1, 0x86, 0xa9, 0x33, 0xb7, 0xc9, 0x24, 0xbb, - 0x11, 0x06, 0xbe, 0xe4, 0x41, 0x24, 0x07, 0x54, 0xa9, 0xe1, 0x5b, 0x30, 0xbd, 0x22, 0x44, 0x28, - 0x36, 0x06, 0x11, 0xaf, 0x8d, 0xab, 0xc3, 0x2f, 0x0d, 0x53, 0x67, 0x9e, 0x17, 0xcd, 0x12, 0xe3, - 0x0c, 0x89, 0xaf, 0x41, 0x45, 0x15, 0xb5, 0x09, 0x45, 0x99, 0x1f, 0xa6, 0xce, 0x7f, 0x8a, 0x52, - 0x82, 0x6b, 0x04, 0x7e, 0x08, 0x93, 0xab, 0x9c, 0x6d, 0x72, 0x11, 0xd7, 0x2a, 0x4b, 0xe3, 0x8d, - 0x6a, 0xeb, 0xaa, 0x5b, 0x4a, 0xaa, 0xb4, 0x79, 0x91, 0x86, 0x46, 0x7b, 0x95, 0x61, 0xea, 0xa0, - 0x65, 0x5a, 0x90, 0xeb, 0xdf, 0x10, 0x2c, 0xfc, 0xde, 0x26, 0x76, 0x01, 0x28, 0x8f, 
0x93, 0xbe, - 0x54, 0x4e, 0x74, 0x8c, 0x73, 0xc3, 0xd4, 0x01, 0x71, 0xda, 0xa5, 0x25, 0x04, 0xa6, 0x60, 0xeb, - 0xca, 0x04, 0x7a, 0xfd, 0x3c, 0x81, 0x6a, 0x86, 0x37, 0x67, 0x22, 0xb5, 0xb5, 0x36, 0x35, 0x4a, - 0x78, 0x0d, 0x2a, 0xf9, 0xa5, 0xc5, 0x2a, 0xc8, 0x6a, 0xeb, 0xca, 0x5f, 0x8c, 0xe6, 0x17, 0x1b, - 0xeb, 0xec, 0x14, 0xad, 0x9c, 0x9d, 0x6a, 0xd4, 0xdf, 0x23, 0x58, 0xfc, 0xd3, 0x26, 0xd8, 0x05, - 0x7b, 0x87, 0x77, 0x65, 0x28, 0x94, 0xe3, 0x6a, 0xeb, 0xc2, 0xa8, 0x8b, 0xe7, 0x6a, 0xb6, 0x6a, - 0x51, 0x83, 0xc2, 0x8b, 0x30, 0x25, 0xd8, 0xae, 0x37, 0x90, 0x3c, 0x56, 0xbe, 0x67, 0x56, 0x2d, - 0x7a, 0xda, 0xc9, 0xd5, 0x02, 0x26, 0x85, 0xbf, 0x67, 0x0c, 0xfc, 0xa0, 0xf6, 0x54, 0xcd, 0x72, - 0x35, 0x8d, 0xf2, 0xa6, 0xc0, 0x24, 0x50, 0xbf, 0x03, 0xb6, 0x3e, 0x0b, 0xbb, 0x30, 0x19, 0xb3, - 0x20, 0xea, 0xf3, 0xfc, 0x5d, 0x1e, 0xff, 0x59, 0xa4, 0xad, 0x86, 0xb4, 0x00, 0xd5, 0x3f, 0x22, - 0xb0, 0x75, 0x0f, 0xef, 0x81, 0xdd, 0x67, 0x1d, 0xde, 0x2f, 0x98, 0xf3, 0x6e, 0xf1, 0x55, 0xba, - 0x4f, 0xf2, 0xfe, 0x3a, 0xf3, 0x85, 0xf7, 0x38, 0xcf, 0xfe, 0x73, 0xea, 0xfc, 0xd3, 0x57, 0xad, - 0xf9, 0xf7, 0x36, 0x59, 0x24, 0xb9, 0xc8, 0x2f, 0x2e, 0xe0, 0x52, 0xf8, 0x5d, 0x6a, 0xce, 0xc3, - 0xb7, 0xc1, 0xd6, 0xfb, 0x98, 0x97, 0xe1, 0xff, 0xb3, 0x93, 0xf5, 0x6e, 0xde, 0xac, 0xb9, 0xf2, - 0xca, 0x0e, 0xeb, 0x27, 0x9c, 0x1a, 0x78, 0x7d, 0x0d, 0x6c, 0x9d, 0x0a, 0x5e, 0x81, 0x59, 0xdd, - 0x6b, 0x4b, 0xc1, 0x59, 0x50, 0x78, 0xb8, 0x3c, 0xf2, 0x0e, 0xb4, 0x4b, 0x08, 0x6f, 0x22, 0x97, - 0xa4, 0xa3, 0x2c, 0xcf, 0x3b, 0x38, 0x22, 0xd6, 0xe1, 0x11, 0xb1, 0x4e, 0x8e, 0x08, 0x7a, 0x9b, - 0x11, 0xf4, 0x21, 0x23, 0x68, 0x3f, 0x23, 0xe8, 0x20, 0x23, 0xe8, 0x4b, 0x46, 0xd0, 0xd7, 0x8c, - 0x58, 0x27, 0x19, 0x41, 0xef, 0x8e, 0x89, 0x75, 0x70, 0x4c, 0xac, 0xc3, 0x63, 0x62, 0xbd, 0x18, - 0xf9, 0x3f, 0x76, 0x6c, 0xf5, 0xdf, 0xb9, 0xf9, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x58, 0xb4, - 0x4f, 0x4a, 0x05, 0x00, 0x00, + // 652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0xee, 0x00, 0x5b, 0xe0, 0x2d, 0xa0, 0x19, 0x88, 0xae, 0x84, 0x4c, 0x49, 0xa3, 0xc9, 0x6a, + 0xb4, 0x9b, 0xac, 0x31, 0xea, 0xd1, 0x2a, 0x06, 0xa3, 0x06, 0x9c, 0x25, 0x1e, 0xbc, 0xcd, 0x2e, + 0x93, 0xa5, 0xba, 0xa5, 0x75, 0x3a, 0x0b, 0xec, 0xcd, 0x3f, 0xc1, 0xbf, 0xc2, 0xf8, 0x3f, 0x78, + 0xf5, 0xc0, 0x91, 0x23, 0xf1, 0xd0, 0x48, 0xb9, 0x98, 0x3d, 0xe1, 0x7f, 0x60, 0x3a, 0x33, 0x85, + 0xae, 0x3f, 0xf1, 0xd6, 0x7e, 0xef, 0xfb, 0xbe, 0xf7, 0xde, 0x37, 0xd3, 0x02, 0x0e, 0xb6, 0x13, + 0xc9, 0xb6, 0xe5, 0xdb, 0x3e, 0x17, 0x03, 0x2f, 0x16, 0x91, 0x8c, 0xf0, 0x4c, 0x19, 0x5b, 0x5c, + 0xe8, 0x46, 0xdd, 0x48, 0x15, 0x1a, 0xf9, 0x93, 0xe6, 0x2c, 0xde, 0xef, 0x06, 0x72, 0xab, 0xdf, + 0xf6, 0x3a, 0x51, 0xd8, 0xe8, 0x44, 0x42, 0xf2, 0xbd, 0x58, 0x44, 0xaf, 0x79, 0x47, 0x9a, 0xb7, + 0x46, 0xfc, 0xa6, 0x5b, 0x14, 0xda, 0xe6, 0xc1, 0x48, 0x1f, 0x9e, 0x47, 0x9a, 0xf7, 0x0e, 0xb8, + 0x68, 0x48, 0x11, 0xc4, 0x31, 0x17, 0xbb, 0x4c, 0xf0, 0x46, 0x69, 0x46, 0xf7, 0xf3, 0x18, 0x90, + 0x75, 0x11, 0x85, 0x5c, 0x6e, 0xf1, 0x7e, 0xf2, 0x44, 0x0f, 0xfc, 0x22, 0x27, 0x50, 0x9e, 0xc4, + 0xd1, 0x76, 0xc2, 0xb1, 0x0b, 0x76, 0x4b, 0x32, 0xd9, 0x4f, 0x6a, 0x68, 0x19, 0xd5, 0xa7, 0x7d, + 0x18, 0xa6, 0x8e, 0x9d, 0x28, 0x84, 0x9a, 0x0a, 0xde, 0x80, 0x89, 0x47, 0x4c, 0xb2, 0xda, 0xd8, + 0x32, 0xaa, 0x57, 0x9b, 0x75, 0x6f, 0x24, 0x8d, 0xdf, 0xfb, 0xe7, 0x7c, 0xff, 0xd2, 0x7e, 0xea, + 0x58, 0xc3, 0xd4, 0x99, 0xdb, 0x64, 0x92, 0xdd, 0x8c, 0xc2, 0x40, 0xf2, 0x30, 0x96, 0x03, 0xaa, + 0xdc, 0xf0, 0x1d, 0x98, 0x5e, 
0x11, 0x22, 0x12, 0x1b, 0x83, 0x98, 0xd7, 0xc6, 0x55, 0xf3, 0xcb, + 0xc3, 0xd4, 0x99, 0xe7, 0x05, 0x58, 0x52, 0x9c, 0x31, 0xf1, 0x75, 0xa8, 0xa8, 0x97, 0xda, 0x84, + 0x92, 0xcc, 0x0f, 0x53, 0xe7, 0x82, 0x92, 0x94, 0xe8, 0x9a, 0x81, 0x1f, 0xc3, 0xe4, 0x2a, 0x67, + 0x9b, 0x5c, 0x24, 0xb5, 0xca, 0xf2, 0x78, 0xbd, 0xda, 0xbc, 0xe6, 0x95, 0x92, 0x2a, 0x4d, 0x5e, + 0xa4, 0xa1, 0xd9, 0x7e, 0x65, 0x98, 0x3a, 0xe8, 0x16, 0x2d, 0xc4, 0xee, 0x77, 0x04, 0x8b, 0x7f, + 0x5e, 0x13, 0x7b, 0x00, 0x94, 0x27, 0xfd, 0x9e, 0x54, 0x9b, 0xe8, 0x18, 0xe7, 0x86, 0xa9, 0x03, + 0xe2, 0x14, 0xa5, 0x25, 0x06, 0xa6, 0x60, 0xeb, 0x37, 0x13, 0xe8, 0x8d, 0xf3, 0x04, 0xaa, 0x15, + 0xfe, 0x9c, 0x89, 0xd4, 0xd6, 0xde, 0xd4, 0x38, 0xe1, 0x35, 0xa8, 0xe4, 0x87, 0x96, 0xa8, 0x20, + 0xab, 0xcd, 0xab, 0xff, 0x58, 0x34, 0x3f, 0xd8, 0x44, 0x67, 0xa7, 0x64, 0xe5, 0xec, 0x14, 0xe0, + 0x7e, 0x40, 0xb0, 0xf4, 0xb7, 0x49, 0xb0, 0x07, 0xf6, 0x0e, 0xef, 0xc8, 0x48, 0xa8, 0x8d, 0xab, + 0xcd, 0x85, 0xd1, 0x2d, 0x5e, 0xaa, 0xda, 0xaa, 0x45, 0x0d, 0x0b, 0x2f, 0xc1, 0x94, 0x60, 0xbb, + 0xfe, 0x40, 0xf2, 0x44, 0xed, 0x3d, 0xb3, 0x6a, 0xd1, 0x53, 0x24, 0x77, 0x0b, 0x99, 0x14, 0xc1, + 0x9e, 0x59, 0xe0, 0x27, 0xb7, 0xe7, 0xaa, 0x96, 0xbb, 0x69, 0x96, 0x3f, 0x05, 0x26, 0x01, 0xf7, + 0x1e, 0xd8, 0xba, 0x17, 0xf6, 0x60, 0x32, 0x61, 0x61, 0xdc, 0xe3, 0xf9, 0x5d, 0x1e, 0xff, 0xd5, + 0xa4, 0xa5, 0x8a, 0xb4, 0x20, 0xb9, 0x9f, 0x10, 0xd8, 0x1a, 0xc3, 0x7b, 0x60, 0xf7, 0x58, 0x9b, + 0xf7, 0x0a, 0xe5, 0xbc, 0x57, 0x7c, 0x95, 0xde, 0xb3, 0x1c, 0x5f, 0x67, 0x81, 0xf0, 0x9f, 0xe6, + 0xd9, 0x7f, 0x49, 0x9d, 0xff, 0xfa, 0xaa, 0xb5, 0xfe, 0xc1, 0x26, 0x8b, 0x25, 0x17, 0xf9, 0xc1, + 0x85, 0x5c, 0x8a, 0xa0, 0x43, 0x4d, 0x3f, 0x7c, 0x17, 0x6c, 0x3d, 0x8f, 0xb9, 0x0c, 0x17, 0xcf, + 0x3a, 0xeb, 0xd9, 0xfc, 0x59, 0x73, 0xe4, 0x95, 0x1d, 0xd6, 0xeb, 0x73, 0x6a, 0xe8, 0xee, 0x1a, + 0xd8, 0x3a, 0x15, 0xbc, 0x02, 0xb3, 0x1a, 0x6b, 0x49, 0xc1, 0x59, 0x58, 0xec, 0x70, 0x65, 0xe4, + 0x0e, 0xb4, 0x4a, 0x0c, 0x7f, 0x22, 0xb7, 0xa4, 0xa3, 0x2a, 0xdf, 0x3f, 0x38, 0x22, 0xd6, 0xe1, + 0x11, 0xb1, 0x4e, 0x8e, 0x08, 0x7a, 0x97, 0x11, 0xf4, 0x31, 0x23, 0x68, 0x3f, 0x23, 0xe8, 0x20, + 0x23, 0xe8, 0x6b, 0x46, 0xd0, 0xb7, 0x8c, 0x58, 0x27, 0x19, 0x41, 0xef, 0x8f, 0x89, 0x75, 0x70, + 0x4c, 0xac, 0xc3, 0x63, 0x62, 0xbd, 0x1a, 0xf9, 0x0d, 0xb6, 0x6d, 0xf5, 0xdf, 0xb9, 0xfd, 0x23, + 0x00, 0x00, 0xff, 0xff, 0xa2, 0x2a, 0x9a, 0xd2, 0x31, 0x05, 0x00, 0x00, } func (this *PrometheusInstantQueryResponse) Equal(that interface{}) bool { diff --git a/pkg/querier/tripperware/instantquery/instantquery.proto b/pkg/querier/tripperware/instantquery/instantquery.proto index 889b7a651b4..8a45015dd1f 100644 --- a/pkg/querier/tripperware/instantquery/instantquery.proto +++ b/pkg/querier/tripperware/instantquery/instantquery.proto @@ -4,7 +4,7 @@ package instantquery; option go_package = "instantquery"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; import "github.com/cortexproject/cortex/pkg/querier/tripperware/query.proto"; diff --git a/pkg/querier/tripperware/query.pb.go b/pkg/querier/tripperware/query.pb.go index 49588d2ac7d..f8fb8f92034 100644 --- a/pkg/querier/tripperware/query.pb.go +++ b/pkg/querier/tripperware/query.pb.go @@ -332,39 +332,38 @@ func init() { func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } var fileDescriptor_5c6ac9b241082464 = []byte{ - // 502 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xbd, 0x2d, 0x0d, 0x62, 0x5d, 0x21, 0xb4, 0x80, 0x48, 0x2b, 0x58, 0x87, 0x9c, 0x2a, - 0xa1, 0x3a, 0x52, 0x39, 0x01, 0xa7, 0xfa, 0x84, 0xc4, 0xbf, 0xe0, 0x20, 0x0e, 0x5c, 0xd0, 0x3a, - 0x1d, 0x52, 0x83, 0x97, 0xdd, 0xee, 0xae, 0xa1, 0xdc, 0x78, 0x04, 0x2e, 0x3c, 0x00, 0x37, 0x1e, - 0x84, 0x43, 0x8f, 0x39, 0x56, 0x1c, 0x2c, 0xe2, 0x5c, 0x90, 0x4f, 0x7d, 0x04, 0xe4, 0xb5, 0xdd, - 0x04, 0x88, 0x82, 0x2a, 0x6e, 0x3b, 0xf3, 0xcd, 0x6f, 0xbe, 0xc9, 0xec, 0xc6, 0xd8, 0x3d, 0x48, - 0x41, 0x7d, 0xf0, 0xa5, 0x12, 0x46, 0x10, 0xd7, 0xa8, 0x58, 0x4a, 0x50, 0xef, 0x99, 0x82, 0xcd, - 0xed, 0x51, 0x6c, 0xf6, 0xd3, 0xc8, 0x1f, 0x0a, 0xde, 0x1b, 0x89, 0x91, 0xe8, 0xd9, 0x9a, 0x28, - 0x7d, 0x65, 0x23, 0x1b, 0xd8, 0x53, 0xc5, 0x6e, 0xde, 0x99, 0x2b, 0x1f, 0x0a, 0x65, 0xe0, 0x50, - 0x2a, 0xf1, 0x1a, 0x86, 0xa6, 0x8e, 0x7a, 0xf2, 0xcd, 0xa8, 0x11, 0xa2, 0xfa, 0x50, 0xa1, 0xdd, - 0x6f, 0x08, 0xaf, 0x0f, 0x18, 0x97, 0x09, 0x0c, 0x8c, 0x02, 0xc6, 0xc9, 0x21, 0x6e, 0x25, 0x2c, - 0x82, 0x44, 0xb7, 0x51, 0x67, 0x75, 0xcb, 0xdd, 0xb9, 0xec, 0x37, 0xa0, 0xff, 0xb0, 0xcc, 0xf7, - 0x59, 0xac, 0x82, 0x07, 0x47, 0x99, 0xe7, 0x7c, 0xcf, 0xbc, 0x33, 0x19, 0x57, 0xfc, 0xee, 0x1e, - 0x93, 0x06, 0x54, 0x91, 0x79, 0x2d, 0x0e, 0x46, 0xc5, 0xc3, 0xb0, 0xf6, 0x23, 0x77, 0xf1, 0x79, - 0x6d, 0x27, 0xd1, 0xed, 0x15, 0x6b, 0x7d, 0x69, 0x66, 0x5d, 0x8d, 0x18, 0x5c, 0x2c, 0x7d, 0x4b, - 0xf4, 0x1d, 0x4b, 0x52, 0xd0, 0x61, 0x03, 0x74, 0x39, 0xbe, 0xd6, 0x57, 0x82, 0x83, 0xd9, 0x87, - 0x54, 0x87, 0xa0, 0xa5, 0x78, 0xab, 0x61, 0x60, 0x98, 0xd1, 0x24, 0x9c, 0xb5, 0x45, 0x1d, 0xb4, - 0xe5, 0xee, 0xdc, 0xf2, 0xe7, 0x56, 0xed, 0x2f, 0xc0, 0xaa, 0x6a, 0x4b, 0x07, 0x6e, 0x91, 0x79, - 0x0d, 0x3f, 0xb3, 0xfb, 0xbc, 0x82, 0xe9, 0x72, 0x90, 0x3c, 0xc1, 0x57, 0x8d, 0x30, 0x2c, 0x79, - 0x5a, 0xde, 0x31, 0x8b, 0x92, 0x46, 0xb5, 0x43, 0xac, 0x06, 0x1b, 0x45, 0xe6, 0x2d, 0x2e, 0x08, - 0x17, 0xa7, 0xc9, 0x17, 0x84, 0xaf, 0x2f, 0x54, 0xfa, 0xa0, 0x06, 0x06, 0x64, 0xbd, 0xb4, 0x7b, - 0xff, 0xf8, 0x75, 0x7f, 0xd2, 0x76, 0xda, 0xba, 0x45, 0xd0, 0x29, 0x32, 0x6f, 0xa9, 0x49, 0xb8, - 0x54, 0xed, 0xc6, 0xf8, 0x8c, 0x8e, 0xe4, 0x0a, 0x5e, 0xb3, 0x77, 0x59, 0xad, 0x25, 0xac, 0x02, - 0x72, 0x13, 0xaf, 0x9b, 0x98, 0x83, 0x36, 0x8c, 0xcb, 0x97, 0xbc, 0x7c, 0x0f, 0xa5, 0xe8, 0x9e, - 0xe6, 0x1e, 0xe9, 0xee, 0x33, 0xdc, 0xfe, 0xdb, 0xea, 0x3e, 0xb0, 0x3d, 0x50, 0x64, 0x03, 0x9f, - 0x7b, 0xcc, 0x78, 0xd5, 0xf3, 0x42, 0xb0, 0x56, 0x64, 0x1e, 0xda, 0x0e, 0x6d, 0x8a, 0xdc, 0xc0, - 0xad, 0xe7, 0xf6, 0xed, 0xd8, 0x75, 0x9d, 0x8a, 0x75, 0xb2, 0x3b, 0xf8, 0xfd, 0x1d, 0x1d, 0xa4, - 0xa0, 0xcd, 0xff, 0x36, 0x0d, 0x76, 0xc7, 0x13, 0xea, 0x1c, 0x4f, 0xa8, 0x73, 0x32, 0xa1, 0xe8, - 0x63, 0x4e, 0xd1, 0xd7, 0x9c, 0xa2, 0xa3, 0x9c, 0xa2, 0x71, 0x4e, 0xd1, 0x8f, 0x9c, 0xa2, 0x9f, - 0x39, 0x75, 0x4e, 0x72, 0x8a, 0x3e, 0x4d, 0xa9, 0x33, 0x9e, 0x52, 0xe7, 0x78, 0x4a, 0x9d, 0x17, - 0xf3, 0x1f, 0x84, 0xa8, 0x65, 0xff, 0xad, 0xb7, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x52, 0xc5, - 0xe2, 0x36, 0x33, 0x04, 0x00, 0x00, + // 495 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xbf, 0x6e, 0xd4, 0x4e, + 0x10, 0xf6, 0x26, 0xbf, 0xdc, 0x4f, 0xac, 0x23, 0x84, 0x96, 0x20, 0x2e, 0x11, 0xac, 0x8f, 0xab, + 0x22, 0x21, 0x7c, 0x52, 0xa8, 0x80, 0x2a, 0xae, 0x90, 0xf8, 0x77, 0xd8, 0x88, 0x82, 0x06, 0xad, + 0x2f, 0xa3, 0x8b, 0xc1, 0xcb, 0x6e, 0x76, 0xd7, 0x10, 0x3a, 0x1e, 0x81, 0x86, 0x07, 0xa0, 0xe3, + 0x41, 0x28, 0x52, 0x5e, 0x19, 0x51, 0x58, 0x9c, 0xaf, 0x41, 0xae, 0xf2, 0x08, 0xc8, 0x6b, 0x3b, + 
0x17, 0xe0, 0x74, 0x28, 0xa2, 0xdb, 0xfd, 0xbe, 0xf9, 0xe6, 0x9b, 0x9d, 0x9d, 0xc1, 0xee, 0x41, + 0x06, 0xea, 0xbd, 0x2f, 0x95, 0x30, 0x82, 0xb8, 0x46, 0x25, 0x52, 0x82, 0x7a, 0xc7, 0x14, 0x6c, + 0x6d, 0x8c, 0xc5, 0x58, 0x58, 0x7c, 0x50, 0x9d, 0xea, 0x90, 0xad, 0x3b, 0xe3, 0xc4, 0xec, 0x67, + 0xb1, 0x3f, 0x12, 0x7c, 0x30, 0x12, 0xca, 0xc0, 0xa1, 0x54, 0xe2, 0x15, 0x8c, 0x4c, 0x73, 0x1b, + 0xc8, 0xd7, 0xe3, 0x96, 0x88, 0x9b, 0x43, 0x2d, 0xed, 0x7f, 0x45, 0x78, 0x3d, 0x62, 0x5c, 0xa6, + 0x10, 0x19, 0x05, 0x8c, 0x93, 0x43, 0xdc, 0x49, 0x59, 0x0c, 0xa9, 0xee, 0xa2, 0xde, 0xea, 0xb6, + 0xbb, 0x73, 0xd9, 0x6f, 0x85, 0xfe, 0xc3, 0x0a, 0x1f, 0xb2, 0x44, 0x05, 0x0f, 0x8e, 0x72, 0xcf, + 0xf9, 0x96, 0x7b, 0xe7, 0x32, 0xae, 0xf5, 0xbb, 0x7b, 0x4c, 0x1a, 0x50, 0x65, 0xee, 0x75, 0x38, + 0x18, 0x95, 0x8c, 0xc2, 0xc6, 0x8f, 0xdc, 0xc5, 0xff, 0x6b, 0x5b, 0x89, 0xee, 0xae, 0x58, 0xeb, + 0x4b, 0x73, 0xeb, 0xba, 0xc4, 0xe0, 0x62, 0xe5, 0x5b, 0x49, 0xdf, 0xb2, 0x34, 0x03, 0x1d, 0xb6, + 0x82, 0x3e, 0xc7, 0x57, 0x87, 0x4a, 0x70, 0x30, 0xfb, 0x90, 0xe9, 0x10, 0xb4, 0x14, 0x6f, 0x34, + 0x44, 0x86, 0x19, 0x4d, 0xc2, 0x79, 0x5a, 0xd4, 0x43, 0xdb, 0xee, 0xce, 0x4d, 0xff, 0x4c, 0x47, + 0xfd, 0x05, 0xb2, 0x3a, 0xda, 0xaa, 0x03, 0xb7, 0xcc, 0xbd, 0x56, 0x3f, 0xb7, 0xfb, 0xb4, 0x82, + 0xe9, 0x72, 0x21, 0x79, 0x82, 0xaf, 0x18, 0x61, 0x58, 0xfa, 0xb4, 0xfa, 0x4a, 0x16, 0xa7, 0x2d, + 0x6b, 0x8b, 0x58, 0x0d, 0x36, 0xcb, 0xdc, 0x5b, 0x1c, 0x10, 0x2e, 0x86, 0xc9, 0x67, 0x84, 0xaf, + 0x2d, 0x64, 0x86, 0xa0, 0x22, 0x03, 0xb2, 0x69, 0xda, 0xbd, 0xbf, 0xbc, 0xee, 0x77, 0xb5, 0xad, + 0xb6, 0x49, 0x11, 0xf4, 0xca, 0xdc, 0x5b, 0x6a, 0x12, 0x2e, 0x65, 0xfb, 0x09, 0x3e, 0xa7, 0x23, + 0xd9, 0xc0, 0x6b, 0xf6, 0x2f, 0xeb, 0xb6, 0x84, 0xf5, 0x85, 0xdc, 0xc0, 0xeb, 0x26, 0xe1, 0xa0, + 0x0d, 0xe3, 0xf2, 0x25, 0xaf, 0xe6, 0xa1, 0x22, 0xdd, 0x53, 0xec, 0x91, 0xee, 0x3f, 0xc3, 0xdd, + 0x3f, 0xad, 0xee, 0x03, 0xdb, 0x03, 0x45, 0x36, 0xf1, 0x7f, 0x8f, 0x19, 0xaf, 0x73, 0x5e, 0x08, + 0xd6, 0xca, 0xdc, 0x43, 0xb7, 0x42, 0x0b, 0x91, 0xeb, 0xb8, 0xf3, 0xdc, 0xce, 0x8e, 0x6d, 0xd7, + 0x29, 0xd9, 0x80, 0xfd, 0xe8, 0xd7, 0x39, 0x3a, 0xc8, 0x40, 0x9b, 0x7f, 0x4d, 0x1a, 0xec, 0x4e, + 0xa6, 0xd4, 0x39, 0x9e, 0x52, 0xe7, 0x64, 0x4a, 0xd1, 0x87, 0x82, 0xa2, 0x2f, 0x05, 0x45, 0x47, + 0x05, 0x45, 0x93, 0x82, 0xa2, 0xef, 0x05, 0x45, 0x3f, 0x0a, 0xea, 0x9c, 0x14, 0x14, 0x7d, 0x9c, + 0x51, 0x67, 0x32, 0xa3, 0xce, 0xf1, 0x8c, 0x3a, 0x2f, 0xce, 0xee, 0x7d, 0xdc, 0xb1, 0xdb, 0x7a, + 0xfb, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x6b, 0xc1, 0x41, 0x1a, 0x04, 0x00, 0x00, } func (this *SampleStream) Equal(that interface{}) bool { diff --git a/pkg/querier/tripperware/query.proto b/pkg/querier/tripperware/query.proto index 5a3252773ae..9d3afacf2c7 100644 --- a/pkg/querier/tripperware/query.proto +++ b/pkg/querier/tripperware/query.proto @@ -4,7 +4,7 @@ package tripperware; option go_package = "tripperware"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; option (gogoproto.marshaler_all) = true; diff --git a/pkg/querier/tripperware/queryrange/queryrange.pb.go b/pkg/querier/tripperware/queryrange/queryrange.pb.go index 240437531df..2a179187fa6 100644 --- a/pkg/querier/tripperware/queryrange/queryrange.pb.go +++ b/pkg/querier/tripperware/queryrange/queryrange.pb.go @@ -446,55 +446,55 @@ func init() { func init() { proto.RegisterFile("queryrange.proto", fileDescriptor_79b02382e213d0b2) } var fileDescriptor_79b02382e213d0b2 = []byte{ - // 767 bytes of a gzipped 
FileDescriptorProto + // 763 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xbd, 0x6e, 0xdb, 0x48, 0x10, 0x16, 0x45, 0xfd, 0xae, 0x0f, 0xb2, 0x6f, 0x6d, 0xdc, 0x51, 0x2a, 0x48, 0x41, 0xf0, 0x01, - 0x3a, 0xc0, 0xa6, 0x00, 0x1f, 0xae, 0xbc, 0xc3, 0x99, 0xfe, 0x81, 0x2f, 0x8d, 0x03, 0x3a, 0x55, - 0x9a, 0x60, 0x25, 0x4e, 0x28, 0xda, 0x12, 0x49, 0x2f, 0x97, 0x88, 0xd5, 0xe5, 0x11, 0x52, 0xe6, - 0x11, 0x82, 0x20, 0x0f, 0xe2, 0xd2, 0x45, 0x0a, 0x57, 0x74, 0x2c, 0x37, 0x01, 0x2b, 0x3f, 0x42, - 0xc0, 0xdd, 0xa5, 0x44, 0xdb, 0x09, 0xd2, 0x08, 0x3b, 0xb3, 0xdf, 0xcc, 0x7e, 0xdf, 0x37, 0x1c, - 0xa1, 0xb5, 0xf3, 0x18, 0xe8, 0x8c, 0x12, 0xdf, 0x05, 0x33, 0xa4, 0x01, 0x0b, 0x30, 0x5a, 0x66, - 0x3a, 0xdb, 0xae, 0xc7, 0xc6, 0xf1, 0xd0, 0x1c, 0x05, 0xd3, 0x81, 0x1b, 0xb8, 0xc1, 0x80, 0x43, - 0x86, 0xf1, 0x6b, 0x1e, 0xf1, 0x80, 0x9f, 0x44, 0x69, 0x47, 0x77, 0x83, 0xc0, 0x9d, 0xc0, 0x12, - 0xe5, 0xc4, 0x94, 0x30, 0x2f, 0xf0, 0xe5, 0x7d, 0xfb, 0xf1, 0x3d, 0xf1, 0x67, 0xf2, 0x6a, 0xaf, - 0xf0, 0xd2, 0x28, 0xa0, 0x0c, 0x2e, 0x42, 0x1a, 0x9c, 0xc2, 0x88, 0xc9, 0x68, 0x10, 0x9e, 0xb9, - 0x83, 0x8c, 0x99, 0x07, 0x74, 0xc0, 0xa8, 0x17, 0x86, 0x40, 0xdf, 0x10, 0x0a, 0x3c, 0x27, 0x9b, - 0xf4, 0x6e, 0xca, 0xe8, 0xd7, 0xe7, 0x34, 0x98, 0x02, 0x1b, 0x43, 0x1c, 0xd9, 0x70, 0x1e, 0x43, - 0xc4, 0x30, 0x46, 0x95, 0x90, 0xb0, 0xb1, 0xa6, 0x74, 0x95, 0x7e, 0xd3, 0xe6, 0x67, 0xbc, 0x81, - 0xaa, 0x11, 0x23, 0x94, 0x69, 0xe5, 0xae, 0xd2, 0x57, 0x6d, 0x11, 0xe0, 0x35, 0xa4, 0x82, 0xef, - 0x68, 0x2a, 0xcf, 0x65, 0xc7, 0xac, 0x36, 0x62, 0x10, 0x6a, 0x15, 0x9e, 0xe2, 0x67, 0xfc, 0x0f, - 0xaa, 0x33, 0x6f, 0x0a, 0x41, 0xcc, 0xb4, 0x6a, 0x57, 0xe9, 0xaf, 0xec, 0xb4, 0x4d, 0xa1, 0xcb, - 0xcc, 0x75, 0x99, 0xfb, 0x52, 0xb7, 0xd5, 0xb8, 0x4c, 0x8c, 0xd2, 0xfb, 0x1b, 0x43, 0xb1, 0xf3, - 0x9a, 0xec, 0x69, 0xce, 0x59, 0xab, 0x71, 0x3e, 0x22, 0xc0, 0x47, 0xa8, 0x35, 0x22, 0xa3, 0xb1, - 0xe7, 0xbb, 0xc7, 0x61, 0x56, 0x19, 0x69, 0x75, 0xde, 0xbb, 0x63, 0x16, 0x06, 0xb4, 0xf7, 0x00, - 0x61, 0x55, 0xb2, 0xe6, 0xf6, 0xa3, 0x3a, 0x7c, 0x80, 0xea, 0x47, 0x40, 0x1c, 0xa0, 0x91, 0xd6, - 0xe8, 0xaa, 0xfd, 0x95, 0x9d, 0x4d, 0xb3, 0xe0, 0x97, 0xf9, 0xc4, 0x1f, 0x01, 0xb6, 0xaa, 0x69, - 0x62, 0x28, 0xdb, 0x76, 0x5e, 0x2b, 0x1d, 0x62, 0x91, 0xd6, 0x14, 0x34, 0x79, 0xd0, 0xfb, 0x58, - 0x46, 0xb8, 0xd8, 0x21, 0x0a, 0x03, 0x3f, 0x02, 0xdc, 0x43, 0xb5, 0x13, 0x46, 0x58, 0x1c, 0x09, - 0x93, 0x2d, 0x94, 0x26, 0x46, 0x2d, 0xe2, 0x19, 0x5b, 0xde, 0xe0, 0x43, 0x54, 0xd9, 0x27, 0x8c, - 0x70, 0xc7, 0x1f, 0xe9, 0x5a, 0x76, 0xcc, 0x10, 0xd6, 0x6f, 0x99, 0xae, 0x34, 0x31, 0x5a, 0x0e, - 0x61, 0x64, 0x2b, 0x98, 0x7a, 0x0c, 0xa6, 0x21, 0x9b, 0xd9, 0xbc, 0x1e, 0xff, 0x8d, 0x9a, 0x07, - 0x94, 0x06, 0xf4, 0xc5, 0x2c, 0x04, 0x3e, 0xaa, 0xa6, 0xf5, 0x7b, 0x9a, 0x18, 0xeb, 0x90, 0x27, - 0x0b, 0x15, 0x4b, 0x24, 0xfe, 0x13, 0x55, 0x79, 0xc0, 0x47, 0xd9, 0xb4, 0xd6, 0xd3, 0xc4, 0x58, - 0xe5, 0x25, 0x05, 0xb8, 0x40, 0xe0, 0xc3, 0xa5, 0x83, 0x55, 0xee, 0xe0, 0x1f, 0x3f, 0x74, 0x50, - 0xe8, 0xff, 0xbe, 0x85, 0xbd, 0xcf, 0x0a, 0x6a, 0x3d, 0x94, 0x86, 0x4d, 0x84, 0x6c, 0x88, 0xe2, - 0x09, 0xe3, 0xec, 0x85, 0x59, 0xad, 0x34, 0x31, 0x10, 0x5d, 0x64, 0xed, 0x02, 0x02, 0xef, 0xa2, - 0x9a, 0x88, 0xb4, 0x32, 0x67, 0xd2, 0x7e, 0xc0, 0xe4, 0x84, 0x4c, 0xc3, 0x09, 0x9c, 0x30, 0x0a, - 0x64, 0x6a, 0xb5, 0xa4, 0x6b, 0x35, 0xd1, 0xca, 0x96, 0x85, 0xf8, 0x38, 0x1f, 0xa4, 0xca, 0x8d, - 0xdf, 0xfc, 0x89, 0x96, 0x6c, 0x5a, 0x91, 0xb0, 0x87, 0x97, 0x15, 0xed, 0x11, 0xdf, 0xc0, 0x29, - 0x6a, 0x65, 0x1f, 0x22, 0x38, 0x8b, 0xf1, 0xb7, 0x91, 
0x7a, 0x06, 0x33, 0x29, 0xa7, 0x9e, 0x26, - 0x46, 0x16, 0xda, 0xd9, 0x4f, 0xb6, 0x2c, 0x70, 0xc1, 0xc0, 0x67, 0x91, 0x54, 0x80, 0x8b, 0x83, - 0x3f, 0xe0, 0x57, 0xd6, 0xaa, 0xa4, 0x9e, 0x43, 0xed, 0xfc, 0xd0, 0xfb, 0xa4, 0xa0, 0x9a, 0x00, - 0x61, 0x23, 0x5f, 0xd9, 0xec, 0x19, 0xd5, 0x6a, 0xa6, 0x89, 0x21, 0x12, 0xf9, 0xf6, 0xb6, 0xc5, - 0xf6, 0xf2, 0x8d, 0x16, 0x2c, 0xc0, 0x77, 0xc4, 0x1a, 0x77, 0x51, 0x83, 0x51, 0x32, 0x82, 0x57, - 0x9e, 0x23, 0xe7, 0x9f, 0xcf, 0x8a, 0xa7, 0xff, 0x77, 0xf0, 0xbf, 0xa8, 0x41, 0xa5, 0x1c, 0xb9, - 0xd5, 0x1b, 0x4f, 0xb6, 0x7a, 0xd7, 0x9f, 0x59, 0xbf, 0xa4, 0x89, 0xb1, 0x40, 0xda, 0x8b, 0xd3, - 0xb3, 0x4a, 0x43, 0x5d, 0xab, 0xf4, 0xb6, 0x84, 0x35, 0x85, 0x6d, 0xec, 0xa0, 0x86, 0xe3, 0x45, - 0x64, 0x38, 0x01, 0x87, 0x13, 0x6f, 0xd8, 0x8b, 0xd8, 0xfa, 0xef, 0xea, 0x56, 0x2f, 0x5d, 0xdf, - 0xea, 0xa5, 0xfb, 0x5b, 0x5d, 0x79, 0x3b, 0xd7, 0x95, 0x0f, 0x73, 0x5d, 0xb9, 0x9c, 0xeb, 0xca, - 0xd5, 0x5c, 0x57, 0xbe, 0xcc, 0x75, 0xe5, 0xeb, 0x5c, 0x2f, 0xdd, 0xcf, 0x75, 0xe5, 0xdd, 0x9d, - 0x5e, 0xba, 0xba, 0xd3, 0x4b, 0xd7, 0x77, 0x7a, 0xe9, 0x65, 0xe1, 0xff, 0x79, 0x58, 0xe3, 0xdc, - 0xfe, 0xfa, 0x16, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xa5, 0x08, 0x40, 0xc6, 0x05, 0x00, 0x00, + 0x3a, 0xc0, 0x47, 0x01, 0x3e, 0x5c, 0x79, 0x41, 0x4c, 0xff, 0xc0, 0x49, 0xe3, 0x80, 0x4e, 0x95, + 0x26, 0x58, 0x89, 0x13, 0x8a, 0xb6, 0x44, 0xd2, 0xcb, 0x25, 0x62, 0x75, 0x79, 0x84, 0x94, 0x79, + 0x84, 0x20, 0xc8, 0x83, 0xb8, 0x74, 0x91, 0xc2, 0x15, 0x1d, 0xcb, 0x4d, 0xc0, 0xca, 0x8f, 0x10, + 0x70, 0x77, 0x29, 0xd1, 0x76, 0x82, 0x34, 0xc6, 0xcc, 0xec, 0x37, 0xa3, 0xef, 0xfb, 0x86, 0x63, + 0xb4, 0x76, 0x16, 0x03, 0x9d, 0x51, 0xe2, 0xbb, 0x60, 0x86, 0x34, 0x60, 0x01, 0x46, 0xcb, 0x4a, + 0x67, 0xc3, 0x0d, 0xdc, 0x80, 0x97, 0x07, 0x59, 0x24, 0x10, 0x1d, 0xdd, 0x0d, 0x02, 0x77, 0x02, + 0x03, 0x9e, 0x0d, 0xe3, 0x37, 0x03, 0x27, 0xa6, 0x84, 0x79, 0x81, 0x2f, 0xdf, 0xdb, 0x0f, 0xdf, + 0x89, 0x3f, 0x93, 0x4f, 0xbb, 0xae, 0xc7, 0xc6, 0xf1, 0xd0, 0x1c, 0x05, 0xd3, 0xc1, 0x28, 0xa0, + 0x0c, 0xce, 0x43, 0x1a, 0x9c, 0xc0, 0x88, 0xc9, 0x6c, 0x10, 0x9e, 0xba, 0x83, 0x8c, 0x80, 0x07, + 0x74, 0xc0, 0xa8, 0x17, 0x86, 0x40, 0xdf, 0x12, 0x0a, 0xbc, 0x26, 0x87, 0xf4, 0xae, 0xcb, 0xe8, + 0xf7, 0x17, 0x34, 0x98, 0x02, 0x1b, 0x43, 0x1c, 0xd9, 0x70, 0x16, 0x43, 0xc4, 0x30, 0x46, 0x95, + 0x90, 0xb0, 0xb1, 0xa6, 0x74, 0x95, 0x7e, 0xd3, 0xe6, 0x31, 0xde, 0x40, 0xd5, 0x88, 0x11, 0xca, + 0xb4, 0x72, 0x57, 0xe9, 0xab, 0xb6, 0x48, 0xf0, 0x1a, 0x52, 0xc1, 0x77, 0x34, 0x95, 0xd7, 0xb2, + 0x30, 0xeb, 0x8d, 0x18, 0x84, 0x5a, 0x85, 0x97, 0x78, 0x8c, 0xff, 0x47, 0x75, 0xe6, 0x4d, 0x21, + 0x88, 0x99, 0x56, 0xed, 0x2a, 0xfd, 0x95, 0xed, 0xb6, 0x29, 0x74, 0x99, 0xb9, 0x2e, 0x73, 0x4f, + 0xea, 0xb6, 0x1a, 0x17, 0x89, 0x51, 0xfa, 0x70, 0x6d, 0x28, 0x76, 0xde, 0x93, 0xfd, 0x34, 0xe7, + 0xac, 0xd5, 0x38, 0x1f, 0x91, 0xe0, 0x43, 0xd4, 0x1a, 0x91, 0xd1, 0xd8, 0xf3, 0xdd, 0xa3, 0x30, + 0xeb, 0x8c, 0xb4, 0x3a, 0x9f, 0xdd, 0x31, 0x0b, 0x7b, 0xd8, 0xbd, 0x87, 0xb0, 0x2a, 0xd9, 0x70, + 0xfb, 0x41, 0x1f, 0xde, 0x47, 0xf5, 0x43, 0x20, 0x0e, 0xd0, 0x48, 0x6b, 0x74, 0xd5, 0xfe, 0xca, + 0xf6, 0xa6, 0x59, 0xf0, 0xcb, 0x7c, 0xe4, 0x8f, 0x00, 0x5b, 0xd5, 0x34, 0x31, 0x94, 0x7f, 0xec, + 0xbc, 0x57, 0x3a, 0xc4, 0x22, 0xad, 0x29, 0x68, 0xf2, 0xa4, 0xf7, 0xa9, 0x8c, 0x70, 0x71, 0x42, + 0x14, 0x06, 0x7e, 0x04, 0xb8, 0x87, 0x6a, 0xc7, 0x8c, 0xb0, 0x38, 0x12, 0x26, 0x5b, 0x28, 0x4d, + 0x8c, 0x5a, 0xc4, 0x2b, 0xb6, 0x7c, 0xc1, 0x07, 0xa8, 0xb2, 0x47, 0x18, 0xe1, 0x8e, 0x3f, 0xd0, + 0xb5, 0x9c, 0x98, 0x21, 0xac, 0x3f, 0x32, 0x5d, 0x69, 0x62, 0xb4, 0x1c, 0xc2, 0xc8, 
0x56, 0x30, + 0xf5, 0x18, 0x4c, 0x43, 0x36, 0xb3, 0x79, 0x3f, 0xfe, 0x0f, 0x35, 0xf7, 0x29, 0x0d, 0xe8, 0xcb, + 0x59, 0x08, 0x7c, 0x55, 0x4d, 0xeb, 0xcf, 0x34, 0x31, 0xd6, 0x21, 0x2f, 0x16, 0x3a, 0x96, 0x48, + 0xfc, 0x37, 0xaa, 0xf2, 0x84, 0xaf, 0xb2, 0x69, 0xad, 0xa7, 0x89, 0xb1, 0xca, 0x5b, 0x0a, 0x70, + 0x81, 0xc0, 0x07, 0x4b, 0x07, 0xab, 0xdc, 0xc1, 0xbf, 0x7e, 0xea, 0xa0, 0xd0, 0xff, 0x63, 0x0b, + 0x7b, 0x5f, 0x14, 0xd4, 0xba, 0x2f, 0x0d, 0x9b, 0x08, 0xd9, 0x10, 0xc5, 0x13, 0xc6, 0xd9, 0x0b, + 0xb3, 0x5a, 0x69, 0x62, 0x20, 0xba, 0xa8, 0xda, 0x05, 0x04, 0xde, 0x41, 0x35, 0x91, 0x69, 0x65, + 0xce, 0xa4, 0x7d, 0x8f, 0xc9, 0x31, 0x99, 0x86, 0x13, 0x38, 0x66, 0x14, 0xc8, 0xd4, 0x6a, 0x49, + 0xd7, 0x6a, 0x62, 0x94, 0x2d, 0x1b, 0xf1, 0x51, 0xbe, 0x48, 0x95, 0x1b, 0xbf, 0xf9, 0x0b, 0x2d, + 0xd9, 0xb6, 0x22, 0x61, 0x0f, 0x6f, 0x2b, 0xda, 0x23, 0xbe, 0x81, 0x13, 0xd4, 0xca, 0x3e, 0x44, + 0x70, 0x16, 0xeb, 0x6f, 0x23, 0xf5, 0x14, 0x66, 0x52, 0x4e, 0x3d, 0x4d, 0x8c, 0x2c, 0xb5, 0xb3, + 0x3f, 0xd9, 0xb1, 0xc0, 0x39, 0x03, 0x9f, 0x45, 0x52, 0x01, 0x2e, 0x2e, 0x7e, 0x9f, 0x3f, 0x59, + 0xab, 0x92, 0x7a, 0x0e, 0xb5, 0xf3, 0xa0, 0xf7, 0x59, 0x41, 0x35, 0x01, 0xc2, 0x46, 0x7e, 0xb2, + 0xd9, 0xcf, 0xa8, 0x56, 0x33, 0x4d, 0x0c, 0x51, 0xc8, 0xaf, 0xb7, 0x2d, 0xae, 0x97, 0x5f, 0xb4, + 0x60, 0x01, 0xbe, 0x23, 0xce, 0xb8, 0x8b, 0x1a, 0x8c, 0x92, 0x11, 0xbc, 0xf6, 0x1c, 0xb9, 0xff, + 0x7c, 0x57, 0xbc, 0xfc, 0xcc, 0xc1, 0x4f, 0x50, 0x83, 0x4a, 0x39, 0xf2, 0xaa, 0x37, 0x1e, 0x5d, + 0xf5, 0x8e, 0x3f, 0xb3, 0x7e, 0x4b, 0x13, 0x63, 0x81, 0xb4, 0x17, 0xd1, 0xf3, 0x4a, 0x43, 0x5d, + 0xab, 0xf4, 0xb6, 0x84, 0x35, 0x85, 0x6b, 0xec, 0xa0, 0x86, 0xe3, 0x45, 0x64, 0x38, 0x01, 0x87, + 0x13, 0x6f, 0xd8, 0x8b, 0xdc, 0x7a, 0x7a, 0x79, 0xa3, 0x97, 0xae, 0x6e, 0xf4, 0xd2, 0xdd, 0x8d, + 0xae, 0xbc, 0x9b, 0xeb, 0xca, 0xc7, 0xb9, 0xae, 0x5c, 0xcc, 0x75, 0xe5, 0x72, 0xae, 0x2b, 0x5f, + 0xe7, 0xba, 0xf2, 0x6d, 0xae, 0x97, 0xee, 0xe6, 0xba, 0xf2, 0xfe, 0x56, 0x2f, 0x5d, 0xde, 0xea, + 0xa5, 0xab, 0x5b, 0xbd, 0xf4, 0xaa, 0xf0, 0x6f, 0x78, 0x58, 0xe3, 0xdc, 0xfe, 0xfd, 0x1e, 0x00, + 0x00, 0xff, 0xff, 0xe0, 0x63, 0x3d, 0x6f, 0xad, 0x05, 0x00, 0x00, } func (this *PrometheusRequest) Equal(that interface{}) bool { diff --git a/pkg/querier/tripperware/queryrange/queryrange.proto b/pkg/querier/tripperware/queryrange/queryrange.proto index 67c0206d8c3..f09279693cb 100644 --- a/pkg/querier/tripperware/queryrange/queryrange.proto +++ b/pkg/querier/tripperware/queryrange/queryrange.proto @@ -4,7 +4,7 @@ package queryrange; option go_package = "queryrange"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/any.proto"; import "github.com/cortexproject/cortex/pkg/querier/tripperware/query.proto"; diff --git a/pkg/querier/tripperware/queryrange/series_test.go b/pkg/querier/tripperware/queryrange/series_test.go index e5de5ea6bf2..0ad1b67a3a2 100644 --- a/pkg/querier/tripperware/queryrange/series_test.go +++ b/pkg/querier/tripperware/queryrange/series_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -62,7 +63,7 @@ func Test_ResponseToSamples(t *testing.T) { require.Nil(t, set.Err()) sampleCt := 0 - for iter.Next() { + for iter.Next() != chunkenc.ValNone { ts, v := iter.At() require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].TimestampMs, ts) require.Equal(t, 
input.Data.Result[setCt].Samples[sampleCt].Value, v) diff --git a/pkg/ring/kv/memberlist/kv.pb.go b/pkg/ring/kv/memberlist/kv.pb.go index 4c2eb9265b7..29030cfe9b9 100644 --- a/pkg/ring/kv/memberlist/kv.pb.go +++ b/pkg/ring/kv/memberlist/kv.pb.go @@ -139,22 +139,21 @@ func init() { func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } var fileDescriptor_2216fe83c9c12408 = []byte{ - // 236 bytes of a gzipped FileDescriptorProto + // 218 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xc8, 0x2e, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xca, 0x4d, 0xcd, 0x4d, 0x4a, 0x2d, 0xca, 0xc9, 0x2c, 0x2e, - 0x91, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, - 0xcf, 0xd7, 0x07, 0x2b, 0x49, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x55, 0xc9, - 0x9e, 0x8b, 0xd7, 0x3b, 0xb5, 0x32, 0x2c, 0x31, 0xa7, 0x34, 0x35, 0xb8, 0x24, 0xbf, 0x28, 0x55, - 0x48, 0x8f, 0x8b, 0xb5, 0x20, 0x31, 0xb3, 0xa8, 0x58, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, - 0x42, 0x0f, 0x61, 0xb6, 0x1e, 0x4c, 0x65, 0x40, 0x62, 0x66, 0x51, 0x10, 0x44, 0x99, 0x92, 0x0f, - 0x17, 0x0f, 0xb2, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, - 0x67, 0x10, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x06, 0x92, 0x96, 0x60, 0x52, 0x60, 0xd4, 0xe0, - 0x09, 0x82, 0x70, 0x40, 0xa2, 0xc9, 0xf9, 0x29, 0xa9, 0xc9, 0x12, 0xcc, 0x60, 0x95, 0x10, 0x8e, - 0x93, 0xc9, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0, - 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, - 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, - 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0xb0, 0x5f, 0x8c, 0x01, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x7a, 0x22, 0xdf, 0xec, 0x12, 0x01, 0x00, 0x00, + 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x0b, 0xeb, 0x83, 0x58, 0x10, 0x15, 0x4a, 0xf6, 0x5c, + 0xbc, 0xde, 0xa9, 0x95, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0xc1, 0x25, 0xf9, 0x45, 0xa9, 0x42, 0x7a, + 0x5c, 0xac, 0x05, 0x89, 0x99, 0x45, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x12, 0x7a, + 0x08, 0x23, 0xf4, 0x60, 0x2a, 0x03, 0x12, 0x33, 0x8b, 0x82, 0x20, 0xca, 0x94, 0x7c, 0xb8, 0x78, + 0x90, 0x85, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, + 0x40, 0x4c, 0x21, 0x11, 0x2e, 0xd6, 0x32, 0x90, 0xb4, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x4f, 0x10, + 0x84, 0x03, 0x12, 0x4d, 0xce, 0x4f, 0x49, 0x4d, 0x96, 0x60, 0x06, 0xab, 0x84, 0x70, 0x9c, 0x4c, + 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, + 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, + 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, + 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xfd, 0x62, 0x0c, 0x08, 0x00, 0x00, + 0xff, 0xff, 0xbe, 0xcc, 0xb6, 0xd8, 0xf9, 0x00, 0x00, 0x00, } func (this *KeyValueStore) Equal(that interface{}) bool { diff --git a/pkg/ring/kv/memberlist/kv.proto b/pkg/ring/kv/memberlist/kv.proto index cc5f12463b3..700b2f53054 100644 --- a/pkg/ring/kv/memberlist/kv.proto +++ b/pkg/ring/kv/memberlist/kv.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package memberlist; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option 
(gogoproto.unmarshaler_all) = true; diff --git a/pkg/ring/ring.pb.go b/pkg/ring/ring.pb.go index 453ac4c2e40..bba74142c5a 100644 --- a/pkg/ring/ring.pb.go +++ b/pkg/ring/ring.pb.go @@ -210,34 +210,33 @@ func init() { func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x6e, 0xd3, 0x40, - 0x18, 0x84, 0xf7, 0x8f, 0x37, 0xae, 0xf3, 0x87, 0x56, 0xd6, 0x16, 0x21, 0x53, 0xa1, 0xc5, 0xea, - 0xc9, 0x20, 0xe1, 0x8a, 0xc0, 0x01, 0x21, 0x71, 0x68, 0xa9, 0x41, 0xb6, 0xa2, 0x50, 0x99, 0xa8, - 0x57, 0xe4, 0x24, 0x8b, 0xb1, 0x4a, 0xec, 0xca, 0xde, 0x20, 0x95, 0x13, 0x8f, 0xc0, 0x0b, 0x70, - 0xe7, 0x51, 0x7a, 0xcc, 0x09, 0xf5, 0x84, 0x88, 0x73, 0xe1, 0xd8, 0x47, 0x40, 0xbb, 0x6e, 0x65, - 0x72, 0x9b, 0xcf, 0x33, 0xff, 0x8c, 0x2d, 0x19, 0xb1, 0xcc, 0xf2, 0xd4, 0x3f, 0x2f, 0x0b, 0x59, - 0x30, 0xaa, 0xf4, 0xde, 0x93, 0x34, 0x93, 0x9f, 0x16, 0x13, 0x7f, 0x5a, 0xcc, 0x0f, 0xd2, 0x22, - 0x2d, 0x0e, 0xb4, 0x39, 0x59, 0x7c, 0xd4, 0xa4, 0x41, 0xab, 0xe6, 0x68, 0xff, 0x07, 0x20, 0x3d, - 0x16, 0xd5, 0x94, 0xbd, 0xc2, 0x5e, 0x96, 0xa7, 0xa2, 0x92, 0xa2, 0xac, 0x1c, 0x70, 0x0d, 0xaf, - 0x3f, 0xb8, 0xef, 0xeb, 0x76, 0x65, 0xfb, 0xe1, 0xad, 0x17, 0xe4, 0xb2, 0xbc, 0x38, 0xa2, 0x97, - 0xbf, 0x1f, 0x92, 0xb8, 0xbd, 0xd8, 0x3b, 0xc1, 0x9d, 0xcd, 0x08, 0xb3, 0xd1, 0x38, 0x13, 0x17, - 0x0e, 0xb8, 0xe0, 0xf5, 0x62, 0x25, 0x99, 0x87, 0xdd, 0x2f, 0xc9, 0xe7, 0x85, 0x70, 0x3a, 0x2e, - 0x78, 0xfd, 0x01, 0x6b, 0xea, 0xc3, 0xbc, 0x92, 0x49, 0x3e, 0x15, 0x6a, 0x26, 0x6e, 0x02, 0x2f, - 0x3b, 0x2f, 0x20, 0xa2, 0x56, 0xc7, 0x36, 0xf6, 0x7f, 0x01, 0xde, 0xf9, 0x3f, 0xc1, 0x18, 0xd2, - 0x64, 0x36, 0x2b, 0x6f, 0x7a, 0xb5, 0x66, 0x0f, 0xb0, 0x27, 0xb3, 0xb9, 0xa8, 0x64, 0x32, 0x3f, - 0xd7, 0xe5, 0x46, 0xdc, 0x3e, 0x60, 0x8f, 0xb0, 0x5b, 0xc9, 0x44, 0x0a, 0xc7, 0x70, 0xc1, 0xdb, - 0x19, 0xec, 0x6e, 0xce, 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, 0x26, 0xf2, - 0xca, 0x31, 0x5d, 0xc3, 0xdb, 0x8e, 0x6f, 0x48, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, 0x35, 0xa3, - 0x4a, 0xb3, 0xa7, 0x78, 0xb7, 0x14, 0x69, 0xa6, 0xbe, 0x58, 0xcc, 0x3e, 0xb4, 0xfb, 0x96, 0xde, - 0xdf, 0x6d, 0xbd, 0xf1, 0xad, 0x15, 0x51, 0x8b, 0xda, 0xdd, 0x88, 0x5a, 0x5d, 0xdb, 0x7c, 0x3c, - 0xc4, 0xed, 0x8d, 0x57, 0x60, 0x88, 0xe6, 0xe1, 0xeb, 0x71, 0x78, 0x1a, 0xd8, 0x84, 0xf5, 0x71, - 0x6b, 0x18, 0x1c, 0x9e, 0x86, 0xa3, 0xb7, 0x36, 0x28, 0x38, 0x09, 0x46, 0xc7, 0x0a, 0x3a, 0x0a, - 0xa2, 0x77, 0xe1, 0x48, 0x81, 0xc1, 0x2c, 0xa4, 0xc3, 0xe0, 0xcd, 0xd8, 0xa6, 0x47, 0xcf, 0x97, - 0x2b, 0x4e, 0xae, 0x56, 0x9c, 0x5c, 0xaf, 0x38, 0x7c, 0xab, 0x39, 0xfc, 0xac, 0x39, 0x5c, 0xd6, - 0x1c, 0x96, 0x35, 0x87, 0x3f, 0x35, 0x87, 0xbf, 0x35, 0x27, 0xd7, 0x35, 0x87, 0xef, 0x6b, 0x4e, - 0x96, 0x6b, 0x4e, 0xae, 0xd6, 0x9c, 0x4c, 0x4c, 0xfd, 0x0f, 0x3c, 0xfb, 0x17, 0x00, 0x00, 0xff, - 0xff, 0x97, 0x76, 0x41, 0xaf, 0x46, 0x02, 0x00, 0x00, + // 409 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x8a, 0xd3, 0x50, + 0x18, 0x85, 0xef, 0x9f, 0xdc, 0x64, 0xd2, 0xbf, 0xce, 0x10, 0xee, 0x0c, 0x12, 0x07, 0xb9, 0x86, + 0x59, 0x45, 0x17, 0x15, 0xab, 0x0b, 0x11, 0x5c, 0xcc, 0x38, 0x51, 0x12, 0x4a, 0x1d, 0x62, 0x99, + 0xad, 0xc4, 0xe9, 0x25, 0x84, 0xb1, 0x49, 0x49, 0xae, 0x42, 0x5d, 0xf9, 0x08, 0xbe, 0x80, 0x7b, + 0x1f, 0xa5, 0xcb, 0xae, 0xa4, 0x2b, 0xb1, 0xe9, 0xc6, 0x65, 0x1f, 0x41, 0x6e, 0xd2, 0x92, 0xe9, + 0xee, 0x9c, 0xff, 
0x9c, 0x9c, 0x2f, 0x81, 0x20, 0x16, 0x69, 0x96, 0xf4, 0xa6, 0x45, 0x2e, 0x73, + 0x46, 0x95, 0x3e, 0x3d, 0x49, 0xf2, 0x24, 0xaf, 0x0f, 0x4f, 0x95, 0x6a, 0xb2, 0xb3, 0x9f, 0x80, + 0xf4, 0x52, 0x94, 0x37, 0xec, 0x35, 0x76, 0xd2, 0x2c, 0x11, 0xa5, 0x14, 0x45, 0xe9, 0x80, 0xab, + 0x7b, 0xdd, 0xfe, 0x83, 0x5e, 0x3d, 0xa2, 0xe2, 0x5e, 0xb0, 0xcb, 0xfc, 0x4c, 0x16, 0xb3, 0x0b, + 0x3a, 0xff, 0xf3, 0x88, 0x44, 0xed, 0x13, 0xa7, 0x57, 0x78, 0xb4, 0x5f, 0x61, 0x36, 0xea, 0xb7, + 0x62, 0xe6, 0x80, 0x0b, 0x5e, 0x27, 0x52, 0x92, 0x79, 0x68, 0x7c, 0x8d, 0x3f, 0x7f, 0x11, 0x8e, + 0xe6, 0x82, 0xd7, 0xed, 0xb3, 0x66, 0x3e, 0xc8, 0x4a, 0x19, 0x67, 0x37, 0x42, 0x61, 0xa2, 0xa6, + 0xf0, 0x4a, 0x7b, 0x09, 0x21, 0xb5, 0x34, 0x5b, 0x3f, 0xfb, 0x0d, 0x78, 0xef, 0x6e, 0x83, 0x31, + 0xa4, 0xf1, 0x78, 0x5c, 0x6c, 0x77, 0x6b, 0xcd, 0x1e, 0x62, 0x47, 0xa6, 0x13, 0x51, 0xca, 0x78, + 0x32, 0xad, 0xc7, 0xf5, 0xa8, 0x3d, 0xb0, 0xc7, 0x68, 0x94, 0x32, 0x96, 0xc2, 0xd1, 0x5d, 0xf0, + 0x8e, 0xfa, 0xc7, 0xfb, 0xd8, 0x0f, 0x2a, 0x8a, 0x9a, 0x06, 0xbb, 0x8f, 0xa6, 0xcc, 0x6f, 0x45, + 0x56, 0x3a, 0xa6, 0xab, 0x7b, 0x87, 0xd1, 0xd6, 0x29, 0xe8, 0xb7, 0x3c, 0x13, 0xce, 0x41, 0x03, + 0x55, 0x9a, 0x3d, 0xc3, 0x93, 0x42, 0x24, 0xa9, 0xfa, 0x62, 0x31, 0xfe, 0xd8, 0xf2, 0xad, 0x9a, + 0x7f, 0xdc, 0x66, 0xa3, 0x5d, 0x14, 0x52, 0x8b, 0xda, 0x46, 0x48, 0x2d, 0xc3, 0x36, 0x9f, 0x0c, + 0xf0, 0x70, 0xef, 0x15, 0x18, 0xa2, 0x79, 0xfe, 0x66, 0x14, 0x5c, 0xfb, 0x36, 0x61, 0x5d, 0x3c, + 0x18, 0xf8, 0xe7, 0xd7, 0xc1, 0xf0, 0x9d, 0x0d, 0xca, 0x5c, 0xf9, 0xc3, 0x4b, 0x65, 0x34, 0x65, + 0xc2, 0xf7, 0xc1, 0x50, 0x19, 0x9d, 0x59, 0x48, 0x07, 0xfe, 0xdb, 0x91, 0x4d, 0x2f, 0x5e, 0x2c, + 0x56, 0x9c, 0x2c, 0x57, 0x9c, 0x6c, 0x56, 0x1c, 0xbe, 0x57, 0x1c, 0x7e, 0x55, 0x1c, 0xe6, 0x15, + 0x87, 0x45, 0xc5, 0xe1, 0x6f, 0xc5, 0xe1, 0x5f, 0xc5, 0xc9, 0xa6, 0xe2, 0xf0, 0x63, 0xcd, 0xc9, + 0x62, 0xcd, 0xc9, 0x72, 0xcd, 0xc9, 0x27, 0xb3, 0xfe, 0x07, 0x9e, 0xff, 0x0f, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0x1c, 0x09, 0x3a, 0x2d, 0x02, 0x00, 0x00, } func (x InstanceState) String() string { diff --git a/pkg/ring/ring.proto b/pkg/ring/ring.proto index 8f464dc2c8c..5dfeea8fad6 100644 --- a/pkg/ring/ring.proto +++ b/pkg/ring/ring.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package ring; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 72559f42a04..0e1d99ffe03 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/value" @@ -43,6 +44,10 @@ type PusherAppender struct { evaluationDelay time.Duration } +func (a *PusherAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) { + return 0, errors.New("querying native histograms is not supported") +} + func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.labels = append(a.labels, l) @@ -158,7 +163,7 @@ func EngineQueryFunc(engine v1.QueryEngine, q storage.Queryable, overrides Rules return v, nil case promql.Scalar: return promql.Vector{promql.Sample{ - Point: 
promql.Point(v), + Point: promql.Point{T: v.T, V: v.V}, Metric: labels.Labels{}, }}, nil default: diff --git a/pkg/ruler/ruler.pb.go b/pkg/ruler/ruler.pb.go index 1d3501eb20f..e427a077629 100644 --- a/pkg/ruler/ruler.pb.go +++ b/pkg/ruler/ruler.pb.go @@ -380,50 +380,50 @@ func init() { func init() { proto.RegisterFile("ruler.proto", fileDescriptor_9ecbec0a4cfddea6) } var fileDescriptor_9ecbec0a4cfddea6 = []byte{ - // 677 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcf, 0x4f, 0x13, 0x41, - 0x14, 0xde, 0x29, 0x6c, 0x69, 0xa7, 0x88, 0xc9, 0x50, 0xcd, 0xda, 0x98, 0x29, 0xa9, 0x17, 0x62, - 0xc2, 0x92, 0x20, 0x89, 0xf1, 0x80, 0x66, 0x09, 0xe8, 0xc5, 0x83, 0x59, 0xd4, 0x2b, 0x99, 0xb6, - 0xc3, 0xb2, 0xba, 0xec, 0xac, 0x33, 0xb3, 0x0d, 0x47, 0xfe, 0x04, 0x8e, 0x9e, 0x3d, 0xf9, 0xa7, - 0x70, 0xe4, 0x48, 0x8c, 0x41, 0x59, 0x2e, 0x1e, 0xf9, 0x13, 0xcc, 0xfc, 0x58, 0xdb, 0x2a, 0x26, - 0x6e, 0x0c, 0x97, 0x76, 0xdf, 0xbc, 0xf7, 0x7d, 0xdf, 0xbc, 0xef, 0xcd, 0x0c, 0x6c, 0xf1, 0x3c, - 0xa1, 0xdc, 0xcf, 0x38, 0x93, 0x0c, 0xb9, 0x3a, 0xe8, 0xac, 0x44, 0xb1, 0xdc, 0xcf, 0xfb, 0xfe, - 0x80, 0x1d, 0xac, 0x46, 0x2c, 0x62, 0xab, 0x3a, 0xdb, 0xcf, 0xf7, 0x74, 0xa4, 0x03, 0xfd, 0x65, - 0x50, 0x1d, 0x1c, 0x31, 0x16, 0x25, 0x74, 0x5c, 0x35, 0xcc, 0x39, 0x91, 0x31, 0x4b, 0x6d, 0xbe, - 0xfb, 0x7b, 0x5e, 0xc6, 0x07, 0x54, 0x48, 0x72, 0x90, 0xd9, 0x82, 0x27, 0x13, 0x7a, 0x03, 0xc6, - 0x25, 0x3d, 0xcc, 0x38, 0x7b, 0x47, 0x07, 0xd2, 0x46, 0xab, 0xd9, 0xfb, 0xa8, 0x4c, 0xf4, 0xed, - 0x87, 0x85, 0x6e, 0xfc, 0x0b, 0x54, 0x77, 0xa5, 0x7f, 0x45, 0xd6, 0x37, 0xff, 0x06, 0xde, 0x5b, - 0x80, 0xf3, 0xa1, 0x0a, 0x43, 0xfa, 0x21, 0xa7, 0x42, 0xf6, 0x9e, 0xc2, 0x5b, 0x36, 0x16, 0x19, - 0x4b, 0x05, 0x45, 0x2b, 0xb0, 0x1e, 0x71, 0x96, 0x67, 0xc2, 0x03, 0x4b, 0x33, 0xcb, 0xad, 0xb5, - 0x3b, 0xbe, 0xf1, 0xeb, 0x85, 0x5a, 0xdc, 0x91, 0x44, 0xd2, 0x2d, 0x2a, 0x06, 0xa1, 0x2d, 0xea, - 0x7d, 0xaa, 0xc1, 0x85, 0xe9, 0x14, 0x7a, 0x08, 0x5d, 0x9d, 0xf4, 0xc0, 0x12, 0x58, 0x6e, 0xad, - 0xb5, 0x7d, 0xa3, 0xaf, 0x64, 0x74, 0xa5, 0xc6, 0x9b, 0x12, 0xf4, 0x18, 0xce, 0x93, 0x81, 0x8c, - 0x47, 0x74, 0x57, 0x17, 0x79, 0x35, 0xad, 0xd9, 0xb6, 0x9a, 0x0a, 0x32, 0x96, 0x6c, 0x99, 0x4a, - 0xbd, 0x5d, 0xf4, 0x16, 0x2e, 0xd2, 0x11, 0x49, 0x72, 0x6d, 0xfb, 0xeb, 0xd2, 0x5e, 0x6f, 0x46, - 0x4b, 0x76, 0x7c, 0x33, 0x00, 0xbf, 0x1c, 0x80, 0xff, 0xab, 0x62, 0xb3, 0x71, 0x72, 0xde, 0x75, - 0x8e, 0xbf, 0x75, 0x41, 0x78, 0x1d, 0x01, 0xda, 0x81, 0x68, 0xbc, 0xbc, 0x65, 0xc7, 0xea, 0xcd, - 0x6a, 0xda, 0x7b, 0x7f, 0xd0, 0x96, 0x05, 0x86, 0xf5, 0xa3, 0x62, 0xbd, 0x06, 0xde, 0xfb, 0x5a, - 0x33, 0x2e, 0x8f, 0x3d, 0x7a, 0x00, 0x67, 0x55, 0x8b, 0xd6, 0xa2, 0xdb, 0x13, 0x16, 0xe9, 0x56, - 0x75, 0x12, 0xb5, 0xa1, 0x2b, 0x14, 0xc2, 0xab, 0x2d, 0x81, 0xe5, 0x66, 0x68, 0x02, 0x74, 0x17, - 0xd6, 0xf7, 0x29, 0x49, 0xe4, 0xbe, 0x6e, 0xb6, 0x19, 0xda, 0x08, 0xdd, 0x87, 0xcd, 0x84, 0x08, - 0xb9, 0xcd, 0x39, 0xe3, 0x7a, 0xc3, 0xcd, 0x70, 0xbc, 0xa0, 0xc6, 0x4a, 0x12, 0xca, 0xa5, 0xf0, - 0xdc, 0xa9, 0xb1, 0x06, 0x6a, 0x71, 0x62, 0xac, 0xa6, 0xe8, 0x6f, 0xf6, 0xd6, 0x6f, 0xc6, 0xde, - 0xb9, 0xff, 0xb3, 0xf7, 0xc8, 0x85, 0x0b, 0xd3, 0x7d, 0x8c, 0xad, 0x03, 0x93, 0xd6, 0xa5, 0xb0, - 0x9e, 0x90, 0x3e, 0x4d, 0xca, 0x73, 0xb6, 0xe8, 0x97, 0x77, 0xcc, 0x7f, 0xa9, 0xd6, 0x5f, 0x91, - 0x98, 0x6f, 0x06, 0x4a, 0xeb, 0xcb, 0x79, 0xb7, 0xd2, 0x1d, 0x35, 0xf8, 0x60, 0x48, 0x32, 0x49, - 0x79, 0x68, 0x55, 0xd0, 0x21, 0x6c, 0x91, 0x34, 0x65, 0x52, 0x6f, 0x53, 0x78, 0x33, 0x37, 0x2a, - 0x3a, 0x29, 0xa5, 0xfa, 0x57, 0x3e, 0x51, 0x7d, 
0x10, 0x40, 0x68, 0x02, 0x14, 0xc0, 0xa6, 0xbd, - 0x6d, 0x44, 0x7a, 0x6e, 0x85, 0x59, 0x36, 0x0c, 0x2c, 0x90, 0xe8, 0x19, 0x6c, 0xec, 0xc5, 0x9c, - 0x0e, 0x15, 0x43, 0x95, 0xd3, 0x30, 0xa7, 0x51, 0x81, 0x44, 0xdb, 0xb0, 0xc5, 0xa9, 0x60, 0xc9, - 0xc8, 0x70, 0xcc, 0x55, 0xe0, 0x80, 0x25, 0x30, 0x90, 0xe8, 0x39, 0x9c, 0x57, 0x87, 0x7b, 0x57, - 0xd0, 0x54, 0x2a, 0x9e, 0x46, 0x15, 0x1e, 0x85, 0xdc, 0xa1, 0xa9, 0x34, 0xdb, 0x19, 0x91, 0x24, - 0x1e, 0xee, 0xe6, 0xa9, 0x8c, 0x13, 0xaf, 0x59, 0x85, 0x46, 0x03, 0xdf, 0x28, 0xdc, 0xda, 0x06, - 0x74, 0xd5, 0xe5, 0xe5, 0x68, 0xdd, 0x7c, 0x08, 0xb4, 0x38, 0xf1, 0x86, 0x95, 0xaf, 0x6d, 0xa7, - 0x3d, 0xbd, 0x68, 0x9e, 0xdc, 0x9e, 0xb3, 0xb9, 0x7e, 0x7a, 0x81, 0x9d, 0xb3, 0x0b, 0xec, 0x5c, - 0x5d, 0x60, 0x70, 0x54, 0x60, 0xf0, 0xb9, 0xc0, 0xe0, 0xa4, 0xc0, 0xe0, 0xb4, 0xc0, 0xe0, 0x7b, - 0x81, 0xc1, 0x8f, 0x02, 0x3b, 0x57, 0x05, 0x06, 0xc7, 0x97, 0xd8, 0x39, 0xbd, 0xc4, 0xce, 0xd9, - 0x25, 0x76, 0xfa, 0x75, 0xbd, 0xbd, 0x47, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x29, 0x57, 0x9d, - 0xdd, 0xd2, 0x06, 0x00, 0x00, + // 674 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x4f, 0x4f, 0x13, 0x4f, + 0x18, 0xde, 0x29, 0x6c, 0x69, 0xa7, 0xfc, 0xf8, 0x25, 0x43, 0x35, 0x6b, 0x63, 0xa6, 0xa4, 0x5e, + 0x88, 0x89, 0xdb, 0x04, 0x49, 0x8c, 0x07, 0x34, 0x4b, 0x40, 0x2f, 0x1e, 0xcc, 0xa2, 0x5e, 0xc9, + 0xb4, 0x1d, 0x96, 0xd5, 0x65, 0x67, 0x9d, 0x99, 0x6d, 0x38, 0xf2, 0x11, 0x38, 0x7a, 0xf6, 0xe4, + 0x47, 0xe1, 0xc8, 0x91, 0x18, 0x83, 0xb2, 0x5c, 0x3c, 0xf2, 0x11, 0xcc, 0xfc, 0x59, 0xdb, 0x2a, + 0x26, 0x6e, 0x0c, 0x97, 0x76, 0xdf, 0x3f, 0xcf, 0xf3, 0xbe, 0xef, 0xf3, 0xce, 0x0c, 0x6c, 0xf1, + 0x3c, 0xa1, 0xdc, 0xcf, 0x38, 0x93, 0x0c, 0xb9, 0xda, 0xe8, 0xb4, 0x23, 0x16, 0x31, 0xed, 0xe9, + 0xab, 0x2f, 0x13, 0xec, 0xe0, 0x88, 0xb1, 0x28, 0xa1, 0x7d, 0x6d, 0x0d, 0xf2, 0xbd, 0xfe, 0x28, + 0xe7, 0x44, 0xc6, 0x2c, 0xb5, 0xf1, 0xee, 0xaf, 0x71, 0x19, 0x1f, 0x50, 0x21, 0xc9, 0x41, 0x66, + 0x13, 0x1e, 0x47, 0xb1, 0xdc, 0xcf, 0x07, 0xfe, 0x90, 0x1d, 0xf4, 0x87, 0x8c, 0x4b, 0x7a, 0x98, + 0x71, 0xf6, 0x96, 0x0e, 0xa5, 0xb5, 0xfa, 0xd9, 0xbb, 0xa8, 0x0c, 0x0c, 0xec, 0x87, 0x85, 0x6e, + 0xfc, 0x0d, 0x54, 0x37, 0xaf, 0x7f, 0x45, 0x36, 0x30, 0xff, 0x06, 0xde, 0x5b, 0x82, 0x8b, 0xa1, + 0x32, 0x43, 0xfa, 0x3e, 0xa7, 0x42, 0xf6, 0x9e, 0xc0, 0xff, 0xac, 0x2d, 0x32, 0x96, 0x0a, 0x8a, + 0x1e, 0xc0, 0x7a, 0xc4, 0x59, 0x9e, 0x09, 0x0f, 0xac, 0xcc, 0xad, 0xb6, 0xd6, 0x6e, 0xf9, 0x46, + 0x96, 0xe7, 0xca, 0xb9, 0x23, 0x89, 0xa4, 0x5b, 0x54, 0x0c, 0x43, 0x9b, 0xd4, 0xfb, 0x58, 0x83, + 0x4b, 0xb3, 0x21, 0x74, 0x1f, 0xba, 0x3a, 0xe8, 0x81, 0x15, 0xb0, 0xda, 0x5a, 0x6b, 0xfb, 0xa6, + 0xbe, 0x2a, 0xa3, 0x33, 0x35, 0xde, 0xa4, 0xa0, 0x47, 0x70, 0x91, 0x0c, 0x65, 0x3c, 0xa6, 0xbb, + 0x3a, 0xc9, 0xab, 0xe9, 0x9a, 0x6d, 0x5b, 0x53, 0x41, 0x26, 0x25, 0x5b, 0x26, 0x53, 0xb7, 0x8b, + 0xde, 0xc0, 0x65, 0x3a, 0x26, 0x49, 0xae, 0x65, 0x7f, 0x55, 0xca, 0xeb, 0xcd, 0xe9, 0x92, 0x1d, + 0xdf, 0x2c, 0xc0, 0x2f, 0x17, 0xe0, 0xff, 0xcc, 0xd8, 0x6c, 0x9c, 0x9c, 0x77, 0x9d, 0xe3, 0xaf, + 0x5d, 0x10, 0x5e, 0x47, 0x80, 0x76, 0x20, 0x9a, 0xb8, 0xb7, 0xec, 0x5a, 0xbd, 0x79, 0x4d, 0x7b, + 0xe7, 0x37, 0xda, 0x32, 0xc1, 0xb0, 0x7e, 0x50, 0xac, 0xd7, 0xc0, 0x7b, 0x5f, 0x6a, 0x46, 0xe5, + 0x89, 0x46, 0xf7, 0xe0, 0xbc, 0x1a, 0xd1, 0x4a, 0xf4, 0xff, 0x94, 0x44, 0x7a, 0x54, 0x1d, 0x44, + 0x6d, 0xe8, 0x0a, 0x85, 0xf0, 0x6a, 0x2b, 0x60, 0xb5, 0x19, 0x1a, 0x03, 0xdd, 0x86, 0xf5, 0x7d, + 0x4a, 0x12, 0xb9, 0xaf, 0x87, 0x6d, 0x86, 0xd6, 0x42, 0x77, 0x61, 0x33, 0x21, 0x42, 0x6e, 
0x73, + 0xce, 0xb8, 0x6e, 0xb8, 0x19, 0x4e, 0x1c, 0x6a, 0xad, 0x24, 0xa1, 0x5c, 0x0a, 0xcf, 0x9d, 0x59, + 0x6b, 0xa0, 0x9c, 0x53, 0x6b, 0x35, 0x49, 0x7f, 0x92, 0xb7, 0x7e, 0x33, 0xf2, 0x2e, 0xfc, 0x9b, + 0xbc, 0x47, 0x2e, 0x5c, 0x9a, 0x9d, 0x63, 0x22, 0x1d, 0x98, 0x96, 0x2e, 0x85, 0xf5, 0x84, 0x0c, + 0x68, 0x52, 0x9e, 0xb3, 0x65, 0xbf, 0xbc, 0x63, 0xfe, 0x0b, 0xe5, 0x7f, 0x49, 0x62, 0xbe, 0x19, + 0xa8, 0x5a, 0x9f, 0xcf, 0xbb, 0x95, 0xee, 0xa8, 0xc1, 0x07, 0x23, 0x92, 0x49, 0xca, 0x43, 0x5b, + 0x05, 0x1d, 0xc2, 0x16, 0x49, 0x53, 0x26, 0x75, 0x9b, 0xc2, 0x9b, 0xbb, 0xd1, 0xa2, 0xd3, 0xa5, + 0xd4, 0xfc, 0x4a, 0x27, 0xaa, 0x0f, 0x02, 0x08, 0x8d, 0x81, 0x02, 0xd8, 0xb4, 0xb7, 0x8d, 0x48, + 0xcf, 0xad, 0xb0, 0xcb, 0x86, 0x81, 0x05, 0x12, 0x3d, 0x85, 0x8d, 0xbd, 0x98, 0xd3, 0x91, 0x62, + 0xa8, 0x72, 0x1a, 0x16, 0x34, 0x2a, 0x90, 0x68, 0x1b, 0xb6, 0x38, 0x15, 0x2c, 0x19, 0x1b, 0x8e, + 0x85, 0x0a, 0x1c, 0xb0, 0x04, 0x06, 0x12, 0x3d, 0x83, 0x8b, 0xea, 0x70, 0xef, 0x0a, 0x9a, 0x4a, + 0xc5, 0xd3, 0xa8, 0xc2, 0xa3, 0x90, 0x3b, 0x34, 0x95, 0xa6, 0x9d, 0x31, 0x49, 0xe2, 0xd1, 0x6e, + 0x9e, 0xca, 0x38, 0xf1, 0x9a, 0x55, 0x68, 0x34, 0xf0, 0xb5, 0xc2, 0xad, 0x6d, 0x40, 0x57, 0x5d, + 0x5e, 0x8e, 0xd6, 0xcd, 0x87, 0x40, 0xcb, 0x53, 0x6f, 0x58, 0xf9, 0xda, 0x76, 0xda, 0xb3, 0x4e, + 0xf3, 0xe4, 0xf6, 0x9c, 0xcd, 0xf5, 0xd3, 0x0b, 0xec, 0x9c, 0x5d, 0x60, 0xe7, 0xea, 0x02, 0x83, + 0xa3, 0x02, 0x83, 0x4f, 0x05, 0x06, 0x27, 0x05, 0x06, 0xa7, 0x05, 0x06, 0xdf, 0x0a, 0x0c, 0xbe, + 0x17, 0xd8, 0xb9, 0x2a, 0x30, 0x38, 0xbe, 0xc4, 0xce, 0xe9, 0x25, 0x76, 0xce, 0x2e, 0xb1, 0x33, + 0xa8, 0xeb, 0xf6, 0x1e, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, 0x71, 0x5b, 0x12, 0x62, 0xb9, 0x06, + 0x00, 0x00, } func (this *RulesRequest) Equal(that interface{}) bool { diff --git a/pkg/ruler/ruler.proto b/pkg/ruler/ruler.proto index 2b66db41208..6cc6d3f1da6 100644 --- a/pkg/ruler/ruler.proto +++ b/pkg/ruler/ruler.proto @@ -5,7 +5,7 @@ syntax = "proto3"; package ruler; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go index 8f76eaa471b..778448f6633 100644 --- a/pkg/ruler/rulespb/rules.pb.go +++ b/pkg/ruler/rulespb/rules.pb.go @@ -198,37 +198,37 @@ func init() { func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } var fileDescriptor_8e722d3e922f0937 = []byte{ - // 476 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0x3f, 0x6f, 0xd3, 0x40, - 0x1c, 0xf5, 0x35, 0x8e, 0x63, 0x5f, 0x54, 0x11, 0x1d, 0x15, 0x72, 0x2b, 0x74, 0x89, 0x2a, 0x21, - 0x65, 0xe1, 0x22, 0x15, 0x31, 0x30, 0x20, 0x94, 0xa8, 0x12, 0x52, 0xc4, 0x80, 0x3c, 0xb2, 0x9d, - 0x9d, 0xab, 0x31, 0xb8, 0xbe, 0xd3, 0xf9, 0x8c, 0xda, 0x8d, 0x8f, 0xc0, 0xc8, 0x47, 0xe0, 0xa3, - 0x74, 0xcc, 0x58, 0x31, 0x14, 0xe2, 0x2c, 0x8c, 0x95, 0xf8, 0x00, 0xa0, 0xfb, 0x63, 0x5a, 0xc1, - 0x02, 0x03, 0x53, 0x7e, 0xef, 0xde, 0xbd, 0xbc, 0xf7, 0x7b, 0x67, 0x38, 0x94, 0x4d, 0xc9, 0x6a, - 0x22, 0x24, 0x57, 0x1c, 0xf5, 0x0d, 0x38, 0x78, 0x98, 0x17, 0xea, 0x75, 0x93, 0x92, 0x8c, 0x9f, - 0xce, 0x72, 0x9e, 0xf3, 0x99, 0x61, 0xd3, 0xe6, 0xc4, 0x20, 0x03, 0xcc, 0x64, 0x55, 0x07, 0x38, - 0xe7, 0x3c, 0x2f, 0xd9, 0xcd, 0xad, 0x55, 0x23, 0xa9, 0x2a, 0x78, 0xe5, 0xf8, 0xfd, 0xdf, 0x79, - 0x5a, 0x9d, 0x3b, 0xea, 0xc9, 0x2d, 0xa7, 0x8c, 0x4b, 0xc5, 0xce, 0x84, 0xe4, 0x6f, 
0x58, 0xa6, - 0x1c, 0x9a, 0x89, 0xb7, 0x79, 0x47, 0xa4, 0x6e, 0xb0, 0xd2, 0xc3, 0x1f, 0x00, 0xee, 0x26, 0x4d, - 0xc9, 0x9e, 0x4b, 0xde, 0x88, 0x63, 0x56, 0x67, 0x08, 0x41, 0xbf, 0xa2, 0xa7, 0x2c, 0x06, 0x13, - 0x30, 0x8d, 0x12, 0x33, 0xa3, 0xfb, 0x30, 0xd2, 0xbf, 0xb5, 0xa0, 0x19, 0x8b, 0x77, 0x0c, 0x71, - 0x73, 0x80, 0x9e, 0xc1, 0xb0, 0xa8, 0x14, 0x93, 0xef, 0x68, 0x19, 0xf7, 0x26, 0x60, 0x3a, 0x3c, - 0xda, 0x27, 0x36, 0x2c, 0xe9, 0xc2, 0x92, 0x63, 0xb7, 0xcc, 0x22, 0xbc, 0xb8, 0x1a, 0x7b, 0x1f, - 0xbf, 0x8c, 0x41, 0xf2, 0x4b, 0x84, 0x1e, 0x40, 0x5b, 0x59, 0xec, 0x4f, 0x7a, 0xd3, 0xe1, 0xd1, - 0x1d, 0x62, 0xdb, 0xd4, 0xb9, 0x74, 0xa4, 0xc4, 0xb2, 0x3a, 0x59, 0x53, 0x33, 0x19, 0x07, 0x36, - 0x99, 0x9e, 0x11, 0x81, 0x03, 0x2e, 0xf4, 0x1f, 0xd7, 0x71, 0x64, 0xc4, 0x7b, 0x7f, 0x58, 0xcf, - 0xab, 0xf3, 0xa4, 0xbb, 0xb4, 0xf4, 0xc3, 0xfe, 0x28, 0x58, 0xfa, 0xe1, 0x60, 0x14, 0x2e, 0xfd, - 0x30, 0x1c, 0x45, 0x87, 0xdf, 0x77, 0x60, 0xd8, 0x39, 0x69, 0x0b, 0x5d, 0x5e, 0xb7, 0xbc, 0x9e, - 0xd1, 0x3d, 0x18, 0x48, 0x96, 0x71, 0xb9, 0x72, 0x9b, 0x3b, 0x84, 0xf6, 0x60, 0x9f, 0x96, 0x4c, - 0x2a, 0xb3, 0x73, 0x94, 0x58, 0x80, 0x1e, 0xc3, 0xde, 0x09, 0x97, 0xb1, 0xff, 0xf7, 0x3d, 0xe8, - 0xfb, 0xa8, 0x82, 0x41, 0x49, 0x53, 0x56, 0xd6, 0x71, 0xdf, 0xac, 0x71, 0x97, 0x74, 0xef, 0x45, - 0x5e, 0xe8, 0xf3, 0x97, 0xb4, 0x90, 0x8b, 0xb9, 0xd6, 0x7c, 0xbe, 0x1a, 0xff, 0xd3, 0x7b, 0x5b, - 0xfd, 0x7c, 0x45, 0x85, 0x62, 0x32, 0x71, 0x2e, 0xe8, 0x0c, 0x0e, 0x69, 0x55, 0x71, 0x45, 0x6d, - 0x77, 0xc1, 0x7f, 0x35, 0xbd, 0x6d, 0x65, 0xba, 0xdf, 0x5d, 0x3c, 0x5d, 0x6f, 0xb0, 0x77, 0xb9, - 0xc1, 0xde, 0xf5, 0x06, 0x83, 0xf7, 0x2d, 0x06, 0x9f, 0x5a, 0x0c, 0x2e, 0x5a, 0x0c, 0xd6, 0x2d, - 0x06, 0x5f, 0x5b, 0x0c, 0xbe, 0xb5, 0xd8, 0xbb, 0x6e, 0x31, 0xf8, 0xb0, 0xc5, 0xde, 0x7a, 0x8b, - 0xbd, 0xcb, 0x2d, 0xf6, 0x5e, 0x0d, 0xcc, 0x87, 0x20, 0xd2, 0x34, 0x30, 0x85, 0x3e, 0xfa, 0x19, - 0x00, 0x00, 0xff, 0xff, 0xa0, 0xd3, 0x9a, 0x1a, 0x78, 0x03, 0x00, 0x00, + // 471 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xbf, 0x6e, 0xd3, 0x40, + 0x18, 0xf7, 0x35, 0x8e, 0x63, 0x5f, 0x54, 0x11, 0x1d, 0x11, 0x72, 0x2b, 0x74, 0x89, 0x2a, 0x21, + 0x65, 0x3a, 0x4b, 0x45, 0x0c, 0x0c, 0x08, 0x25, 0xaa, 0x84, 0x14, 0x31, 0x20, 0x8f, 0x6c, 0x67, + 0xe7, 0x6a, 0x02, 0xae, 0xef, 0x74, 0x3e, 0xa3, 0x76, 0xe3, 0x11, 0x18, 0x79, 0x04, 0x1e, 0xa5, + 0x63, 0xc6, 0x8a, 0xa1, 0x10, 0x67, 0x61, 0xac, 0xc4, 0x03, 0x80, 0xee, 0x8f, 0x01, 0xc1, 0x02, + 0x43, 0x27, 0x7f, 0xdf, 0xf7, 0xbb, 0xcf, 0xbf, 0x3f, 0x77, 0x70, 0x28, 0x9b, 0x92, 0xd5, 0x44, + 0x48, 0xae, 0x38, 0xea, 0x9b, 0xe6, 0x70, 0x5c, 0xf0, 0x82, 0x9b, 0x49, 0xa2, 0x2b, 0x0b, 0x1e, + 0xe2, 0x82, 0xf3, 0xa2, 0x64, 0x89, 0xe9, 0xb2, 0xe6, 0x34, 0x59, 0x35, 0x92, 0xaa, 0x35, 0xaf, + 0x1c, 0x7e, 0xf0, 0x27, 0x4e, 0xab, 0x0b, 0x07, 0x3d, 0x2e, 0xd6, 0xea, 0x55, 0x93, 0x91, 0x9c, + 0x9f, 0x25, 0x39, 0x97, 0x8a, 0x9d, 0x0b, 0xc9, 0x5f, 0xb3, 0x5c, 0xb9, 0x2e, 0x11, 0x6f, 0x8a, + 0x0e, 0xc8, 0x5c, 0x61, 0x57, 0x8f, 0xbe, 0x03, 0xb8, 0x9f, 0x36, 0x25, 0x7b, 0x26, 0x79, 0x23, + 0x4e, 0x58, 0x9d, 0x23, 0x04, 0xfd, 0x8a, 0x9e, 0xb1, 0x18, 0x4c, 0xc1, 0x2c, 0x4a, 0x4d, 0x8d, + 0xee, 0xc3, 0x48, 0x7f, 0x6b, 0x41, 0x73, 0x16, 0xef, 0x19, 0xe0, 0xd7, 0x00, 0x3d, 0x85, 0xe1, + 0xba, 0x52, 0x4c, 0xbe, 0xa5, 0x65, 0xdc, 0x9b, 0x82, 0xd9, 0xf0, 0xf8, 0x80, 0x58, 0xb1, 0xa4, + 0x13, 0x4b, 0x4e, 0x9c, 0x99, 0x45, 0x78, 0x79, 0x3d, 0xf1, 0x3e, 0x7c, 0x9e, 0x80, 0xf4, 0xe7, + 0x12, 0x7a, 0x00, 0x6d, 0x32, 0xb1, 0x3f, 0xed, 0xcd, 0x86, 0xc7, 0x77, 0x88, 0x0d, 
0x4d, 0xeb, + 0xd2, 0x92, 0x52, 0x8b, 0x6a, 0x65, 0x4d, 0xcd, 0x64, 0x1c, 0x58, 0x65, 0xba, 0x46, 0x04, 0x0e, + 0xb8, 0xd0, 0x3f, 0xae, 0xe3, 0xc8, 0x2c, 0x8f, 0xff, 0xa2, 0x9e, 0x57, 0x17, 0x69, 0x77, 0x68, + 0xe9, 0x87, 0xfd, 0x51, 0xb0, 0xf4, 0xc3, 0xc1, 0x28, 0x5c, 0xfa, 0x61, 0x38, 0x8a, 0x8e, 0xbe, + 0xed, 0xc1, 0xb0, 0x63, 0xd2, 0x14, 0x3a, 0xbc, 0xce, 0xbc, 0xae, 0xd1, 0x3d, 0x18, 0x48, 0x96, + 0x73, 0xb9, 0x72, 0xce, 0x5d, 0x87, 0xc6, 0xb0, 0x4f, 0x4b, 0x26, 0x95, 0xf1, 0x1c, 0xa5, 0xb6, + 0x41, 0x8f, 0x60, 0xef, 0x94, 0xcb, 0xd8, 0xff, 0xf7, 0x1c, 0xf4, 0x79, 0x54, 0xc1, 0xa0, 0xa4, + 0x19, 0x2b, 0xeb, 0xb8, 0x6f, 0x6c, 0xdc, 0x25, 0xdd, 0x7d, 0x91, 0xe7, 0x7a, 0xfe, 0x82, 0xae, + 0xe5, 0x62, 0xae, 0x77, 0x3e, 0x5d, 0x4f, 0xfe, 0xeb, 0xbe, 0xed, 0xfe, 0x7c, 0x45, 0x85, 0x62, + 0x32, 0x75, 0x2c, 0xe8, 0x1c, 0x0e, 0x69, 0x55, 0x71, 0x45, 0x6d, 0x76, 0xc1, 0xad, 0x92, 0xfe, + 0x4e, 0x65, 0xb2, 0xdf, 0x5f, 0x3c, 0xd9, 0x6c, 0xb1, 0x77, 0xb5, 0xc5, 0xde, 0xcd, 0x16, 0x83, + 0x77, 0x2d, 0x06, 0x1f, 0x5b, 0x0c, 0x2e, 0x5b, 0x0c, 0x36, 0x2d, 0x06, 0x5f, 0x5a, 0x0c, 0xbe, + 0xb6, 0xd8, 0xbb, 0x69, 0x31, 0x78, 0xbf, 0xc3, 0xde, 0x66, 0x87, 0xbd, 0xab, 0x1d, 0xf6, 0x5e, + 0x0e, 0xcc, 0x43, 0x10, 0x59, 0x16, 0x98, 0x40, 0x1f, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc1, + 0x26, 0xeb, 0x81, 0x5f, 0x03, 0x00, 0x00, } func (this *RuleGroupDesc) Equal(that interface{}) bool { diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto index 16274cec013..de508848938 100644 --- a/pkg/ruler/rulespb/rules.proto +++ b/pkg/ruler/rulespb/rules.proto @@ -5,7 +5,7 @@ package rules; option go_package = "rulespb"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/any.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; diff --git a/pkg/scheduler/schedulerpb/scheduler.pb.go b/pkg/scheduler/schedulerpb/scheduler.pb.go index 1b6fda6e692..d3288f95b39 100644 --- a/pkg/scheduler/schedulerpb/scheduler.pb.go +++ b/pkg/scheduler/schedulerpb/scheduler.pb.go @@ -438,48 +438,48 @@ func init() { func init() { proto.RegisterFile("scheduler.proto", fileDescriptor_2b3fc28395a6d9c5) } var fileDescriptor_2b3fc28395a6d9c5 = []byte{ - // 650 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x4f, 0x13, 0x41, - 0x14, 0xde, 0x29, 0x6d, 0x81, 0x57, 0x94, 0x75, 0x00, 0xad, 0x0d, 0x0e, 0x4d, 0x63, 0x4c, 0x25, - 0xb1, 0x35, 0xd5, 0x44, 0x0f, 0xc4, 0xa4, 0xc2, 0x22, 0x8d, 0xb8, 0x85, 0xe9, 0x34, 0xfe, 0xb8, - 0x34, 0xb4, 0x1d, 0x5a, 0x02, 0xec, 0x2c, 0xb3, 0xbb, 0x92, 0xde, 0x3c, 0x7a, 0xf4, 0xcf, 0xf0, - 0x4f, 0xf1, 0x62, 0xc2, 0x91, 0x83, 0x07, 0x59, 0x2e, 0x1e, 0xf9, 0x13, 0x0c, 0xd3, 0x6d, 0xdd, - 0xd6, 0x16, 0xb8, 0xbd, 0xf7, 0xf6, 0xfb, 0x76, 0xde, 0xf7, 0xbd, 0x37, 0x03, 0xb3, 0x4e, 0xa3, - 0xcd, 0x9b, 0xde, 0x01, 0x97, 0x39, 0x5b, 0x0a, 0x57, 0xe0, 0x44, 0xbf, 0x60, 0xd7, 0x53, 0x4f, - 0x5a, 0x7b, 0x6e, 0xdb, 0xab, 0xe7, 0x1a, 0xe2, 0x30, 0xdf, 0x12, 0x2d, 0x91, 0x57, 0x98, 0xba, - 0xb7, 0xab, 0x32, 0x95, 0xa8, 0xa8, 0xcb, 0x4d, 0x3d, 0x0f, 0xc1, 0x8f, 0xf9, 0xce, 0x67, 0x7e, - 0x2c, 0xe4, 0xbe, 0x93, 0x6f, 0x88, 0xc3, 0x43, 0x61, 0xe5, 0xdb, 0xae, 0x6b, 0xb7, 0xa4, 0xdd, - 0xe8, 0x07, 0x5d, 0x56, 0xa6, 0x00, 0x78, 0xdb, 0xe3, 0x72, 0x8f, 0x4b, 0x26, 0x2a, 0xbd, 0xc3, - 0xf1, 0x22, 0x4c, 0x1f, 0x75, 0xab, 0xa5, 0xb5, 0x24, 0x4a, 0xa3, 0xec, 0x34, 0xfd, 0x57, 0xc8, - 0xfc, 0x44, 0x80, 0xfb, 0x58, 0x26, 0x02, 0x3e, 0x4e, 
0xc2, 0xe4, 0x25, 0xa6, 0x13, 0x50, 0xa2, - 0xb4, 0x97, 0xe2, 0x17, 0x90, 0xb8, 0x3c, 0x96, 0xf2, 0x23, 0x8f, 0x3b, 0x6e, 0x32, 0x92, 0x46, - 0xd9, 0x44, 0x61, 0x21, 0xd7, 0x6f, 0x65, 0x83, 0xb1, 0xad, 0xe0, 0x23, 0x0d, 0x23, 0x71, 0x16, - 0x66, 0x77, 0xa5, 0xb0, 0x5c, 0x6e, 0x35, 0x8b, 0xcd, 0xa6, 0xe4, 0x8e, 0x93, 0x9c, 0x50, 0xdd, - 0x0c, 0x97, 0xf1, 0x5d, 0x88, 0x7b, 0x8e, 0x6a, 0x37, 0xaa, 0x00, 0x41, 0x86, 0x33, 0x30, 0xe3, - 0xb8, 0x3b, 0xae, 0x63, 0x58, 0x3b, 0xf5, 0x03, 0xde, 0x4c, 0xc6, 0xd2, 0x28, 0x3b, 0x45, 0x07, - 0x6a, 0x99, 0xaf, 0x11, 0x98, 0x5b, 0x0f, 0xfe, 0x17, 0x76, 0xe1, 0x25, 0x44, 0xdd, 0x8e, 0xcd, - 0x95, 0x9a, 0xdb, 0x85, 0x87, 0xb9, 0xd0, 0x70, 0x72, 0x23, 0xf0, 0xac, 0x63, 0x73, 0xaa, 0x18, - 0xa3, 0xfa, 0x8e, 0x8c, 0xee, 0x3b, 0x64, 0xda, 0xc4, 0xa0, 0x69, 0xe3, 0x14, 0x0d, 0x99, 0x19, - 0xbb, 0xb1, 0x99, 0xc3, 0x56, 0xc4, 0x47, 0x58, 0xb1, 0x0f, 0x73, 0xa1, 0xc9, 0xf6, 0x44, 0xe2, - 0x57, 0x10, 0xbf, 0x84, 0x79, 0x4e, 0xe0, 0xc5, 0xa3, 0x01, 0x2f, 0x46, 0x30, 0x2a, 0x0a, 0x4d, - 0x03, 0x16, 0x9e, 0x87, 0x18, 0x97, 0x52, 0xc8, 0xc0, 0x85, 0x6e, 0x92, 0x59, 0x81, 0x45, 0x53, - 0xb8, 0x7b, 0xbb, 0x9d, 0x60, 0x83, 0x2a, 0x6d, 0xcf, 0x6d, 0x8a, 0x63, 0xab, 0xd7, 0xf0, 0xd5, - 0x5b, 0xb8, 0x04, 0x0f, 0xc6, 0xb0, 0x1d, 0x5b, 0x58, 0x0e, 0x5f, 0x5e, 0x81, 0x7b, 0x63, 0xa6, - 0x84, 0xa7, 0x20, 0x5a, 0x32, 0x4b, 0x4c, 0xd7, 0x70, 0x02, 0x26, 0x0d, 0x73, 0xbb, 0x6a, 0x54, - 0x0d, 0x1d, 0x61, 0x80, 0xf8, 0x6a, 0xd1, 0x5c, 0x35, 0x36, 0xf5, 0xc8, 0x72, 0x03, 0xee, 0x8f, - 0xd5, 0x85, 0xe3, 0x10, 0x29, 0xbf, 0xd5, 0x35, 0x9c, 0x86, 0x45, 0x56, 0x2e, 0xd7, 0xde, 0x15, - 0xcd, 0x8f, 0x35, 0x6a, 0x6c, 0x57, 0x8d, 0x0a, 0xab, 0xd4, 0xb6, 0x0c, 0x5a, 0x63, 0x86, 0x59, - 0x34, 0x99, 0x8e, 0xf0, 0x34, 0xc4, 0x0c, 0x4a, 0xcb, 0x54, 0x8f, 0xe0, 0x3b, 0x70, 0xab, 0xb2, - 0x51, 0x65, 0xac, 0x64, 0xbe, 0xa9, 0xad, 0x95, 0xdf, 0x9b, 0xfa, 0x44, 0xe1, 0x17, 0x0a, 0xf9, - 0xbd, 0x2e, 0x64, 0xef, 0x2a, 0x55, 0x21, 0x11, 0x84, 0x9b, 0x42, 0xd8, 0x78, 0x69, 0xc0, 0xee, - 0xff, 0xef, 0x6b, 0x6a, 0x69, 0xdc, 0x3c, 0x02, 0x6c, 0x46, 0xcb, 0xa2, 0xa7, 0x08, 0x5b, 0xb0, - 0x30, 0xd2, 0x32, 0xfc, 0x78, 0x80, 0x7f, 0xd5, 0x50, 0x52, 0xcb, 0x37, 0x81, 0x76, 0x27, 0x50, - 0xb0, 0x61, 0x3e, 0xac, 0xae, 0xbf, 0x4e, 0x1f, 0x60, 0xa6, 0x17, 0x2b, 0x7d, 0xe9, 0xeb, 0xae, - 0x56, 0x2a, 0x7d, 0xdd, 0xc2, 0x75, 0x15, 0xbe, 0x2e, 0x9e, 0x9c, 0x11, 0xed, 0xf4, 0x8c, 0x68, - 0x17, 0x67, 0x04, 0x7d, 0xf1, 0x09, 0xfa, 0xee, 0x13, 0xf4, 0xc3, 0x27, 0xe8, 0xc4, 0x27, 0xe8, - 0xb7, 0x4f, 0xd0, 0x1f, 0x9f, 0x68, 0x17, 0x3e, 0x41, 0xdf, 0xce, 0x89, 0x76, 0x72, 0x4e, 0xb4, - 0xd3, 0x73, 0xa2, 0x7d, 0x0a, 0x3f, 0xbb, 0xf5, 0xb8, 0x7a, 0x18, 0x9f, 0xfd, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x49, 0x11, 0xf9, 0x34, 0x9d, 0x05, 0x00, 0x00, + // 644 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x10, 0xf5, 0x86, 0x24, 0xc0, 0x84, 0xdf, 0x0f, 0x77, 0x81, 0x36, 0x8d, 0xe8, 0x12, 0x45, 0x55, + 0x95, 0x72, 0x48, 0xaa, 0xb4, 0x52, 0x7b, 0x40, 0x95, 0x52, 0x30, 0x25, 0x2a, 0x75, 0x60, 0xb3, + 0x51, 0xff, 0x5c, 0x22, 0x92, 0x2c, 0x09, 0x02, 0xbc, 0x66, 0x6d, 0x17, 0xe5, 0xd6, 0x63, 0x8f, + 0xfd, 0x18, 0xfd, 0x28, 0xbd, 0x54, 0xe2, 0xc8, 0xa1, 0x87, 0x62, 0x2e, 0x3d, 0xf2, 0x11, 0xaa, + 0x38, 0x76, 0xea, 0xa4, 0x0e, 0x70, 0x9b, 0x1d, 0xbf, 0xe7, 0x9d, 0xf7, 0x66, 0x66, 0x61, 0xde, + 0x6a, 0x75, 0x79, 0xdb, 0x39, 0xe2, 0xb2, 0x60, 0x4a, 0x61, 0x0b, 0x9c, 0x1a, 0x26, 0xcc, 0x66, + 0x66, 0xb1, 0x23, 0x3a, 0xc2, 0xcb, 0x17, 0xfb, 0xd1, 0x00, 0x92, 
0x79, 0xd6, 0x39, 0xb0, 0xbb, + 0x4e, 0xb3, 0xd0, 0x12, 0xc7, 0xc5, 0x53, 0xbe, 0xf7, 0x89, 0x9f, 0x0a, 0x79, 0x68, 0x15, 0x5b, + 0xe2, 0xf8, 0x58, 0x18, 0xc5, 0xae, 0x6d, 0x9b, 0x1d, 0x69, 0xb6, 0x86, 0xc1, 0x80, 0x95, 0x2b, + 0x01, 0xde, 0x75, 0xb8, 0x3c, 0xe0, 0x92, 0x89, 0x5a, 0x70, 0x07, 0x5e, 0x86, 0xd9, 0x93, 0x41, + 0xb6, 0xb2, 0x91, 0x46, 0x59, 0x94, 0x9f, 0xa5, 0x7f, 0x13, 0xb9, 0x1f, 0x08, 0xf0, 0x10, 0xcb, + 0x84, 0xcf, 0xc7, 0x69, 0x98, 0xee, 0x63, 0x7a, 0x3e, 0x25, 0x4e, 0x83, 0x23, 0x7e, 0x0e, 0xa9, + 0xfe, 0xb5, 0x94, 0x9f, 0x38, 0xdc, 0xb2, 0xd3, 0xb1, 0x2c, 0xca, 0xa7, 0x4a, 0x4b, 0x85, 0x61, + 0x29, 0x5b, 0x8c, 0xed, 0xf8, 0x1f, 0x69, 0x18, 0x89, 0xf3, 0x30, 0xbf, 0x2f, 0x85, 0x61, 0x73, + 0xa3, 0x5d, 0x6e, 0xb7, 0x25, 0xb7, 0xac, 0xf4, 0x94, 0x57, 0xcd, 0x78, 0x1a, 0xdf, 0x85, 0xa4, + 0x63, 0x79, 0xe5, 0xc6, 0x3d, 0x80, 0x7f, 0xc2, 0x39, 0x98, 0xb3, 0xec, 0x3d, 0xdb, 0xd2, 0x8c, + 0xbd, 0xe6, 0x11, 0x6f, 0xa7, 0x13, 0x59, 0x94, 0x9f, 0xa1, 0x23, 0xb9, 0xdc, 0x97, 0x18, 0x2c, + 0x6c, 0xfa, 0xff, 0x0b, 0xbb, 0xf0, 0x02, 0xe2, 0x76, 0xcf, 0xe4, 0x9e, 0x9a, 0xff, 0x4b, 0x0f, + 0x0b, 0xa1, 0x1e, 0x14, 0x22, 0xf0, 0xac, 0x67, 0x72, 0xea, 0x31, 0xa2, 0xea, 0x8e, 0x45, 0xd7, + 0x1d, 0x32, 0x6d, 0x6a, 0xd4, 0xb4, 0x49, 0x8a, 0xc6, 0xcc, 0x4c, 0xdc, 0xda, 0xcc, 0x71, 0x2b, + 0x92, 0x11, 0x56, 0x1c, 0xc2, 0x42, 0xa8, 0xb3, 0x81, 0x48, 0xfc, 0x12, 0x92, 0x7d, 0x98, 0x63, + 0xf9, 0x5e, 0x3c, 0x1a, 0xf1, 0x22, 0x82, 0x51, 0xf3, 0xd0, 0xd4, 0x67, 0xe1, 0x45, 0x48, 0x70, + 0x29, 0x85, 0xf4, 0x5d, 0x18, 0x1c, 0x72, 0x6b, 0xb0, 0xac, 0x0b, 0xfb, 0x60, 0xbf, 0xe7, 0x4f, + 0x50, 0xad, 0xeb, 0xd8, 0x6d, 0x71, 0x6a, 0x04, 0x05, 0x5f, 0x3f, 0x85, 0x2b, 0xf0, 0x60, 0x02, + 0xdb, 0x32, 0x85, 0x61, 0xf1, 0xd5, 0x35, 0xb8, 0x37, 0xa1, 0x4b, 0x78, 0x06, 0xe2, 0x15, 0xbd, + 0xc2, 0x54, 0x05, 0xa7, 0x60, 0x5a, 0xd3, 0x77, 0xeb, 0x5a, 0x5d, 0x53, 0x11, 0x06, 0x48, 0xae, + 0x97, 0xf5, 0x75, 0x6d, 0x5b, 0x8d, 0xad, 0xb6, 0xe0, 0xfe, 0x44, 0x5d, 0x38, 0x09, 0xb1, 0xea, + 0x1b, 0x55, 0xc1, 0x59, 0x58, 0x66, 0xd5, 0x6a, 0xe3, 0x6d, 0x59, 0xff, 0xd0, 0xa0, 0xda, 0x6e, + 0x5d, 0xab, 0xb1, 0x5a, 0x63, 0x47, 0xa3, 0x0d, 0xa6, 0xe9, 0x65, 0x9d, 0xa9, 0x08, 0xcf, 0x42, + 0x42, 0xa3, 0xb4, 0x4a, 0xd5, 0x18, 0xbe, 0x03, 0xff, 0xd5, 0xb6, 0xea, 0x8c, 0x55, 0xf4, 0xd7, + 0x8d, 0x8d, 0xea, 0x3b, 0x5d, 0x9d, 0x2a, 0xfd, 0x44, 0x21, 0xbf, 0x37, 0x85, 0x0c, 0x56, 0xa9, + 0x0e, 0x29, 0x3f, 0xdc, 0x16, 0xc2, 0xc4, 0x2b, 0x23, 0x76, 0xff, 0xbb, 0xaf, 0x99, 0x95, 0x49, + 0xfd, 0xf0, 0xb1, 0x39, 0x25, 0x8f, 0x9e, 0x20, 0x6c, 0xc0, 0x52, 0xa4, 0x65, 0xf8, 0xf1, 0x08, + 0xff, 0xba, 0xa6, 0x64, 0x56, 0x6f, 0x03, 0x1d, 0x74, 0xa0, 0x64, 0xc2, 0x62, 0x58, 0xdd, 0x70, + 0x9c, 0xde, 0xc3, 0x5c, 0x10, 0x7b, 0xfa, 0xb2, 0x37, 0xad, 0x56, 0x26, 0x7b, 0xd3, 0xc0, 0x0d, + 0x14, 0xbe, 0x2a, 0x9f, 0x5d, 0x10, 0xe5, 0xfc, 0x82, 0x28, 0x57, 0x17, 0x04, 0x7d, 0x76, 0x09, + 0xfa, 0xe6, 0x12, 0xf4, 0xdd, 0x25, 0xe8, 0xcc, 0x25, 0xe8, 0x97, 0x4b, 0xd0, 0x6f, 0x97, 0x28, + 0x57, 0x2e, 0x41, 0x5f, 0x2f, 0x89, 0x72, 0x76, 0x49, 0x94, 0xf3, 0x4b, 0xa2, 0x7c, 0x0c, 0xbf, + 0xae, 0xcd, 0xa4, 0xf7, 0x30, 0x3e, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0x88, 0x0c, 0xfe, 0x56, + 0x84, 0x05, 0x00, 0x00, } func (x FrontendToSchedulerType) String() string { diff --git a/pkg/scheduler/schedulerpb/scheduler.proto b/pkg/scheduler/schedulerpb/scheduler.proto index 3ae6437567a..eea28717b83 100644 --- a/pkg/scheduler/schedulerpb/scheduler.proto +++ b/pkg/scheduler/schedulerpb/scheduler.proto @@ -4,7 +4,7 @@ package schedulerpb; option go_package = "schedulerpb"; 
-import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; option (gogoproto.marshaler_all) = true; diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 11a1976b3de..1709bf12f1f 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -9,7 +9,7 @@ import ( "github.com/alecthomas/units" "github.com/pkg/errors" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" "github.com/thanos-io/thanos/pkg/store" "github.com/cortexproject/cortex/pkg/storage/bucket" @@ -170,7 +170,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.HeadChunksWriteBufferSize, "blocks-storage.tsdb.head-chunks-write-buffer-size-bytes", chunks.DefaultWriteBufferSize, "The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.") f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") - f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wal.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).") + f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).") f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 0, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.") f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. 
0 or less means disabled.") diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 985e2ef2954..d62ce229819 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1144,7 +1144,7 @@ func readSamplesFromChunks(rawChunks []storepb.AggrChunk) ([]sample, error) { } it := c.Iterator(nil) - for it.Next() { + for it.Next() != chunkenc.ValNone { if it.Err() != nil { return nil, it.Err() } diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore deleted file mode 100644 index cc7e53b46c0..00000000000 --- a/vendor/cloud.google.com/go/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -# Editors -.idea -.vscode -*.swp -.history - -# Test files -*.test -coverage.txt - -# Other -.DS_Store diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json deleted file mode 100644 index e33930b3c26..00000000000 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ /dev/null @@ -1,111 +0,0 @@ -{ - "accessapproval": "1.3.0", - "accesscontextmanager": "1.2.0", - "aiplatform": "1.17.0", - "analytics": "0.9.0", - "apigateway": "1.2.0", - "apigeeconnect": "1.2.0", - "apigeeregistry": "0.2.0", - "apikeys": "0.1.0", - "appengine": "1.3.0", - "area120": "0.4.0", - "artifactregistry": "1.4.0", - "asset": "1.4.0", - "assuredworkloads": "1.2.0", - "automl": "1.4.0", - "baremetalsolution": "0.2.0", - "batch": "0.1.0", - "beyondcorp": "0.1.0", - "billing": "1.2.0", - "binaryauthorization": "1.0.0", - "certificatemanager": "0.2.1", - "channel": "1.7.0", - "cloudbuild": "1.2.0", - "clouddms": "1.2.0", - "cloudtasks": "1.4.0", - "compute": "1.9.0", - "contactcenterinsights": "1.2.3", - "container": "1.3.1", - "containeranalysis": "0.4.0", - "datacatalog": "1.3.1", - "dataflow": "0.5.1", - "dataform": "0.2.0", - "datafusion": "1.3.0", - "datalabeling": "0.3.0", - "dataplex": "1.1.0", - "dataproc": "1.5.0", - "dataqna": "0.4.0", - "datastream": "1.0.0", - "deploy": "1.2.1", - "dialogflow": "1.12.1", - "dlp": "1.4.0", - "documentai": "1.5.0", - "domains": "0.5.0", - "essentialcontacts": "1.2.0", - "eventarc": "1.6.0", - "filestore": "1.2.0", - "functions": "1.5.0", - "gaming": "1.3.1", - "gkebackup": "0.1.0", - "gkeconnect": "0.3.0", - "gkehub": "0.8.0", - "gkemulticloud": "0.2.0", - "grafeas": "0.2.0", - "gsuiteaddons": "1.2.0", - "iam": "0.3.0", - "iap": "1.3.0", - "ids": "1.0.0", - "iot": "1.2.0", - "kms": "1.4.0", - "language": "1.3.0", - "lifesciences": "0.4.0", - "managedidentities": "1.2.0", - "mediatranslation": "0.3.0", - "memcache": "1.3.0", - "metastore": "1.3.0", - "monitoring": "1.6.0", - "networkconnectivity": "1.2.0", - "networkmanagement": "1.3.0", - "networksecurity": "0.3.1", - "notebooks": "1.0.0", - "optimization": "1.0.0", - "orchestration": "1.2.0", - "orgpolicy": "1.3.0", - "osconfig": "1.6.0", - "oslogin": "1.3.0", - "phishingprotection": "0.4.0", - "policytroubleshooter": "1.2.0", - "privatecatalog": "0.4.0", - "recaptchaenterprise/v2": "2.0.1", - "recommendationengine": "0.3.0", - "recommender": "1.4.0", - "redis": "1.6.0", - "resourcemanager": "1.2.0", - "resourcesettings": "1.2.0", - "retail": "1.5.0", - "run": "0.1.1", - "scheduler": "1.3.0", - "secretmanager": "1.5.0", - "security": "1.4.1", - "securitycenter": "1.10.0", - "servicecontrol": "1.3.0", - "servicedirectory": "1.3.0", - "servicemanagement": "1.3.1", - "serviceusage": "1.2.0", - "shell": "1.2.0", - "speech": "1.5.0", - "storagetransfer": 
"1.3.0", - "talent": "1.0.0", - "texttospeech": "1.3.0", - "tpu": "1.2.0", - "trace": "1.2.0", - "translate": "1.2.0", - "video": "1.7.0", - "videointelligence": "1.4.0", - "vision/v2": "2.1.0", - "vmmigration": "1.1.0", - "vpcaccess": "1.2.0", - "webrisk": "1.3.0", - "websecurityscanner": "1.2.0", - "workflows": "1.5.0" -} diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json deleted file mode 100644 index d9c9389061b..00000000000 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - ".": "0.104.0" -} diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md deleted file mode 100644 index 8d00d2e952d..00000000000 --- a/vendor/cloud.google.com/go/CHANGES.md +++ /dev/null @@ -1,2424 +0,0 @@ -# Changes - -## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) - - -### Features - -* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) - -## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) - - -### Features - -* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) - -## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) - - -### Bug Fixes - -* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35)) - -## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24) - - -### Features - -* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187)) - -### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03) - - -### Bug Fixes - -* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b)) - -## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20) - - -### Features - -* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682)) -* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) - - -### Bug Fixes - -* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095)) -* **internal/gapicgen:** don't gen version files for longrunning and debugger 
([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b)) -* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672)) -* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8)) -* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059)) - -## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04) - - -### Features - -* **analytics/admin:** add the `AcknowledgeUserDataCollection` operation which acknowledges the terms of user data collection for the specified property feat: add the new resource type `DataStream`, which is planned to eventually replace `WebDataStream`, `IosAppDataStream`, `AndroidAppDataStream` resources fix!: remove `GetEnhancedMeasurementSettings`, `UpdateEnhancedMeasurementSettingsRequest`, `UpdateEnhancedMeasurementSettingsRequest` operations from the API feat: add `CreateDataStream`, `DeleteDataStream`, `UpdateDataStream`, `ListDataStreams` operations to support the new `DataStream` resource feat: add `DISPLAY_VIDEO_360_ADVERTISER_LINK`, `DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL` fields to `ChangeHistoryResourceType` enum feat: add the `account` field to the `Property` type docs: update the documentation with a new list of valid values for `UserLink.direct_roles` field ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **assuredworkloads:** EU Regions and Support With Sovereign Controls ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5)) -* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5)) -* **dialogflow:** added export documentation method feat: added filter in list documentations request feat: added option to import custom metadata from Google Cloud Storage in reload document request feat: added option to apply partial update to the smart messaging allowlist in reload document request feat: added filter in list knowledge bases request ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **dialogflow:** removed OPTIONAL for speech model variant docs: added more docs for speech model variant and improved docs format for participant ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **recaptchaenterprise:** add new reCAPTCHA Enterprise fraud annotations ([3dd34a2](https://www.github.com/googleapis/google-cloud-go/commit/3dd34a262edbff63b9aece8faddc2ff0d98ce42a)) - - -### Bug Fixes - -* 
**artifactregistry:** fix resource pattern ID segment name ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **compute:** add parameter in compute bazel rules ([#692](https://www.github.com/googleapis/google-cloud-go/issues/692)) ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **profiler:** refine regular expression for parsing backoff duration in E2E tests ([#5229](https://www.github.com/googleapis/google-cloud-go/issues/5229)) ([4438aeb](https://www.github.com/googleapis/google-cloud-go/commit/4438aebca2ec01d4dbf22287aa651937a381e043)) -* **profiler:** remove certificate expiration workaround ([#5222](https://www.github.com/googleapis/google-cloud-go/issues/5222)) ([2da36c9](https://www.github.com/googleapis/google-cloud-go/commit/2da36c95f44d5f88fd93cd949ab78823cea74fe7)) - -## [0.99.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.98.0...v0.99.0) (2021-12-06) - - -### Features - -* **dialogflow/cx:** added `TelephonyTransferCall` in response message ([fe27098](https://www.github.com/googleapis/google-cloud-go/commit/fe27098e5d429911428821ded57384353e699774)) - -## [0.98.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.97.0...v0.98.0) (2021-12-03) - - -### Features - -* **aiplatform:** add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **aiplatform:** add featurestore service to aiplatform v1 feat: add metadata service to aiplatform v1 ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) -* **aiplatform:** Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8)) -* **aiplatform:** Tensorboard v1 protos release feat:Exposing a field for v1 CustomJob-Tensorboard integration. 
([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **binaryauthorization:** add new admission rule types to Policy feat: update SignatureAlgorithm enum to match algorithm names in KMS feat: add SystemPolicyV1Beta1 service ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **channel:** add resource type to ChannelPartnerLink ([c206948](https://www.github.com/googleapis/google-cloud-go/commit/c2069487f6af5bcb37d519afeb60e312e35e67d5)) -* **cloudtasks:** add C++ rules for Cloud Tasks ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **compute:** Move compute.v1 from googleapis-discovery to googleapis ([#675](https://www.github.com/googleapis/google-cloud-go/issues/675)) ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **compute:** Switch to string enums for compute ([#685](https://www.github.com/googleapis/google-cloud-go/issues/685)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **contactcenterinsights:** Add ability to update phrase matchers feat: Add issue model stats to time series feat: Add display name to issue model stats ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **contactcenterinsights:** Add WriteDisposition to BigQuery Export API ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **contactcenterinsights:** deprecate issue_matches docs: if conversation medium is unspecified, it will default to PHONE_CALL ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) -* **contactcenterinsights:** new feature flag disable_issue_modeling docs: fixed formatting issues in the reference documentation ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **contactcenterinsights:** remove feature flag disable_issue_modeling ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **datacatalog:** Added BigQueryDateShardedSpec.latest_shard_resource field feat: Added SearchCatalogResult.display_name field feat: Added SearchCatalogResult.description field ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **dataproc:** add Dataproc Serverless for Spark Batches API ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) -* **dataproc:** Add support for dataproc BatchController service ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6)) -* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) -* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) -* **dialogflow/cx:** added support for comparing between versions docs: clarified security settings API reference 
([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) -* **dialogflow/cx:** added support for Deployments with ListDeployments and GetDeployment apis feat: added support for DeployFlow api under Environments feat: added support for TestCasesConfig under Environment docs: added long running operation explanation for several apis fix!: marked resource name of security setting as not-required ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0)) -* **dialogflow/cx:** allow setting custom CA for generic webhooks and release CompareVersions API docs: clarify DLP template reader usage ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **dialogflow:** added support to configure security settings, language code and time zone on conversation profile ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **dialogflow:** support document metadata filter in article suggestion and smart reply model in human agent assistant ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a)) -* **dlp:** added deidentify replacement dictionaries feat: added field for BigQuery inspect template inclusion lists feat: added field to support infotype versioning ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **domains:** added library for Cloud Domains v1 API. Also added methods for the transfer-in flow docs: improved API comments ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6)) -* **functions:** Secret Manager integration fields 'secret_environment_variables' and 'secret_volumes' added feat: CMEK integration fields 'kms_key_name' and 'docker_repository' added ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **kms:** add OAEP+SHA1 to the list of supported algorithms ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0)) -* **kms:** add RPC retry information for MacSign, MacVerify, and GenerateRandomBytes Committer: [@bdhess](https://www.github.com/bdhess) ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) -* **kms:** add support for Raw PKCS[#1](https://www.github.com/googleapis/google-cloud-go/issues/1) signing keys ([58bea89](https://www.github.com/googleapis/google-cloud-go/commit/58bea89a3d177d5c431ff19310794e3296253353)) -* **monitoring/apiv3:** add CreateServiceTimeSeries RPC ([9e41088](https://www.github.com/googleapis/google-cloud-go/commit/9e41088bb395fbae0e757738277d5c95fa2749c8)) -* **monitoring/dashboard:** Added support for auto-close configurations ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **monitoring/metricsscope:** promote apiv1 to GA ([#5135](https://www.github.com/googleapis/google-cloud-go/issues/5135)) ([33c0f63](https://www.github.com/googleapis/google-cloud-go/commit/33c0f63e0e0ce69d9ef6e57b04d1b8cc10ed2b78)) -* **osconfig:** OSConfig: add OS policy assignment rpcs ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) -* **osconfig:** Update OSConfig API 
([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a)) -* **osconfig:** Update osconfig v1 and v1alpha RecurringSchedule.Frequency with DAILY frequency ([59e548a](https://www.github.com/googleapis/google-cloud-go/commit/59e548acc249c7bddd9c884c2af35d582a408c4d)) -* **recaptchaenterprise:** add reCAPTCHA Enterprise account defender API methods ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8)) -* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **security/privateca:** add IAMPolicy & Locations mix-in support ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) -* **securitycenter:** Added a new API method UpdateExternalSystem, which enables updating a finding w/ external system metadata. External systems are a child resource under finding, and are housed on the finding itself, and can also be filtered on in Notifications, the ListFindings and GroupFindings API ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **securitycenter:** Added mute related APIs, proto messages and fields ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897)) -* **securitycenter:** Added resource type and display_name field to the FindingResult, and supported them in the filter for ListFindings and GroupFindings. 
Also added display_name to the resource which is surfaced in NotificationMessage ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **securitycenter:** Added vulnerability field to the finding feat: Added type field to the resource which is surfaced in NotificationMessage ([090cc3a](https://www.github.com/googleapis/google-cloud-go/commit/090cc3ae0f8747a14cc904fc6d429e2f5379bb03)) -* **servicecontrol:** add C++ rules for many Cloud services ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **speech:** add result_end_time to SpeechRecognitionResult ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **speech:** added alternative_language_codes to RecognitionConfig feat: WEBM_OPUS codec feat: SpeechAdaptation configuration feat: word confidence feat: spoken punctuation and spoken emojis feat: hint boost in SpeechContext ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **texttospeech:** update v1 proto ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **workflows/executions:** add a stack_trace field to the Error messages specifying where the error occured feat: add call_log_level field to Execution messages doc: clarify requirement to escape strings within JSON arguments ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) - - -### Bug Fixes - -* **accesscontextmanager:** nodejs package name access-context-manager ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) -* **aiplatform:** Remove invalid resource annotations ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) -* **compute/metadata:** return an error when all retries have failed ([#5063](https://www.github.com/googleapis/google-cloud-go/issues/5063)) ([c792a0d](https://www.github.com/googleapis/google-cloud-go/commit/c792a0d13db019c9964efeee5c6bc85b07ca50fa)), refs [#5062](https://www.github.com/googleapis/google-cloud-go/issues/5062) -* **compute:** make parent_id fields required compute move and insert methods ([#686](https://www.github.com/googleapis/google-cloud-go/issues/686)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **compute:** Move compute_small protos under its own directory ([#681](https://www.github.com/googleapis/google-cloud-go/issues/681)) ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897)) -* **internal/gapicgen:** fix a compute filtering ([#5111](https://www.github.com/googleapis/google-cloud-go/issues/5111)) ([77aa19d](https://www.github.com/googleapis/google-cloud-go/commit/77aa19de7fc33a9e831e6b91bd324d6832b44d99)) -* **internal/godocfx:** only put TOC status on mod if all pkgs have same status ([#4974](https://www.github.com/googleapis/google-cloud-go/issues/4974)) ([309b59e](https://www.github.com/googleapis/google-cloud-go/commit/309b59e583d1bf0dd9ffe84223034eb8a2975d47)) -* **internal/godocfx:** replace * with HTML code ([#5049](https://www.github.com/googleapis/google-cloud-go/issues/5049)) 
([a8f7c06](https://www.github.com/googleapis/google-cloud-go/commit/a8f7c066e8d97120ae4e12963e3c9acc8b8906c2)) -* **monitoring/apiv3:** Reintroduce deprecated field/enum for backward compatibility docs: Use absolute link targets in comments ([45fd259](https://www.github.com/googleapis/google-cloud-go/commit/45fd2594d99ef70c776df26866f0a3b537e7e69e)) -* **profiler:** workaround certificate expiration issue in integration tests ([#4955](https://www.github.com/googleapis/google-cloud-go/issues/4955)) ([de9e465](https://www.github.com/googleapis/google-cloud-go/commit/de9e465bea8cd0580c45e87d2cbc2b610615b363)) -* **security/privateca:** include mixin protos as input for mixin rpcs ([479c2f9](https://www.github.com/googleapis/google-cloud-go/commit/479c2f90d556a106b25ebcdb1539d231488182da)) -* **security/privateca:** repair service config to enable mixins ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) -* **video/transcoder:** update nodejs package name to video-transcoder ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) - -## [0.97.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.96.0...v0.97.0) (2021-09-29) - - -### Features - -* **internal** add Retry func to testutil from samples repository [#4902](https://github.com/googleapis/google-cloud-go/pull/4902) - -## [0.96.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.95.0...v0.96.0) (2021-09-28) - - -### Features - -* **civil:** add IsEmpty function to time, date and datetime ([#4728](https://www.github.com/googleapis/google-cloud-go/issues/4728)) ([88bfa64](https://www.github.com/googleapis/google-cloud-go/commit/88bfa64d6df2f3bb7d41e0b8f56717dd3de790e2)), refs [#4727](https://www.github.com/googleapis/google-cloud-go/issues/4727) -* **internal/godocfx:** detect preview versions ([#4899](https://www.github.com/googleapis/google-cloud-go/issues/4899)) ([9b60844](https://www.github.com/googleapis/google-cloud-go/commit/9b608445ce9ebabbc87a50e85ce6ef89125031d2)) -* **internal:** provide wrapping for retried errors ([#4797](https://www.github.com/googleapis/google-cloud-go/issues/4797)) ([ce5f4db](https://www.github.com/googleapis/google-cloud-go/commit/ce5f4dbab884e847a2d9f1f8f3fcfd7df19a505a)) - - -### Bug Fixes - -* **internal/gapicgen:** restore fmting proto files ([#4789](https://www.github.com/googleapis/google-cloud-go/issues/4789)) ([5606b54](https://www.github.com/googleapis/google-cloud-go/commit/5606b54b97bb675487c6c138a4081c827218f933)) -* **internal/trace:** use xerrors.As for trace ([#4813](https://www.github.com/googleapis/google-cloud-go/issues/4813)) ([05fe61c](https://www.github.com/googleapis/google-cloud-go/commit/05fe61c5aa4860bdebbbe3e91a9afaba16aa6184)) - -## [0.95.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.1...v0.95.0) (2021-09-21) - -### Bug Fixes - -* **internal/gapicgen:** add a temporary import ([#4756](https://www.github.com/googleapis/google-cloud-go/issues/4756)) ([4d9c046](https://www.github.com/googleapis/google-cloud-go/commit/4d9c046b66a2dc205e2c14b676995771301440da)) -* **compute/metadata:** remove heavy gax dependency ([#4784](https://www.github.com/googleapis/google-cloud-go/issues/4784)) ([ea00264](https://www.github.com/googleapis/google-cloud-go/commit/ea00264428137471805f2ec67f04f3a5a42928fa)) - -### [0.94.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.0...v0.94.1) (2021-09-02) - - -### Bug Fixes - -* 
**compute/metadata:** fix retry logic to not panic on error ([#4714](https://www.github.com/googleapis/google-cloud-go/issues/4714)) ([75c63b9](https://www.github.com/googleapis/google-cloud-go/commit/75c63b94d2cf86606fffc3611f7e6150b667eedc)), refs [#4713](https://www.github.com/googleapis/google-cloud-go/issues/4713) - -## [0.94.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.92.0...v0.94.0) (2021-08-31) - - -### Features - -* **aiplatform:** add XAI, model monitoring, and index services to aiplatform v1 ([e385b40](https://www.github.com/googleapis/google-cloud-go/commit/e385b40a1e2ecf81f5fd0910de5c37275951f86b)) -* **analytics/admin:** add `GetDataRetentionSettings`, `UpdateDataRetentionSettings` methods to the API ([8467899](https://www.github.com/googleapis/google-cloud-go/commit/8467899ab6ebf0328c543bfb5fbcddeb2f53a082)) -* **asset:** Release of relationships in v1, Add content type Relationship to support relationship export Committer: lvv@ ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **assuredworkloads:** Add Canada Regions And Support compliance regime ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **cloudbuild/apiv1:** Add ability to configure BuildTriggers to create Builds that require approval before executing and ApproveBuild API to approve or reject pending Builds ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **cloudbuild/apiv1:** add script field to BuildStep message ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **cloudbuild/apiv1:** Update cloudbuild proto with the service_account for BYOSA Triggers. 
([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **compute/metadata:** retry error when talking to metadata service ([#4648](https://www.github.com/googleapis/google-cloud-go/issues/4648)) ([81c6039](https://www.github.com/googleapis/google-cloud-go/commit/81c6039503121f8da3de4f4cd957b8488a3ef620)), refs [#4642](https://www.github.com/googleapis/google-cloud-go/issues/4642) -* **dataproc:** remove apiv1beta2 client ([#4682](https://www.github.com/googleapis/google-cloud-go/issues/4682)) ([2248554](https://www.github.com/googleapis/google-cloud-go/commit/22485541affb1251604df292670a20e794111d3e)) -* **gaming:** support version reporting API ([cd65cec](https://www.github.com/googleapis/google-cloud-go/commit/cd65cecf15c4a01648da7f8f4f4d497772961510)) -* **gkehub:** Add request_id under `DeleteMembershipRequest` and `UpdateMembershipRequest` ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **internal/carver:** support carving batches ([#4623](https://www.github.com/googleapis/google-cloud-go/issues/4623)) ([2972d19](https://www.github.com/googleapis/google-cloud-go/commit/2972d194da19bedf16d76fda471c06a965cfdcd6)) -* **kms:** add support for Key Reimport ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) -* **metastore:** Added the Backup resource and Backup resource GetIamPolicy/SetIamPolicy to V1 feat: Added the RestoreService method to V1 ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **monitoring/dashboard:** Added support for logs-based alerts: https://cloud.google.com/logging/docs/alerting/log-based-alerts feat: Added support for user-defined labels on cloud monitoring's Service and ServiceLevelObjective objects fix!: mark required fields in QueryTimeSeriesRequest as required ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **osconfig:** Update osconfig v1 and v1alpha with WindowsApplication ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) -* **speech:** Add transcript normalization ([b31646d](https://www.github.com/googleapis/google-cloud-go/commit/b31646d1e12037731df4b5c0ba9f60b6434d7b9b)) -* **talent:** Add new commute methods in Search APIs feat: Add new histogram type 'publish_time_in_day' feat: Support filtering by requisitionId is ListJobs API ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **translate:** added v3 proto for online/batch document translation and updated v3beta1 proto for format conversion ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) - - -### Bug Fixes - -* **datastream:** Change a few resource pattern variables from camelCase to snake_case ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) - -## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16) - - -### Features - -* **all:** remove testing deps ([#4580](https://www.github.com/googleapis/google-cloud-go/issues/4580)) ([15c1eb9](https://www.github.com/googleapis/google-cloud-go/commit/15c1eb9730f0b514edb911161f9c59e8d790a5ec)), refs [#4061](https://www.github.com/googleapis/google-cloud-go/issues/4061) 
-* **internal/detect:** add helper to detect projectID from env ([#4582](https://www.github.com/googleapis/google-cloud-go/issues/4582)) ([cc65d94](https://www.github.com/googleapis/google-cloud-go/commit/cc65d945688ac446602bce6ef86a935714dfe2f8)), refs [#1294](https://www.github.com/googleapis/google-cloud-go/issues/1294) -* **spannertest:** Add validation of duplicated column names ([#4611](https://www.github.com/googleapis/google-cloud-go/issues/4611)) ([84f86a6](https://www.github.com/googleapis/google-cloud-go/commit/84f86a605c809ab36dd3cb4b3ab1df15a5302083)) - -## [0.91.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.90.0...v0.91.0) (2021-08-11) - - -### Features - -* **.github:** support dynamic submodule detection ([#4537](https://www.github.com/googleapis/google-cloud-go/issues/4537)) ([4374b90](https://www.github.com/googleapis/google-cloud-go/commit/4374b907e9f166da6bd23a8ef94399872b00afd6)) -* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3)) -* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3)) -* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products docs: reorder some fields ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **dialogflow:** expose `Locations` service to get/list avaliable locations of Dialogflow products; fixed some API annotations ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **kms:** add support for HMAC, Variable Key Destruction, and GenerateRandom ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **speech:** add total_billed_time response field ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **video/transcoder:** Add video cropping feature feat: Add video padding feature feat: Add ttl_after_completion_days field to Job docs: Update proto documentation docs: Indicate v1beta1 deprecation ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) - - -### Bug Fixes - -* **functions:** Updating behavior of source_upload_url during Get/List function calls ([381a494](https://www.github.com/googleapis/google-cloud-go/commit/381a494c29da388977b0bdda2177058328cc4afe)) - -## [0.90.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.89.0...v0.90.0) (2021-08-03) - - -### ⚠ 
BREAKING CHANGES - -* **compute:** add pagination and an Operation wrapper (#4542) - -### Features - -* **compute:** add pagination and an Operation wrapper ([#4542](https://www.github.com/googleapis/google-cloud-go/issues/4542)) ([36f4649](https://www.github.com/googleapis/google-cloud-go/commit/36f46494111f6d16d103fb208d49616576dbf91e)) -* **internal/godocfx:** add status to packages and TOCs ([#4547](https://www.github.com/googleapis/google-cloud-go/issues/4547)) ([c6de69c](https://www.github.com/googleapis/google-cloud-go/commit/c6de69c710561bb2a40eff05417df4b9798c258a)) -* **internal/godocfx:** mark status of deprecated items ([#4525](https://www.github.com/googleapis/google-cloud-go/issues/4525)) ([d571c6f](https://www.github.com/googleapis/google-cloud-go/commit/d571c6f4337ec9c4807c230cd77f53b6e7db6437)) - - -### Bug Fixes - -* **internal/carver:** don't tag commits ([#4518](https://www.github.com/googleapis/google-cloud-go/issues/4518)) ([c355eb8](https://www.github.com/googleapis/google-cloud-go/commit/c355eb8ecb0bb1af0ccf55e6262ca8c0d5c7e352)) - -## [0.89.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.88.0...v0.89.0) (2021-07-29) - - -### Features - -* **assuredworkloads:** Add EU Regions And Support compliance regime ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **datacatalog:** Added support for BigQuery connections entries feat: Added support for BigQuery routines entries feat: Added usage_signal field feat: Added labels field feat: Added ReplaceTaxonomy in Policy Tag Manager Serialization API feat: Added support for public tag templates feat: Added support for rich text tags docs: Documentation improvements ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **datafusion:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97)) -* **iap:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97)) -* **internal/carver:** add tooling to help carve out sub-modules ([#4417](https://www.github.com/googleapis/google-cloud-go/issues/4417)) ([a7e28f2](https://www.github.com/googleapis/google-cloud-go/commit/a7e28f2557469562ae57e5174b41bdf8fce62b63)) -* **networkconnectivity:** Add files for Network Connectivity v1 API. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **retail:** Add restricted Retail Search features for Retail API v2. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **secretmanager:** In Secret Manager, users can now use filter to customize the output of ListSecrets/ListSecretVersions calls ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **securitycenter:** add finding_class and indicator fields in Finding ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **speech:** add total_billed_time response field. fix!: phrase_set_id is required field in CreatePhraseSetRequest. fix!: custom_class_id is required field in CreateCustomClassRequest. 
([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **storagetransfer:** start generating apiv1 ([#4505](https://www.github.com/googleapis/google-cloud-go/issues/4505)) ([f2d531d](https://www.github.com/googleapis/google-cloud-go/commit/f2d531d2b519efa58e0f23a178bbebe675c203c3)) - - -### Bug Fixes - -* **internal/gapicgen:** exec Stdout already set ([#4509](https://www.github.com/googleapis/google-cloud-go/issues/4509)) ([41246e9](https://www.github.com/googleapis/google-cloud-go/commit/41246e900aaaea92a9f956e92956c40c86f4cb3a)) -* **internal/gapicgen:** tidy all after dep bump ([#4515](https://www.github.com/googleapis/google-cloud-go/issues/4515)) ([9401be5](https://www.github.com/googleapis/google-cloud-go/commit/9401be509c570c3c55694375065c84139e961857)), refs [#4434](https://www.github.com/googleapis/google-cloud-go/issues/4434) - -## [0.88.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.87.0...v0.88.0) (2021-07-22) - - -### ⚠ BREAKING CHANGES - -* **cloudbuild/apiv1:** Proto had a prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported. - -### Features - -* **cloudbuild/apiv1:** add a WorkerPools API ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547)) -* **cloudbuild/apiv1:** Implementation of Build Failure Info: - Added message FailureInfo field ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547)) -* **osconfig/agentendpoint:** OSConfig AgentEndpoint: add basic os info to RegisterAgentRequest, add WindowsApplication type to Inventory ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c)) -* **resourcesettings:** Publish Cloud ResourceSettings v1 API ([43ad3cb](https://www.github.com/googleapis/google-cloud-go/commit/43ad3cb7be981fff9dc5dcf4510f1cd7bea99957)) - - -### Bug Fixes - -* **internal/godocfx:** set exit code, print cmd output, no go get ... ([#4445](https://www.github.com/googleapis/google-cloud-go/issues/4445)) ([cc70f77](https://www.github.com/googleapis/google-cloud-go/commit/cc70f77ac279a62e24e1b07f6e53fd126b7286b0)) -* **internal:** detect module for properly generating docs URLs ([#4460](https://www.github.com/googleapis/google-cloud-go/issues/4460)) ([1eaba8b](https://www.github.com/googleapis/google-cloud-go/commit/1eaba8bd694f7552a8e3e09b4f164de8b6ca23f0)), refs [#4447](https://www.github.com/googleapis/google-cloud-go/issues/4447) -* **kms:** Updating WORKSPACE files to use the newest version of the Typescript generator. 
([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c)) - -## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13) - - -### Features - -* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) -* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567)) -* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) -* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) -* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) - - -### Bug Fixes - -* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167)) -* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391)) - - -### Miscellaneous Chores - -* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15)) - -## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01) - - -### Features - -* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5)) - -## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30) - - -### Features - -* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) -* **datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) -* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) -* **documentai:** update document.proto, add the processor management methods. 
([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) -* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) -* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) -* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00)) -* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5)) -* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6)) -* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) -* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) - - -### Bug Fixes - -* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260) -* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41)) -* **kms:** replace IAMPolicy mixin in service config. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) -* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) - -## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09) - - -### Features - -* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) -* **apigeeconnect:** start generating abiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) -* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b)) -* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2)) -* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) -* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. 
([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b)) -* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345)) -* **lifesciences:** strat generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) -* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b)) - - -### Bug Fixes - -* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a)) -* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) - -## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02) - - -### Features - -* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) -* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda)) -* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232)) -* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) -* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834)) -* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) -* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098)) -* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) -* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) - -## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17) - - -### Features - -* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) -* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. 
([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) -* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b)) -* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) -* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763)) -* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) -* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) -* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540)) -* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) -* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. 
([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c)) -* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4)) -* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c)) -* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481)) -* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38)) -* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808)) -* **metastore:** start generateing apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310)) -* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f)) -* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) -* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) -* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) -* **speech:** Support for spoken punctuation and spoken emojis. 
([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) - - -### Bug Fixes - -* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e)) -* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25)) -* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5)) -* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34)) -* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037) -* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) - - -### Miscellaneous Chores - -* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7)) - -## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) - - -### Features - -* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) -* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4)) -* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) -* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. 
([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) -* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) - - -### Bug Fixes - -* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678)) - -## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23) - - -### ⚠ BREAKING CHANGES - -* **all:** This is a breaking change in dialogflow - -### Features - -* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) -* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) -* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) -* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) -* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634) -* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61)) -* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf)) -* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57)) -* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f)) -* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) -* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e)) -* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) -* **speech:** Support output transcript to GCS for LongRunningRecognize. 
([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) -* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee)) - - -### Miscellaneous Chores - -* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) - -## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10) - - -### Features - -* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f)) -* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) -* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) -* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441)) -* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25)) -* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e)) -* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548)) -* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650)) -* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4)) -* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) - - -### Bug Fixes - -* **analytics/admin:** add 
`https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) -* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495)) -* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80)) -* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef)) -* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c)) -* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e)) - -## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22) - - -### Features - -* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de)) -* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) -* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) -* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) -* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) -* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) -* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) - - -### Bug Fixes - -* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. 
([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) -* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) - -## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16) - - -### Features - -* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04)) -* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) -* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2)) -* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418)) -* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) -* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f)) -* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac)) - - -### Bug Fixes - -* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **errorreporting:** Update bazel builds for ER client libraries. 
([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) -* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd)) -* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) -* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) -* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363)) -* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) - -## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02) - - -### Features - -* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) -* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db)) -* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d)) -* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) -* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e)) -* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) -* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) -* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) -* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **appengine:** start generating apiv1 
([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7)) -* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3)) -* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) -* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf)) -* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5)) -* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7)) -* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) -* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) -* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102)) -* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149)) -* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b)) -* **internal/gapicgen:** change commit formatting 
to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e)) -* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af)) -* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279)) -* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359)) -* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f)) -* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531)) -* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) -* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8)) -* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60)) -* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) -* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) -* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a)) -* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db)) - - -### Bug Fixes - -* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f)) -* **internal/godocfx:** add TOC element for module root package 
([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a)) -* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71)) - -## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448) -* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965)) - - -### Bug Fixes - -* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a)) - -## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374) -* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84)) -* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba)) - - -### Bug Fixes - -* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5)) - -## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) 
[#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199) -* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff)) -* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be)) -* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999)) -* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a)) - - -### Bug Fixes - -* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0)) - -## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119) - - -### Bug Fixes - -* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44)) -* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a)) - - -## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044) -* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3)) -* **internal:** auto-run godocfx on new mods 
([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42)) -* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc)) -* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171)) -* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6)) -* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2)) -* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900)) - - -### Bug Fixes - -* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb)) -* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7)) - -## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025) -* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44)) -* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714)) - -## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14) - -This is an empty release that was created solely to aid in pubsublite's module -carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. 
- -## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14) - - -### Features - -* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045)) -* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958) -* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33)) -* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf)) -* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b)) -* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e)) -* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85)) - - -### Bug Fixes - -* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0)) - -## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935) - -## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) 
[#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864) -* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568)) -* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72)) -* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830)) -* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e)) -* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61)) - - -### Bug Fixes - -* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d)) -* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7)) -* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883) - -## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781) -* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93)) -* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521)) -* **cloudbuild:** Start generating apiv1/v3 
([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64)) -* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5)) -* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893)) -* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4)) -* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901)) -* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580)) - - -### Bug Fixes - -* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639)) -* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c)) -* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835) - - -### Reverts - -* **cloudbuild): "feat(cloudbuild:** Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557)) - -## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27) - - -### Announcements - -The following changes will be included in an upcoming release and are not -included in this one. - -#### Default Deadlines - -By default, non-streaming methods, like Create or Get methods, will have a -default deadline applied to the context provided at call time, unless a context -deadline is already set. Streaming methods have no default deadline and will run -indefinitely, unless the context provided at call time contains a deadline. - -To opt-out of this behavior, set the environment variable -`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to -initializing a client. This opt-out mechanism will be removed in a later -release, with a notice similar to this one ahead of its removal. 
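
As a rough illustration of the opt-out described in the announcement above: the environment variable only needs to be set before a client is constructed. The sketch below uses a Pub/Sub client purely as an example; the project ID and the choice of client are placeholders, not part of the original announcement.

```go
package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/pubsub"
)

func main() {
	// Opt out of the default per-call deadlines; this must happen
	// before any client is initialized.
	os.Setenv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE", "true")

	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatalf("pubsub.NewClient: %v", err)
	}
	defer client.Close()
	// Calls made with this client no longer receive a default deadline.
}
```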
- - -### Features - -* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764) - - -### Bug Fixes - -* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1)) -* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d)) - -## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706) -* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e)) - -## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05) - - -### Features - -* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04)) -* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c)) -* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b)) -* **gaming:** start generate apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601)) -* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32)) - -## v0.62.0 - -### Announcements - -- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was - merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606). - This fixed a broken API response for `DiagnoseCluster`. When polling on the - Long Running Operation(LRO), the API now returns - `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an - `error` before. - -### Changes - -- all: - - Updated all direct dependencies. - - Updated contributing guidelines to suggest allowing edits from maintainers. -- billing/budgets: - - Start generating client for apiv1beta1. -- functions: - - Start generating client for apiv1. -- notebooks: - - Start generating client apiv1beta1. 
-- profiler: - - update proftest to support parsing floating-point backoff durations. - - Fix the regexp used to parse backoff duration. -- Various updates to autogenerated clients. - -## v0.61.0 - -### Changes - -- all: - - Update all direct dependencies. -- dashboard: - - Start generating client for apiv1. -- policytroubleshooter: - - Start generating client for apiv1. -- profiler: - - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`. -- Various updates to autogenerated clients. - -## v0.60.0 - -### Changes - -- all: - - Refactored examples to reduce module dependencies. - - Update sub-modules to use cloud.google.com/go v0.59.0. -- internal: - - Start generating client for gaming apiv1beta. -- Various updates to autogenerated clients. - -## v0.59.0 - -### Announcements - -goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our -contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept -pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to -point to GitHub. - -### Changes - -- all: - - Remove dependency on honnef.co/go/tools. - - Update our contributing instructions now that we use GitHub for reviews. - - Remove some un-inclusive terminology. -- compute/metadata: - - Pass cancelable context to DNS lookup. -- .github: - - Update templates issue/PR templates. -- internal: - - Bump several clients to GA. - - Fix GoDoc badge source. - - Several automation changes related to the move to GitHub. - - Start generating a client for asset v1p5beta1. -- Various updates to autogenerated clients. - -## v0.58.0 - -### Deprecation notice - -- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking - changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`. - -### Changes - -- all: - - The remaining uses of gtransport.Dial have been removed. - - The `genproto` dependency has been updated to a version that makes use of - new `protoreflect` library. For more information on these protobuf changes - please see the following post from the official Go blog: - https://blog.golang.org/protobuf-apiv2. -- internal: - - Started generation of datastore admin v1 client. - - Updated protofuf version used for generation to 3.12.X. - - Update the release levels for several APIs. - - Generate clients with protoc-gen-go@v1.4.1. -- monitoring: - - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation - notice above). -- profiler: - - Fixed flakiness in tests. -- Various updates to autogenerated clients. - -## v0.57.0 - -- all: - - Update module dependency `google.golang.org/api` to `v0.21.0`. -- errorreporting: - - Add exported SetGoogleClientInfo wrappers to manual file. -- expr/v1alpha1: - - Deprecate client. This client will be removed in a future release. -- internal: - - Fix possible data race in TestTracer. - - Pin versions of tools used for generation. - - Correct the release levels for BigQuery APIs. - - Start generation osconfig v1. -- longrunning: - - Add exported SetGoogleClientInfo wrappers to manual file. -- monitoring: - - Stop generation of monitoring/apiv3 because of incoming breaking change. -- trace: - - Add exported SetGoogleClientInfo wrappers to manual file. -- Various updates to autogenerated clients. 
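
The v0.61.0 profiler note above mentions re-enabling OpenCensus telemetry via `profiler.Config.EnableOCTelemetry`. A minimal sketch of what that might look like follows; the service name and version are placeholders and the surrounding setup is assumed, not taken from the changelog.

```go
package main

import (
	"log"

	"cloud.google.com/go/profiler"
)

func main() {
	cfg := profiler.Config{
		Service:           "my-service", // placeholder service name
		ServiceVersion:    "1.0.0",      // placeholder version
		EnableOCTelemetry: true,         // re-enable OpenCensus telemetry for profiler requests
	}
	if err := profiler.Start(cfg); err != nil {
		log.Fatalf("profiler.Start: %v", err)
	}
	// The profiler then runs in the background for the lifetime of the program.
}
```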
- -## v0.56.0 - -- secretmanager: - - add IAM helper -- profiler: - - try all us-west1 zones for integration tests -- internal: - - add config to generate webrisk v1 - - add repo and commit to buildcop invocation - - add recaptchaenterprise v1 generation config - - update microgenerator to v0.12.5 - - add datacatalog client - - start generating security center settings v1beta - - start generating osconfig agentendpoint v1 - - setup generation for bigquery/connection/v1beta1 -- all: - - increase continous testing timeout to 45m - - various updates to autogenerated clients. - -## v0.55.0 - -- Various updates to autogenerated clients. - -## v0.54.0 - -- all: - - remove unused golang.org/x/exp from mod file - - update godoc.org links to pkg.go.dev -- compute/metadata: - - use defaultClient when http.Client is nil - - remove subscribeClient -- iam: - - add support for v3 policy and IAM conditions -- Various updates to autogenerated clients. - -## v0.53.0 - -- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers). - - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API. -- profiler: remove symbolization (drops support for go1.10) -- Various updates to autogenerated clients. - -## v0.52.0 - -- internal/gapicgen: multiple improvements related to library generation. -- compute/metadata: unset ResponseHeaderTimeout in defaultClient -- docs: fix link to KMS in README.md -- Various updates to autogenerated clients. - -## v0.51.0 - -- secretmanager: - - add IAM helper for generic resource IAM handle -- cloudbuild: - - migrate to microgen in a major version -- Various updates to autogenerated clients. - -## v0.50.0 - -- profiler: - - Support disabling CPU profile collection. - - Log when a profile creation attempt begins. -- compute/metadata: - - Fix panic on malformed URLs. - - InstanceName returns actual instance name. -- Various updates to autogenerated clients. - -## v0.49.0 - -- functions/metadata: - - Handle string resources in JSON unmarshaller. -- Various updates to autogenerated clients. - -## v0.48.0 - -- Various updates to autogenerated clients - -## v0.47.0 - -This release drops support for Go 1.9 and Go 1.10: we continue to officially -support Go 1.11, Go 1.12, and Go 1.13. - -- Various updates to autogenerated clients. -- Add cloudbuild/apiv1 client. - -## v0.46.3 - -This is an empty release that was created solely to aid in storage's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.46.2 - -This is an empty release that was created solely to aid in spanner's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.46.1 - -This is an empty release that was created solely to aid in firestore's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.46.0 - -- spanner: - - Retry "Session not found" for read-only transactions. - - Retry aborted PDMLs. -- spanner/spannertest: - - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly. -- storage: - - Add HMACKeyOptions. - - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL, - DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice - StorageClasses but they are still acceptable values. -- trace: - - Remove cloud.google.com/go/trace. 
Package cloud.google.com/go/trace has been - marked OBSOLETE for several years: it is now no longer provided. If you - relied on this package, please vendor it or switch to using - https://cloud.google.com/trace/docs/setup/go (which obsoleted it). - -## v0.45.1 - -This is an empty release that was created solely to aid in pubsub's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.45.0 - -- compute/metadata: - - Add Email method. -- storage: - - Fix duplicated retry logic. - - Add ReaderObjectAttrs.StartOffset. - - Support reading last N bytes of a file when a negative range is given, such - as `obj.NewRangeReader(ctx, -10, -1)`. - - Add HMACKey listing functionality. -- spanner/spannertest: - - Support primary keys with no columns. - - Fix MinInt64 parsing. - - Implement deletion of key ranges. - - Handle reads during a read-write transaction. - - Handle returning DATE values. -- pubsub: - - Fix Ack/Modack request size calculation. -- logging: - - Add auto-detection of monitored resources on GAE Standard. - -## v0.44.3 - -This is an empty release that was created solely to aid in bigtable's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.44.2 - -This is an empty release that was created solely to aid in bigquery's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.44.1 - -This is an empty release that was created solely to aid in datastore's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.44.0 - -- datastore: - - Interface elements whose underlying types are supported, are now supported. - - Reduce time to initial retry from 1s to 100ms. -- firestore: - - Add Increment transformation. -- storage: - - Allow emulator with STORAGE_EMULATOR_HOST. - - Add methods for HMAC key management. -- pubsub: - - Add PublishCount and PublishLatency measurements. - - Add DefaultPublishViews and DefaultSubscribeViews for convenience of - importing all views. - - Add add Subscription.PushConfig.AuthenticationMethod. -- spanner: - - Allow emulator usage with SPANNER_EMULATOR_HOST. - - Add cloud.google.com/go/spanner/spannertest, a spanner emulator. - - Add cloud.google.com/go/spanner/spansql which contains types and a parser - for the Cloud Spanner SQL dialect. -- asset: - - Add apiv1p2beta1 client. - -## v0.43.0 - -This is an empty release that was created solely to aid in logging's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.42.0 - -- bigtable: - - Add an admin method to update an instance and clusters. - - Fix bttest regex matching behavior for alternations (things like `|a`). - - Expose BlockAllFilter filter. -- bigquery: - - Add Routines API support. -- storage: - - Add read-only Bucket.LocationType. -- logging: - - Add TraceSampled to Entry. - - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context. -- pubsub: - - Add Cloud Key Management to TopicConfig. - - Change ExpirationPolicy to optional.Duration. -- automl: - - Add apiv1beta1 client. -- iam: - - Fix compilation problem with iam/credentials/apiv1. - -## v0.41.0 - -- bigtable: - - Check results from PredicateFilter in bttest, which fixes certain false matches. 
-- profiler: - - debugLog checks user defined logging options before logging. -- spanner: - - PartitionedUpdates respect query parameters. - - StartInstance allows specifying cloud API access scopes. -- bigquery: - - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics. -- firestore: - - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll. -- replay: - - Change references to IPv4 addresses to localhost, making replay compatible with IPv6. - -## v0.40.0 - -- all: - - Update to protobuf-golang v1.3.1. -- datastore: - - Attempt to decode GAE-encoded keys if initial decoding attempt fails. - - Support integer time conversion. -- pubsub: - - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow, - this value should be adjusted higher. - - Use IPv6 compatible target in testutil. -- bigtable: - - Fix Latin-1 regexp filters in bttest, allowing \C. - - Expose PassAllFilter. -- profiler: - - Add log messages for slow path in start. - - Fix start to allow retry until success. -- firestore: - - Add admin client. -- containeranalysis: - - Add apiv1 client. -- grafeas: - - Add apiv1 client. - -## 0.39.0 - -- bigtable: - - Implement DeleteInstance in bttest. - - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest. -- bigquery: - - Move RequirePartitionFilter outside of TimePartioning. - - Expose models API. -- firestore: - - Allow array values in create and update calls. - - Add CollectionGroup method. -- pubsub: - - Add ExpirationPolicy to Subscription. -- storage: - - Add V4 signing. -- rpcreplay: - - Match streams by first sent request. This further improves rpcreplay's - ability to distinguish streams. -- httpreplay: - - Set up Man-In-The-Middle config only once. This should improve proxy - creation when multiple proxies are used in a single process. - - Remove error on empty Content-Type, allowing requests with no Content-Type - header but a non-empty body. -- all: - - Fix an edge case bug in auto-generated library pagination by properly - propagating pagetoken. - -## 0.38.0 - -This update includes a substantial reduction in our transitive dependency list -by way of updating to opencensus@v0.21.0. - -- spanner: - - Error implements GRPCStatus, allowing status.Convert. -- bigtable: - - Fix a bug in bttest that prevents single column queries returning results - that match other filters. - - Remove verbose retry logging. -- logging: - - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and - rune replace manually. -- recaptchaenterprise: - - Add v1beta1 client. -- phishingprotection: - - Add v1beta1 client. - -## 0.37.4 - -This patch releases re-builds the go.sum. This was not possible in the -previous release. - -- firestore: - - Add sentinel value DetectProjectID for auto-detecting project ID. - - Add OpenCensus tracing for public methods. - - Marked stable. All future changes come with a backwards compatibility - guarantee. - - Removed firestore/apiv1beta1. All users relying on this low-level library - should migrate to firestore/apiv1. Note that most users should use the - high-level firestore package instead. -- pubsub: - - Allow large messages in synchronous pull case. - - Cap bundler byte limit. This should prevent OOM conditions when there are - a very large number of message publishes occurring. -- storage: - - Add ETag to BucketAttrs and ObjectAttrs. -- datastore: - - Removed some non-sensical OpenCensus traces. 
-- webrisk: - - Add v1 client. -- asset: - - Add v1 client. -- cloudtasks: - - Add v2 client. - -## 0.37.3 - -This patch release removes github.com/golang/lint from the transitive -dependency list, resolving `go get -u` problems. - -Note: this release intentionally has a broken go.sum. Please use v0.37.4. - -## 0.37.2 - -This patch release is mostly intended to bring in v0.3.0 of -google.golang.org/api, which fixes a GCF deployment issue. - -Note: we had to-date accidentally marked Redis as stable. In this release, we've -fixed it by downgrading its documentation to alpha, as it is in other languages -and docs. - -- all: - - Document context in generated libraries. - -## 0.37.1 - -Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which -introduces a new oauth2 url. - -## 0.37.0 - -- spanner: - - Add BatchDML method. - - Reduced initial time between retries. -- bigquery: - - Produce better error messages for InferSchema. - - Add logical type control for avro loads. - - Add support for the GEOGRAPHY type. -- datastore: - - Add sentinel value DetectProjectID for auto-detecting project ID. - - Allow flatten tag on struct pointers. - - Fixed a bug that caused queries to panic with invalid queries. Instead they - will now return an error. -- profiler: - - Add ability to override GCE zone and instance. -- pubsub: - - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more - consistently retry specific error codes based on whether they're idempotent - or non-idempotent. -- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing - the Content-Length header to be dropped. -- iot: - - Add new apiv1 client. -- securitycenter: - - Add new apiv1 client. -- cloudscheduler: - - Add new apiv1 client. - -## 0.36.0 - -- spanner: - - Reduce minimum retry backoff from 1s to 100ms. This makes time between - retries much faster and should improve latency. -- storage: - - Add support for Bucket Policy Only. -- kms: - - Add ResourceIAM helper method. - - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM. -- firestore: - - Switch from v1beta1 API to v1 API. - - Allow emulator with FIRESTORE_EMULATOR_HOST. -- bigquery: - - Add NumLongTermBytes to Table. - - Add TotalBytesProcessedAccuracy to QueryStatistics. -- irm: - - Add new v1alpha2 client. -- talent: - - Add new v4beta1 client. -- rpcreplay: - - Fix connection to work with grpc >= 1.17. - - It is now required for an actual gRPC server to be running for Dial to - succeed. - -## 0.35.1 - -- spanner: - - Adds OpenCensus views back to public API. - -## v0.35.0 - -- all: - - Add go.mod and go.sum. - - Switch usage of gax-go to gax-go/v2. -- bigquery: - - Fix bug where time partitioning could not be removed from a table. - - Fix panic that occurred with empty query parameters. -- bttest: - - Fix bug where deleted rows were returned by ReadRows. -- bigtable/emulator: - - Configure max message size to 256 MiB. -- firestore: - - Allow non-transactional queries in transactions. - - Allow StartAt/EndBefore on direct children at any depth. - - QuerySnapshotIterator.Stop may be called in an error state. - - Fix bug the prevented reset of transaction write state in between retries. -- functions/metadata: - - Make Metadata.Resource a pointer. -- logging: - - Make SpanID available in logging.Entry. -- metadata: - - Wrap !200 error code in a typed err. -- profiler: - - Add function to check if function name is within a particular file in the - profile. - - Set parent field in create profile request. 
- - Return kubernetes client to start cluster, so client can be used to poll - cluster. - - Add function for checking if filename is in profile. -- pubsub: - - Fix bug where messages expired without an initial modack in - synchronous=true mode. - - Receive does not retry ResourceExhausted errors. -- spanner: - - client.Close now cancels existing requests and should be much faster for - large amounts of sessions. - - Correctly allow MinOpened sessions to be spun up. - -## v0.34.0 - -- functions/metadata: - - Switch to using JSON in context. - - Make Resource a value. -- vision: Fix ProductSearch return type. -- datastore: Add an example for how to handle MultiError. - -## v0.33.1 - -- compute: Removes an erroneously added go.mod. -- logging: Populate source location in fromLogEntry. - -## v0.33.0 - -- bttest: - - Add support for apply_label_transformer. -- expr: - - Add expr library. -- firestore: - - Support retrieval of missing documents. -- kms: - - Add IAM methods. -- pubsub: - - Clarify extension documentation. -- scheduler: - - Add v1beta1 client. -- vision: - - Add product search helper. - - Add new product search client. - -## v0.32.0 - -Note: This release is the last to support Go 1.6 and 1.8. - -- bigquery: - - Add support for removing an expiration. - - Ignore NeverExpire in Table.Create. - - Validate table expiration time. -- cbt: - - Add note about not supporting arbitrary bytes. -- datastore: - - Align key checks. -- firestore: - - Return an error when using Start/End without providing values. -- pubsub: - - Add pstest Close method. - - Clarify MaxExtension documentation. -- securitycenter: - - Add v1beta1 client. -- spanner: - - Allow nil in mutations. - - Improve doc of SessionPoolConfig.MaxOpened. - - Increase session deletion timeout from 5s to 15s. - -## v0.31.0 - -- bigtable: - - Group mutations across multiple requests. -- bigquery: - - Link to bigquery troubleshooting errors page in bigquery.Error comment. -- cbt: - - Fix go generate command. - - Document usage of both maxage + maxversions. -- datastore: - - Passing nil keys results in ErrInvalidKey. -- firestore: - - Clarify what Document.DataTo does with untouched struct fields. -- profile: - - Validate service name in agent. -- pubsub: - - Fix deadlock with pstest and ctx.Cancel. - - Fix a possible deadlock in pstest. -- trace: - - Update doc URL with new fragment. - -Special thanks to @fastest963 for going above and beyond helping us to debug -hard-to-reproduce Pub/Sub issues. - -## v0.30.0 - -- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information. -- bigtable: bttest supports row sample filter. -- functions: metadata package added for accessing Cloud Functions resource metadata. - -## v0.29.0 - -- bigtable: - - Add retry to all idempotent RPCs. - - cbt supports complex GC policies. - - Emulator supports arbitrary bytes in regex filters. -- firestore: Add ArrayUnion and ArrayRemove. -- logging: Add the ContextFunc option to supply the context used for - asynchronous RPCs. -- profiler: Ignore NotDefinedError when fetching the instance name -- pubsub: - - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled. - - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning. - - Fix deadlock. - - Restore Ack/Nack/Modacks metrics. - - Improve context handling in iterator. - - Implement synchronous mode for Receive. - - pstest: add Pull. -- spanner: Add a metric for the number of sessions currently opened. 
-- storage: - - Canceling the context releases all resources. - - Add additional RetentionPolicy attributes. -- vision/apiv1: Add LocalizeObjects method. - -## v0.28.0 - -- bigtable: - - Emulator returns Unimplemented for snapshot RPCs. -- bigquery: - - Support zero-length repeated, nested fields. -- cloud assets: - - Add v1beta client. -- datastore: - - Don't nil out transaction ID on retry. -- firestore: - - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next - returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator - (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots. - - Add array-contains operator. -- IAM: - - Add iam/credentials/apiv1 client. -- pubsub: - - Canceling the context passed to Subscription.Receive causes Receive to return when - processing finishes on all messages currently in progress, even if new messages are arriving. -- redis: - - Add redis/apiv1 client. -- storage: - - Add Reader.Attrs. - - Deprecate several Reader getter methods: please use Reader.Attrs for these instead. - - Add ObjectHandle.Bucket and ObjectHandle.Object methods. - -## v0.27.0 - -- bigquery: - - Allow modification of encryption configuration and partitioning options to a table via the Update call. - - Add a SchemaFromJSON function that converts a JSON table schema. -- bigtable: - - Restore cbt count functionality. -- containeranalysis: - - Add v1beta client. -- spanner: - - Fix a case where an iterator might not be closed correctly. -- storage: - - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount. - - Add a method to Reader that returns the parsed value of the Last-Modified header. - -## v0.26.0 - -- bigquery: - - Support filtering listed jobs by min/max creation time. - - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering). - - Include job creator email in Job struct. -- bigtable: - - Add `RowSampleFilter`. - - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters - must match the entire target string to succeed. Previously, the emulator was - succeeding on partial matches. - NOTE: As of this release, this change only affects the emulator when run - from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched - from `gcloud` will be updated in a subsequent `gcloud` release. -- dataproc: Add apiv1beta2 client. -- datastore: Save non-nil pointer fields on omitempty. -- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header. -- logging/logadmin: Support writer_identity and include_children. -- pubsub: - - Support labels on topics and subscriptions. - - Support message storage policy for topics. - - Use the distribution of ack times to determine when to extend ack deadlines. - The only user-visible effect of this change should be that programs that - call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub - Subscriber`. -- storage: - - Support predefined ACLs. - - Support additional ACL fields other than Entity and Role. - - Support bucket websites. - - Support bucket logging. - - -## v0.25.0 - -- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md) -- bigtable: - - cbt: Support a GC policy of "never". -- errorreporting: - - Support User. - - Close now calls Flush. - - Use OnError (previously ignored). 
- - Pass through the RPC error as-is to OnError. -- httpreplay: A tool for recording and replaying HTTP requests - (for the bigquery and storage clients in this repo). -- kms: v1 client added -- logging: add SourceLocation to Entry. -- storage: improve CRC checking on read. - -## v0.24.0 - -- bigquery: Support for the NUMERIC type. -- bigtable: - - cbt: Optionally specify columns for read/lookup - - Support instance-level administration. -- oslogin: New client for the OS Login API. -- pubsub: - - The package is now stable. There will be no further breaking changes. - - Internal changes to improve Subscription.Receive behavior. -- storage: Support updating bucket lifecycle config. -- spanner: Support struct-typed parameter bindings. -- texttospeech: New client for the Text-to-Speech API. - -## v0.23.0 - -- bigquery: Add DDL stats to query statistics. -- bigtable: - - cbt: Add cells-per-column limit for row lookup. - - cbt: Make it possible to combine read filters. -- dlp: v2beta2 client removed. Use the v2 client instead. -- firestore, spanner: Fix compilation errors due to protobuf changes. - -## v0.22.0 - -- bigtable: - - cbt: Support cells per column limit for row read. - - bttest: Correctly handle empty RowSet. - - Fix ReadModifyWrite operation in emulator. - - Fix API path in GetCluster. - -- bigquery: - - BEHAVIOR CHANGE: Retry on 503 status code. - - Add dataset.DeleteWithContents. - - Add SchemaUpdateOptions for query jobs. - - Add Timeline to QueryStatistics. - - Add more stats to ExplainQueryStage. - - Support Parquet data format. - -- datastore: - - Support omitempty for times. - -- dlp: - - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client, - which is now out of beta. - - Add v2 client. - -- firestore: - - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid. - -- iam: - - Support JWT signing via SignJwt callopt. - -- profiler: - - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done. - - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute. - - Avoid returning empty serial port output. - -- pubsub: - - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy. - - BEHAVIOR CHANGE: Don't backoff on EOF. - - pstest: Support Acknowledge and ModifyAckDeadline RPCs. - -- redis: - - Add v1 beta Redis client. - -- spanner: - - Support SessionLabels. - -- speech: - - Add api v1 beta1 client. - -- storage: - - BEHAVIOR CHANGE: Retry reads when retryable error occurs. - - Fix delete of object in requester-pays bucket. - - Support KMS integration. - -## v0.21.0 - -- bigquery: - - Add OpenCensus tracing. - -- firestore: - - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot - whose Exists method returns false. DocumentRef.Get and Transaction.Get - return the non-nil DocumentSnapshot in addition to a NotFound error. - **DocumentRef.GetAll and Transaction.GetAll return a non-nil - DocumentSnapshot instead of nil.** - - Add DocumentIterator.Stop. **Call Stop whenever you are done with a - DocumentIterator.** - - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime - notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. - - Canceling an RPC now always returns a grpc.Status with codes.Canceled. - -- spanner: - - Add `CommitTimestamp`, which supports inserting the commit timestamp of a - transaction into a column. - -## v0.20.0 - -- bigquery: Support SchemaUpdateOptions for load jobs. - -- bigtable: - - Add SampleRowKeys. 
- - cbt: Support union, intersection GCPolicy. - - Retry admin RPCS. - - Add trace spans to retries. - -- datastore: Add OpenCensus tracing. - -- firestore: - - Fix queries involving Null and NaN. - - Allow Timestamp protobuffers for time values. - -- logging: Add a WriteTimeout option. - -- spanner: Support Batch API. - -- storage: Add OpenCensus tracing. - -## v0.19.0 - -- bigquery: - - Support customer-managed encryption keys. - -- bigtable: - - Improved emulator support. - - Support GetCluster. - -- datastore: - - Add general mutations. - - Support pointer struct fields. - - Support transaction options. - -- firestore: - - Add Transaction.GetAll. - - Support document cursors. - -- logging: - - Support concurrent RPCs to the service. - - Support per-entry resources. - -- profiler: - - Add config options to disable heap and thread profiling. - - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. - -- pubsub: - - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the - callback returns). - - Add SubscriptionInProject. - - Add OpenCensus instrumentation for streaming pull. - -- storage: - - Support CORS. - -## v0.18.0 - -- bigquery: - - Marked stable. - - Schema inference of nullable fields supported. - - Added TimePartitioning to QueryConfig. - -- firestore: Data provided to DocumentRef.Set with a Merge option can contain - Delete sentinels. - -- logging: Clients can accept parent resources other than projects. - -- pubsub: - - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome. - - Support updating more subscription metadata: AckDeadline, - RetainAckedMessages and RetentionDuration. - -- oslogin/apiv1beta: New client for the Cloud OS Login API. - -- rpcreplay: A package for recording and replaying gRPC traffic. - -- spanner: - - Add a ReadWithOptions that supports a row limit, as well as an index. - - Support query plan and execution statistics. - - Added [OpenCensus](http://opencensus.io) support. - -- storage: Clarify checksum validation for gzipped files (it is not validated - when the file is served uncompressed). - - -## v0.17.0 - -- firestore BREAKING CHANGES: - - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. - Change - `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` - to - `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` - - Change - `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` - to - `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` - - Rename MergePaths to Merge; require args to be FieldPaths - - A value stored as an integer can be read into a floating-point field, and vice versa. -- bigtable/cmd/cbt: - - Support deleting a column. - - Add regex option for row read. -- spanner: Mark stable. -- storage: - - Add Reader.ContentEncoding method. - - Fix handling of SignedURL headers. -- bigquery: - - If Uploader.Put is called with no rows, it returns nil without making a - call. - - Schema inference supports the "nullable" option in struct tags for - non-required fields. - - TimePartitioning supports "Field". - - -## v0.16.0 - -- Other bigquery changes: - - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). - - UseStandardSQL is deprecated; set UseLegacySQL to true if you need - Legacy SQL. - - Uploader.Put will generate a random insert ID if you do not provide one. - - Support time partitioning for load jobs. - - Support dry-run queries. - - A `Job` remembers its last retrieved status. 
- - Support retrieving job configuration. - - Support labels for jobs and tables. - - Support dataset access lists. - - Improve support for external data sources, including data from Bigtable and - Google Sheets, and tables with external data. - - Support updating a table's view configuration. - - Fix uploading civil times with nanoseconds. - -- storage: - - Support PubSub notifications. - - Support Requester Pays buckets. - -- profiler: Support goroutine and mutex profile types. - -## v0.15.0 - -- firestore: beta release. See the - [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). - -- errorreporting: The existing package has been redesigned. - -- errors: This package has been removed. Use errorreporting. - - -## v0.14.0 - -- bigquery BREAKING CHANGES: - - Standard SQL is the default for queries and views. - - `Table.Create` takes `TableMetadata` as a second argument, instead of - options. - - `Dataset.Create` takes `DatasetMetadata` as a second argument. - - `DatasetMetadata` field `ID` renamed to `FullID` - - `TableMetadata` field `ID` renamed to `FullID` - -- Other bigquery changes: - - The client will append a random suffix to a provided job ID if you set - `AddJobIDSuffix` to true in a job config. - - Listing jobs is supported. - - Better retry logic. - -- vision, language, speech: clients are now stable - -- monitoring: client is now beta - -- profiler: - - Rename InstanceName to Instance, ZoneName to Zone - - Auto-detect service name and version on AppEngine. - -## v0.13.0 - -- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these - options to continue using Legacy SQL after the client switches its default - to Standard SQL. - -- bigquery: Support for updating dataset labels. - -- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other - than the client's. DatasetsInProject is no longer needed and is deprecated. - -- bigtable: Fail ListInstances when any zones fail. - -- spanner: support decoding of slices of basic types (e.g. []string, []int64, - etc.) - -- logging/logadmin: UpdateSink no longer creates a sink if it is missing - (actually a change to the underlying service, not the client) - -- profiler: Service and ServiceVersion replace Target in Config. - -## v0.12.0 - -- pubsub: Subscription.Receive now uses streaming pull. - -- pubsub: add Client.TopicInProject to access topics in a different project - than the client. - -- errors: renamed errorreporting. The errors package will be removed shortly. - -- datastore: improved retry behavior. - -- bigquery: support updates to dataset metadata, with etags. - -- bigquery: add etag support to Table.Update (BREAKING: etag argument added). - -- bigquery: generate all job IDs on the client. - -- storage: support bucket lifecycle configurations. - - -## v0.11.0 - -- Clients for spanner, pubsub and video are now in beta. - -- New client for DLP. - -- spanner: performance and testing improvements. - -- storage: requester-pays buckets are supported. - -- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. - -- pubsub: bug fixes and other minor improvements - -## v0.10.0 - -- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. - -- pubsub: Subscription.Receive now runs concurrently for higher throughput. - -- vision: cloud.google.com/go/vision is deprecated. Use -cloud.google.com/go/vision/apiv1 instead. - -- translation: now stable. - -- trace: several changes to the surface. See the link below. 
- -### Code changes required from v0.9.0 - -- pubsub: Replace - - ``` - sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) - ``` - - with - - ``` - sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ - PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, - }) - ``` - -- trace: traceGRPCServerInterceptor will be provided from *trace.Client. -Given an initialized `*trace.Client` named `tc`, instead of - - ``` - s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc))) - ``` - - write - - ``` - s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) - ``` - -- trace trace.GRPCClientInterceptor will also provided from *trace.Client. -Instead of - - ``` - conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor())) - ``` - - write - - ``` - conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) - ``` - -- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC -interceptor as a dial option as shown below when initializing Cloud package -clients: - - ``` - c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))) - if err != nil { - ... - } - ``` - - -## v0.9.0 - -- Breaking changes to some autogenerated clients. -- rpcreplay package added. - -## v0.8.0 - -- profiler package added. -- storage: - - Retry Objects.Insert call. - - Add ProgressFunc to WRiter. -- pubsub: breaking changes: - - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). - - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)). - - Message.Done replaced with Message.Ack and Message.Nack. - -## v0.7.0 - -- Release of a client library for Spanner. See -the -[blog -post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). -Note that although the Spanner service is beta, the Go client library is alpha. - -## v0.6.0 - -- Beta release of BigQuery, DataStore, Logging and Storage. See the -[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). - -- bigquery: - - struct support. Read a row directly into a struct with -`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. -You can also use field tags. See the [package documentation][cloud-bigquery-ref] -for details. - - - The `ValueList` type was removed. It is no longer necessary. Instead of - ```go - var v ValueList - ... it.Next(&v) .. - ``` - use - - ```go - var v []Value - ... it.Next(&v) ... - ``` - - - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or - `ValueList` would append to the slice. Now each call resets the size to zero first. - - - Schema inference will infer the SQL type BYTES for a struct field of - type []byte. Previously it inferred STRING. - - - The types `uint`, `uint64` and `uintptr` are no longer supported in schema - inference. BigQuery's integer type is INT64, and those types may hold values - that are not correctly represented in a 64-bit signed integer. - -## v0.5.0 - -- bigquery: - - The SQL types DATE, TIME and DATETIME are now supported. They correspond to - the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` - package. 
- - Support for query parameters. - - Support deleting a dataset. - - Values from INTEGER columns will now be returned as int64, not int. This - will avoid errors arising from large values on 32-bit systems. -- datastore: - - Nested Go structs encoded as Entity values, instead of a -flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, eg. - ```go - type State struct { - Cities []struct{ - Populations []int - } - } - ``` - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for -more details. - - Contexts no longer hold namespaces; instead you must set a key's namespace - explicitly. Also, key functions have been changed and renamed. - - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: - ```go - q := datastore.NewQuery("Kind").Namespace("ns") - ``` - - All the fields of Key are exported. That means you can construct any Key with a struct literal: - ```go - k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} - ``` - - As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. - - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace - ```go - NewIncompleteKey(ctx, kind, parent) - ``` - with - ```go - IncompleteKey(kind, parent) - ``` - and if you do use namespaces, make sure you set the namespace on the returned key. - - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace - ```go - NewKey(ctx, kind, name, 0, parent) - NewKey(ctx, kind, "", id, parent) - ``` - with - ```go - NameKey(kind, name, parent) - IDKey(kind, id, parent) - ``` - and if you do use namespaces, make sure you set the namespace on the returned key. - - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. - - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. - - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for -more details. - -## v0.4.0 - -- bigquery: - -`NewGCSReference` is now a function, not a method on `Client`. - - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling - loading data into a table from a file or any `io.Reader`. - * Client.Table and Client.OpenTable have been removed. - Replace - ```go - client.OpenTable("project", "dataset", "table") - ``` - with - ```go - client.DatasetInProject("project", "dataset").Table("table") - ``` - - * Client.CreateTable has been removed. - Replace - ```go - client.CreateTable(ctx, "project", "dataset", "table") - ``` - with - ```go - client.DatasetInProject("project", "dataset").Table("table").Create(ctx) - ``` - - * Dataset.ListTables have been replaced with Dataset.Tables. - Replace - ```go - tables, err := ds.ListTables(ctx) - ``` - with - ```go - it := ds.Tables(ctx) - for { - table, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - // TODO: use table. - } - ``` - - * Client.Read has been replaced with Job.Read, Table.Read and Query.Read. - Replace - ```go - it, err := client.Read(ctx, job) - ``` - with - ```go - it, err := job.Read(ctx) - ``` - and similarly for reading from tables or queries. - - * The iterator returned from the Read methods is now named RowIterator. Its - behavior is closer to the other iterators in these libraries. 
It no longer - supports the Schema method; see the next item. - Replace - ```go - for it.Next(ctx) { - var vals ValueList - if err := it.Get(&vals); err != nil { - // TODO: Handle error. - } - // TODO: use vals. - } - if err := it.Err(); err != nil { - // TODO: Handle error. - } - ``` - with - ``` - for { - var vals ValueList - err := it.Next(&vals) - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - // TODO: use vals. - } - ``` - Instead of the `RecordsPerRequest(n)` option, write - ```go - it.PageInfo().MaxSize = n - ``` - Instead of the `StartIndex(i)` option, write - ```go - it.StartIndex = i - ``` - - * ValueLoader.Load now takes a Schema in addition to a slice of Values. - Replace - ```go - func (vl *myValueLoader) Load(v []bigquery.Value) - ``` - with - ```go - func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema) - ``` - - - * Table.Patch is replace by Table.Update. - Replace - ```go - p := table.Patch() - p.Description("new description") - metadata, err := p.Apply(ctx) - ``` - with - ```go - metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{ - Description: "new description", - }) - ``` - - * Client.Copy is replaced by separate methods for each of its four functions. - All options have been replaced by struct fields. - - * To load data from Google Cloud Storage into a table, use Table.LoaderFrom. - - Replace - ```go - client.Copy(ctx, table, gcsRef) - ``` - with - ```go - table.LoaderFrom(gcsRef).Run(ctx) - ``` - Instead of passing options to Copy, set fields on the Loader: - ```go - loader := table.LoaderFrom(gcsRef) - loader.WriteDisposition = bigquery.WriteTruncate - ``` - - * To extract data from a table into Google Cloud Storage, use - Table.ExtractorTo. Set fields on the returned Extractor instead of - passing options. - - Replace - ```go - client.Copy(ctx, gcsRef, table) - ``` - with - ```go - table.ExtractorTo(gcsRef).Run(ctx) - ``` - - * To copy data into a table from one or more other tables, use - Table.CopierFrom. Set fields on the returned Copier instead of passing options. - - Replace - ```go - client.Copy(ctx, dstTable, srcTable) - ``` - with - ```go - dst.Table.CopierFrom(srcTable).Run(ctx) - ``` - - * To start a query job, create a Query and call its Run method. Set fields - on the query instead of passing options. - - Replace - ```go - client.Copy(ctx, table, query) - ``` - with - ```go - query.Run(ctx) - ``` - - * Table.NewUploader has been renamed to Table.Uploader. Instead of options, - configure an Uploader by setting its fields. - Replace - ```go - u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) - ``` - with - ```go - u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) - u.IgnoreUnknownValues = true - ``` - -- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package -`google.golang.org/api/iterator`. - -## v0.3.0 - -- storage: - * AdminClient replaced by methods on Client. - Replace - ```go - adminClient.CreateBucket(ctx, bucketName, attrs) - ``` - with - ```go - client.Bucket(bucketName).Create(ctx, projectID, attrs) - ``` - - * BucketHandle.List replaced by BucketHandle.Objects. - Replace - ```go - for query != nil { - objs, err := bucket.List(d.ctx, query) - if err != nil { ... } - query = objs.Next - for _, obj := range objs.Results { - fmt.Println(obj) - } - } - ``` - with - ```go - iter := bucket.Objects(d.ctx, query) - for { - obj, err := iter.Next() - if err == iterator.Done { - break - } - if err != nil { ... 
} - fmt.Println(obj) - } - ``` - (The `iterator` package is at `google.golang.org/api/iterator`.) - - Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`. - - Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`. - - - * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom. - Replace - ```go - attrs, err := src.CopyTo(ctx, dst, nil) - ``` - with - ```go - attrs, err := dst.CopierFrom(src).Run(ctx) - ``` - - Replace - ```go - attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"}) - ``` - with - ```go - c := dst.CopierFrom(src) - c.ContextType = "text/html" - attrs, err := c.Run(ctx) - ``` - - * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom. - Replace - ```go - attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil) - ``` - with - ```go - attrs, err := dst.ComposerFrom(src1, src2).Run(ctx) - ``` - - * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate. - Replace - ```go - attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"}) - ``` - with - ```go - attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"}) - ``` - - * ObjectHandle.WithConditions replaced by ObjectHandle.If. - Replace - ```go - obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen)) - ``` - with - ```go - obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen}) - ``` - - Replace - ```go - obj.WithConditions(storage.IfGenerationMatch(0)) - ``` - with - ```go - obj.If(storage.Conditions{DoesNotExist: true}) - ``` - - * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`). - -- Package preview/logging deleted. Use logging instead. - -## v0.2.0 - -- Logging client replaced with preview version (see below). - -- New clients for some of Google's Machine Learning APIs: Vision, Speech, and -Natural Language. - -- Preview version of a new [Stackdriver Logging][cloud-logging] client in -[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). -This client uses gRPC as its transport layer, and supports log reading, sinks -and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. diff --git a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md deleted file mode 100644 index 8fd1bc9c22b..00000000000 --- a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,44 +0,0 @@ -# Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. 
- -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md deleted file mode 100644 index 6d6e48b65bd..00000000000 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ /dev/null @@ -1,327 +0,0 @@ -# Contributing - -1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose). - The issue will be used to discuss the bug or feature and should be created - before sending a PR. - -1. [Install Go](https://golang.org/dl/). - 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) - is in your `PATH`. - 1. Check it's working by running `go version`. - * If it doesn't work, check the install location, usually - `/usr/local/go`, is on your `PATH`. - -1. Sign one of the -[contributor license agreements](#contributor-license-agreements) below. - -1. Clone the repo: - `git clone https://github.com/googleapis/google-cloud-go` - -1. Change into the checked out source: - `cd google-cloud-go` - -1. Fork the repo. - -1. Set your fork as a remote: - `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git` - -1. Make changes, commit to your fork. - - Commit messages should follow the - [Conventional Commits Style](https://www.conventionalcommits.org). The scope - portion should always be filled with the name of the package affected by the - changes being made. For example: - ``` - feat(functions): add gophers codelab - ``` - -1. Send a pull request with your changes. - - To minimize friction, consider setting `Allow edits from maintainers` on the - PR, which will enable project committers and automation to update your PR. - -1. A maintainer will review the pull request and make comments. - - Prefer adding additional commits over amending and force-pushing since it can - be difficult to follow code reviews when the commit history changes. - - Commits will be squashed when they're merged. - -## Testing - -We test code against two versions of Go, the minimum and maximum versions -supported by our clients. To see which versions these are checkout our -[README](README.md#supported-versions). - -### Integration Tests - -In addition to the unit tests, you may run the integration test suite. These -directions describe setting up your environment to run integration tests for -_all_ packages: note that many of these instructions may be redundant if you -intend only to run integration tests on a single package. 
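
As a rough illustration of how such an integration test is typically structured (a hypothetical sketch, not code from this repository), a test can skip itself when `-short` is passed and when the `GCLOUD_TESTS_GOLANG_PROJECT_ID` variable described below is not set:

```go
package example

import (
	"os"
	"testing"
)

// TestIntegrationExample is a hypothetical integration-test skeleton.
// It is skipped in -short mode and when the test project is not configured.
func TestIntegrationExample(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in -short mode")
	}
	projID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID")
	if projID == "" {
		t.Skip("GCLOUD_TESTS_GOLANG_PROJECT_ID not set; skipping integration test")
	}
	// TODO: create a client for projID and exercise the API under test.
	_ = projID
}
```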
- -#### GCP Setup - -To run the integrations tests, creation and configuration of two projects in -the Google Developers Console is required: one specifically for Firestore -integration tests, and another for all other integration tests. We'll refer to -these projects as "general project" and "Firestore project". - -After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) -for each project. Ensure the project-level **Owner** -[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to -each service account. During the creation of the service account, you should -download the JSON credential file for use later. - -Next, ensure the following APIs are enabled in the general project: - -- BigQuery API -- BigQuery Data Transfer API -- Cloud Dataproc API -- Cloud Dataproc Control API Private -- Cloud Datastore API -- Cloud Firestore API -- Cloud Key Management Service (KMS) API -- Cloud Natural Language API -- Cloud OS Login API -- Cloud Pub/Sub API -- Cloud Resource Manager API -- Cloud Spanner API -- Cloud Speech API -- Cloud Translation API -- Cloud Video Intelligence API -- Cloud Vision API -- Compute Engine API -- Compute Engine Instance Group Manager API -- Container Registry API -- Firebase Rules API -- Google Cloud APIs -- Google Cloud Deployment Manager V2 API -- Google Cloud SQL -- Google Cloud Storage -- Google Cloud Storage JSON API -- Google Compute Engine Instance Group Updater API -- Google Compute Engine Instance Groups API -- Kubernetes Engine API -- Cloud Error Reporting API -- Pub/Sub Lite API - -Next, create a Datastore database in the general project, and a Firestore -database in the Firestore project. - -Finally, in the general project, create an API key for the translate API: - -- Go to GCP Developer Console. -- Navigate to APIs & Services > Credentials. -- Click Create Credentials > API Key. -- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below. - -#### Local Setup - -Once the two projects are created and configured, set the following environment -variables: - -- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g. -bamboo-shift-455) for the general project. -- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general -project's service account. -- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID -(e.g. doorway-cliff-677) for the Firestore project. -- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the -Firestore project's service account. -- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above. - -As part of the setup that follows, the following variables will be configured: - -- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests, -in the form -"projects/P/locations/L/keyRings/R". The creation of this is described below. -- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests, -in the form -"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region. -- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone. - -Install the [gcloud command-line tool][gcloudcli] to your machine and use it to -create some resources used in integration tests. - -From the project's root directory: - -``` sh -# Sets the default project in your env. -$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Authenticates the gcloud tool with your account. 
-$ gcloud auth login - -# Create the indexes used in the datastore integration tests. -$ gcloud datastore indexes create datastore/testdata/index.yaml - -# Creates a Google Cloud storage bucket with the same name as your test project, -# and with the Cloud Logging service account as owner, for the sink -# integration tests in logging. -$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID -$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Creates a PubSub topic for integration tests of storage notifications. -$ gcloud beta pubsub topics create go-storage-notification-test -# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user -# "service-@gs-project-accounts.iam.gserviceaccount.com" -# as a publisher to that topic. - -# Creates a Spanner instance for the spanner integration tests. -$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' -# NOTE: Spanner instances are priced by the node-hour, so you may want to -# delete the instance after testing with 'gcloud beta spanner instances delete'. - -$ export MY_KEYRING=some-keyring-name -$ export MY_LOCATION=global -$ export MY_SINGLE_LOCATION=us-central1 -# Creates a KMS keyring, in the same location as the default location for your -# project's buckets. -$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION -# Creates two keys in the keyring, named key1 and key2. -$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption -$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption -# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. -$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING -# Authorizes Google Cloud Storage to encrypt and decrypt using key1. -$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 - -# Create KMS Key in one region for Bigtable -$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION -$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption -# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable. -$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING -# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud: -$ gcloud beta services identity create \ - --service=bigtableadmin.googleapis.com \ - --project $GCLOUD_TESTS_GOLANG_PROJECT_ID -# Note the service agent email for the agent created. -$ export SERVICE_AGENT_EMAIL= - -# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1 -$ gcloud kms keys add-iam-policy-binding key1 \ - --keyring $MY_KEYRING \ - --location $MY_SINGLE_LOCATION \ - --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ - --member "serviceAccount:$SERVICE_AGENT_EMAIL" \ - --project $GCLOUD_TESTS_GOLANG_PROJECT_ID -``` - -It may be useful to add exports to your shell initialization for future use. -For instance, in `.zshrc`: - -```sh -#### START GO SDK Test Variables -# Developers Console project's ID (e.g. bamboo-shift-455) for the general project. -export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project - -# The path to the JSON key file of the general project's service account. 
-export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json - -# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. -export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project - -# The path to the JSON key file of the Firestore project's service account. -export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json - -# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R". -# The creation of this is described below. -export MY_KEYRING=my-golang-sdk-test -export MY_LOCATION=global -export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING - -# API key for using the Translate API. -export GCLOUD_TESTS_API_KEY=abcdefghijk123456789 - -# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones) -export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region -#### END GO SDK Test Variables -``` - -#### Running - -Once you've done the necessary setup, you can run the integration tests by -running: - -``` sh -$ go test -v ./... -``` - -Note that the above command will not run the tests in other modules. To run -tests on other modules, first navigate to the appropriate -subdirectory. For instance, to run only the tests for datastore: -``` sh -$ cd datastore -$ go test -v ./... -``` - -#### Replay - -Some packages can record the RPCs during integration tests to a file for -subsequent replay. To record, pass the `-record` flag to `go test`. The -recording will be saved to the _package_`.replay` file. To replay integration -tests from a saved recording, the replay file must be present, the `-short` -flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY` -environment variable must have a non-empty value. - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your -work**, then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. 
- -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0, -available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md deleted file mode 100644 index 01453cc692a..00000000000 --- a/vendor/cloud.google.com/go/README.md +++ /dev/null @@ -1,139 +0,0 @@ -# Google Cloud Client Libraries for Go - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go) - -Go packages for [Google Cloud Platform](https://cloud.google.com) services. - -``` go -import "cloud.google.com/go" -``` - -To install the packages on your system, *do not clone the repo*. Instead: - -1. Change to your project directory: - - ```bash - cd /my/cloud/project - ``` -1. Get the package you want to use. Some products have their own module, so it's - best to `go get` the package(s) you want to use: - - ``` - $ go get cloud.google.com/go/firestore # Replace with the package you want to use. - ``` - -**NOTE:** Some of these packages are under development, and may occasionally -make backwards-incompatible changes. - -## Supported APIs - -For an updated list of all of our released APIs please see our -[reference docs](https://cloud.google.com/go/docs/reference). - -## [Go Versions Supported](#supported-versions) - -Our libraries are compatible with at least the three most recent, major Go -releases. They are currently compatible with: - -- Go 1.19 -- Go 1.18 -- Go 1.17 -- Go 1.16 -- Go 1.15 - -## Authorization - -By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) -for authorization credentials used in calling the API endpoints. This will allow your -application to run in many environments without requiring explicit configuration. - -[snip]:# (auth) -```go -client, err := storage.NewClient(ctx) -``` - -To authorize using a -[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), -pass -[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile) -to the `NewClient` function of the desired package. 
For example: - -[snip]:# (auth-JSON) -```go -client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) -``` - -You can exert more control over authorization by using the -[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. Then pass -[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) -to the `NewClient` function: -[snip]:# (auth-ts) -```go -tokenSource := ... -client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) -``` - -## Contributing - -Contributions are welcome. Please, see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. - -[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory -[cloud-automl]: https://cloud.google.com/automl -[cloud-build]: https://cloud.google.com/cloud-build/ -[cloud-bigquery]: https://cloud.google.com/bigquery/ -[cloud-bigtable]: https://cloud.google.com/bigtable/ -[cloud-compute]: https://cloud.google.com/compute -[cloud-container]: https://cloud.google.com/containers/ -[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis -[cloud-dataproc]: https://cloud.google.com/dataproc/ -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/ -[cloud-debugger]: https://cloud.google.com/debugger/ -[cloud-dlp]: https://cloud.google.com/dlp/ -[cloud-errors]: https://cloud.google.com/error-reporting/ -[cloud-firestore]: https://cloud.google.com/firestore/ -[cloud-iam]: https://cloud.google.com/iam/ -[cloud-iot]: https://cloud.google.com/iot-core/ -[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts -[cloud-kms]: https://cloud.google.com/kms/ -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsublite]: https://cloud.google.com/pubsub/lite -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-language]: https://cloud.google.com/natural-language -[cloud-logging]: https://cloud.google.com/logging/ -[cloud-natural-language]: https://cloud.google.com/natural-language/ -[cloud-memorystore]: https://cloud.google.com/memorystore/ -[cloud-monitoring]: https://cloud.google.com/monitoring/ -[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest -[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/ -[cloud-securitycenter]: https://cloud.google.com/security-command-center/ -[cloud-scheduler]: https://cloud.google.com/scheduler -[cloud-spanner]: https://cloud.google.com/spanner/ -[cloud-speech]: https://cloud.google.com/speech -[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ -[cloud-tasks]: https://cloud.google.com/tasks/ -[cloud-texttospeech]: https://cloud.google.com/texttospeech/ -[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ -[cloud-trace]: https://cloud.google.com/trace/ -[cloud-translate]: https://cloud.google.com/translate -[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/ -[cloud-recommender]: https://cloud.google.com/recommendations/ -[cloud-video]: 
https://cloud.google.com/video-intelligence/ -[cloud-vision]: https://cloud.google.com/vision -[cloud-webrisk]: https://cloud.google.com/web-risk/ - -## Links - -- [Go on Google Cloud](https://cloud.google.com/go/home) -- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started) -- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart) -- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go) -- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go) diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md deleted file mode 100644 index 6d0fcf4f9f9..00000000000 --- a/vendor/cloud.google.com/go/RELEASING.md +++ /dev/null @@ -1,141 +0,0 @@ -# Releasing - -## Determine which module to release - -The Go client libraries have several modules. Each module does not strictly -correspond to a single library - they correspond to trees of directories. If a -file needs to be released, you must release the closest ancestor module. - -To see all modules: - -```bash -$ cat `find . -name go.mod` | grep module -module cloud.google.com/go/pubsub -module cloud.google.com/go/spanner -module cloud.google.com/go -module cloud.google.com/go/bigtable -module cloud.google.com/go/bigquery -module cloud.google.com/go/storage -module cloud.google.com/go/pubsublite -module cloud.google.com/go/firestore -module cloud.google.com/go/logging -module cloud.google.com/go/internal/gapicgen -module cloud.google.com/go/internal/godocfx -module cloud.google.com/go/internal/examples/fake -module cloud.google.com/go/internal/examples/mock -module cloud.google.com/go/datastore -``` - -The `cloud.google.com/go` is the repository root module. Each other module is -a submodule. - -So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest -ancestor module is `cloud.google.com/go/bigtable` - so you should release a new -version of the `cloud.google.com/go/bigtable` submodule. - -If you need to release a change in `asset/apiv1/asset_client.go`, the closest -ancestor module is `cloud.google.com/go` - so you should release a new version -of the `cloud.google.com/go` repository root module. Note: releasing -`cloud.google.com/go` has no impact on any of the submodules, and vice-versa. -They are released entirely independently. - -## Test failures - -If there are any test failures in the Kokoro build, releases are blocked until -the failures have been resolved. - -## How to release - -### Automated Releases (`cloud.google.com/go` and submodules) - -We now use [release-please](https://github.com/googleapis/release-please) to -perform automated releases for `cloud.google.com/go` and all submodules. - -1. If there are changes that have not yet been released, a - [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will - be automatically opened by release-please - with a title like "chore: release X.Y.Z" (for the root module) or - "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z - is the next version to be released. Find the desired pull request - [here](https://github.com/googleapis/google-cloud-go/pulls) -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are - any failures in the most recent build, address them before proceeding with - the release. (This applies even if the failures are in a different submodule - from the one being released.) -1. 
Review the release notes. These are automatically generated from the titles - of any merged commits since the previous release. If you would like to edit - them, this can be done by updating the changes in the release PR. -1. To cut a release, approve and merge the pull request. Doing so will - update the `CHANGES.md`, tag the merged commit with the appropriate version, - and draft a GitHub release which will copy the notes from `CHANGES.md`. - -### Manual Release (`cloud.google.com/go`) - -If for whatever reason the automated release process is not working as expected, -here is how to manually cut a release of `cloud.google.com/go`. - -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are - any failures in the most recent build, address them before proceeding with - the release. -1. Navigate to `google-cloud-go/` and switch to main. -1. `git pull` -1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. - The current latest tag `$CV` is the largest tag. It should look something - like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a - specific library, not the module root). We'll call the current version `$CV` - and the new version `$NV`. -1. On main, run `git log $CV...` to list all the changes since the last - release. NOTE: You must manually visually parse out changes to submodules [1] - (the `git log` is going to show you things in submodules, which are not going - to be part of your release). -1. Edit `CHANGES.md` to include a summary of the changes. -1. In `internal/version/version.go`, update `const Repo` to today's date with - the format `YYYYMMDD`. -1. In `internal/version` run `go generate`. -1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, - and create a PR titled `chore: release $NV`. -1. Wait for the PR to be reviewed and merged. Once it's merged, and without - merging any other PRs in the meantime: - a. Switch to main. - b. `git pull` - c. Tag the repo with the next version: `git tag $NV`. - d. Push the tag to origin: - `git push origin $NV` -1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) - with the new release, copying the contents of `CHANGES.md`. - -### Manual Releases (submodules) - -If for whatever reason the automated release process is not working as expected, -here is how to manually cut a release of a submodule. - -(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) - -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are - any failures in the most recent build, address them before proceeding with - the release. (This applies even if the failures are in a different submodule - from the one being released.) -1. Navigate to `google-cloud-go/` and switch to main. -1. `git pull` -1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all - existing releases. The current latest tag `$CV` is the largest tag. It - should look something like `datastore/vX.Y.Z`. We'll call the current version - `$CV` and the new version `$NV`. -1. On main, run `git log $CV.. -- datastore/` to list all the changes to the - submodule directory since the last release. -1. Edit `datastore/CHANGES.md` to include a summary of the changes. -1. In `internal/version` run `go generate`. -1. Commit the changes, ignoring the generated `.go-r` file. 
Push to your fork, - and create a PR titled `chore(datastore): release $NV`. -1. Wait for the PR to be reviewed and merged. Once it's merged, and without - merging any other PRs in the meantime: - a. Switch to main. - b. `git pull` - c. Tag the repo with the next version: `git tag $NV`. - d. Push the tag to origin: - `git push origin $NV` -1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) - with the new release, copying the contents of `datastore/CHANGES.md`. diff --git a/vendor/cloud.google.com/go/SECURITY.md b/vendor/cloud.google.com/go/SECURITY.md deleted file mode 100644 index 8b58ae9c01a..00000000000 --- a/vendor/cloud.google.com/go/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). - -The Google Security Team will respond within 5 working days of your report on g.co/vulnz. - -We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue. diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 00000000000..5ac4a843e11 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.12.1" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 00000000000..8631b6d6d2c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,5 @@ +# Changes + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 00000000000..f940fb2c85b --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1405d096746..50538b1d343 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -71,6 +71,7 @@ func newDefaultHTTPClient() *http.Client { KeepAlive: 30 * time.Second, }).Dial, }, + Timeout: 5 * time.Second, } } diff --git a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go similarity index 85% rename from vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/compute/metadata/tidyfix.go index c3f49e045d2..4cef4850081 100644 --- a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. //go:build modhack // +build modhack -package iam +package metadata // Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go deleted file mode 100644 index 871645ef111..00000000000 --- a/vendor/cloud.google.com/go/doc.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* -Package cloud is the root of the packages used to access Google Cloud -Services. See https://godoc.org/cloud.google.com/go for a full list -of sub-packages. - -# Client Options - -All clients in sub-packages are configurable via client options. These options are -described here: https://godoc.org/google.golang.org/api/option. - -# Authentication and Authorization - -All the clients in sub-packages support authentication via Google Application Default -Credentials (see https://cloud.google.com/docs/authentication/production), or -by providing a JSON key file for a Service Account. See examples below. - -Google Application Default Credentials (ADC) is the recommended way to authorize -and authenticate clients. For information on how to create and obtain -Application Default Credentials, see -https://cloud.google.com/docs/authentication/production. Here is an example -of a client using ADC to authenticate: - - client, err := secretmanager.NewClient(context.Background()) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. - -You can use a file with credentials to authenticate and authorize, such as a JSON -key file associated with a Google service account. Service Account keys can be -created and downloaded from -https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses -the Secret Manger client, but the same steps apply to the other client libraries -underneath this package. Example: - - client, err := secretmanager.NewClient(context.Background(), - option.WithCredentialsFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. - -In some cases (for instance, you don't want to store secrets on disk), you can -create credentials from in-memory JSON and use the WithCredentials option. -The google package in this example is at golang.org/x/oauth2/google. -This example uses the Secret Manager client, but the same steps apply to -the other client libraries underneath this package. Note that scopes can be -found at https://developers.google.com/identity/protocols/oauth2/scopes, and -are also provided in all auto-generated libraries: for example, -cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: - - ctx := context.Background() - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) - if err != nil { - // TODO: handle error. - } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. - -# Timeouts and Cancellation - -By default, non-streaming methods, like Create or Get, will have a default deadline applied to the -context provided at call time, unless a context deadline is already set. Streaming -methods have no default deadline and will run indefinitely. To set timeouts or -arrange for cancellation, use contexts. Transient -errors will be retried when correctness allows. - -Here is an example of how to set a timeout for an RPC, use context.WithTimeout: - - ctx := context.Background() - // Do not set a timeout on the context passed to NewClient: dialing happens - // asynchronously, and the context is used to refresh credentials in the - // background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - // Time out if it takes more than 10 seconds to create a dataset. - tctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() // Always call cancel. 
- - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} - if err := client.DeleteSecret(tctx, req); err != nil { - // TODO: handle error. - } - -Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel: - - ctx := context.Background() - // Do not cancel the context passed to NewClient: dialing happens asynchronously, - // and the context is used to refresh credentials in the background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - cctx, cancel := context.WithCancel(ctx) - defer cancel() // Always call cancel. - - // TODO: Make the cancel function available to whatever might want to cancel the - // call--perhaps a GUI button. - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} - if err := client.DeleteSecret(cctx, req); err != nil { - // TODO: handle error. - } - -To opt out of default deadlines, set the temporary environment variable -GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client -creation. This affects all Google Cloud Go client libraries. This opt-out -mechanism will be removed in a future release. File an issue at -https://github.com/googleapis/google-cloud-go if the default deadlines -cannot work for you. - -Do not attempt to control the initial connection (dialing) of a service by setting a -timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts -would be ineffective and would only interfere with credential refreshing, which uses -the same context. - -# Connection Pooling - -Connection pooling differs in clients based on their transport. Cloud -clients either rely on HTTP or gRPC transports to communicate -with Google Cloud. - -Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the -underlying HTTP transport to cache connections for later re-use. These are cached to -the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in -http.DefaultTransport. - -For gRPC clients (all others in this repo), connection pooling is configurable. Users -of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client -option to NewClient calls. This configures the underlying gRPC connections to be -pooled and addressed in a round robin fashion. - -# Using the Libraries with Docker - -Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to -hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 -for more information. - -# Debugging - -To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See -https://godoc.org/google.golang.org/grpc/grpclog for more information. - -For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". - -# Inspecting errors - -Most of the errors returned by the generated clients are wrapped in an -`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror) -and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending -on the transport used to make the call (gRPC or REST). Converting your errors to -these types can be a useful way to get more information about what went wrong -while debugging. - -`apierror.APIError` gives access to specific details in the -error. The transport-specific errors can still be unwrapped using the -`apierror.APIError`. 
- - if err != nil { - var ae *apierror.APIError - if errors.As(err, &ae) { - log.Println(ae.Reason()) - log.Println(ae.Details().Help.GetLinks()) - } - } - -If the gRPC transport was used, the `grpc.Status` can still be parsed using the -`status.FromError` function. - - if err != nil { - if s, ok := status.FromError(err); ok { - log.Println(s.Message()) - for _, d := range s.Proto().Details { - log.Println(d) - } - } - } - -If the REST transport was used, the `googleapi.Error` can be parsed in a similar -way. - - if err != nil { - var gerr *googleapi.Error - if errors.As(err, &gerr) { - log.Println(gerr.Message) - } - } - -# Client Stability - -Clients in this repository are considered alpha or beta unless otherwise -marked as stable in the README.md. Semver is not used to communicate stability -of clients. - -Alpha and beta clients may change or go away without notice. - -Clients marked stable will maintain compatibility with future versions for as -long as we can reasonably sustain. Incompatible changes might be made in some -situations, including: - -- Security bugs may prompt backwards-incompatible changes. - -- Situations in which components are no longer feasible to maintain without -making breaking changes, including removal. - -- Parts of the client surface may be outright unstable and subject to change. -These parts of the surface will be labeled with the note, "It is EXPERIMENTAL -and subject to change or removal without notice." -*/ -package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 12f1167f1dc..c4ead20e09e 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,26 @@ # Changes +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) + + +### Features + +* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) + + +### Features + +* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) + + +### Features + +* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index be8372087f7..39bc0be8c60 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -251,6 +251,15 @@ "release_level": "ga", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", + "description": "Analytics Hub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, 
"cloud.google.com/go/bigquery/connection/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", @@ -278,6 +287,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", + "description": "BigQuery Data Policy API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", "description": "BigQuery Data Transfer API", @@ -410,7 +428,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/channel/apiv1": { @@ -548,6 +566,15 @@ "release_level": "alpha", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/dataform/apiv1beta1": { + "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/datafusion/apiv1": { "distribution_name": "cloud.google.com/go/datafusion/apiv1", "description": "Cloud Data Fusion API", @@ -656,6 +683,15 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/dialogflow/apiv2beta1": { + "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dialogflow/cx/apiv3": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", "description": "Dialogflow API", @@ -710,6 +746,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/edgecontainer/apiv1": { + "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", + "description": "Distributed Cloud Edge Container API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", @@ -899,6 +944,15 @@ "release_level": "ga", "library_type": "CORE" }, + "cloud.google.com/go/iam/apiv2": { + "distribution_name": "cloud.google.com/go/iam/apiv2", + "description": "Identity and Access Management (IAM) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/iam/credentials/apiv1": { "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account 
Credentials API", @@ -955,7 +1009,7 @@ }, "cloud.google.com/go/language/apiv1beta2": { "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Google Cloud Natural Language API", + "description": "Cloud Natural Language API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", @@ -1292,7 +1346,7 @@ "language": "Go", "client_library_type": "manual", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { @@ -1304,22 +1358,22 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1beta1", - "release_level": "beta", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", - "release_level": "ga", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", + "release_level": "beta", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { @@ -1466,15 +1520,6 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/secretmanager/apiv1beta1": { - "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", - "description": "Secret Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/security/privateca/apiv1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1", "description": "Certificate Authority API", @@ -1493,6 +1538,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/security/publicca/apiv1beta1": { + "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", + "description": "Public Certificate Authority API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", "description": "Security Command Center API", @@ -1637,6 +1691,15 @@ "release_level": 
"beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/speech/apiv2": { + "distribution_name": "cloud.google.com/go/speech/apiv2", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/storage": { "distribution_name": "cloud.google.com/go/storage", "description": "Cloud Storage (GCS)", @@ -1781,22 +1844,22 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", + "cloud.google.com/go/vision/v2/apiv1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1p1beta1", - "release_level": "beta", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/v2/apiv1": { - "distribution_name": "cloud.google.com/go/vision/v2/apiv1", + "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", - "release_level": "ga", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", + "release_level": "beta", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmmigration/apiv1": { diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json deleted file mode 100644 index fe028df58f9..00000000000 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ /dev/null @@ -1,334 +0,0 @@ -{ - "release-type": "go-yoshi", - "include-component-in-tag": true, - "tag-separator": "/", - "packages": { - "accessapproval": { - "component": "accessapproval" - }, - "accesscontextmanager": { - "component": "accesscontextmanager" - }, - "aiplatform": { - "component": "aiplatform" - }, - "analytics": { - "component": "analytics" - }, - "apigateway": { - "component": "apigateway" - }, - "apigeeconnect": { - "component": "apigeeconnect" - }, - "apigeeregistry": { - "component": "apigeeregistry" - }, - "apikeys": { - "component": "apikeys" - }, - "appengine": { - "component": "appengine" - }, - "area120": { - "component": "area120" - }, - "artifactregistry": { - "component": "artifactregistry" - }, - "asset": { - "component": "asset" - }, - "assuredworkloads": { - "component": "assuredworkloads" - }, - "automl": { - "component": "automl" - }, - "baremetalsolution": { - "component": "baremetalsolution" - }, - "batch": { - "component": "batch" - }, - "beyondcorp": { - "component": "beyondcorp" - }, - "billing": { - "component": "billing" - }, - "binaryauthorization": { - "component": "binaryauthorization" - }, - "certificatemanager": { - "component": "certificatemanager" - }, - "channel": { - "component": "channel" - }, - "cloudbuild": { - "component": "cloudbuild" - }, - "clouddms": { - "component": "clouddms" - }, - "cloudtasks": { - "component": 
"cloudtasks" - }, - "compute": { - "component": "compute" - }, - "contactcenterinsights": { - "component": "contactcenterinsights" - }, - "container": { - "component": "container" - }, - "containeranalysis": { - "component": "containeranalysis" - }, - "datacatalog": { - "component": "datacatalog" - }, - "dataflow": { - "component": "dataflow" - }, - "dataform": { - "component": "dataform" - }, - "datafusion": { - "component": "datafusion" - }, - "datalabeling": { - "component": "datalabeling" - }, - "dataplex": { - "component": "dataplex" - }, - "dataproc": { - "component": "dataproc" - }, - "dataqna": { - "component": "dataqna" - }, - "datastream": { - "component": "datastream" - }, - "deploy": { - "component": "deploy" - }, - "dialogflow": { - "component": "dialogflow" - }, - "dlp": { - "component": "dlp" - }, - "documentai": { - "component": "documentai" - }, - "domains": { - "component": "domains" - }, - "essentialcontacts": { - "component": "essentialcontacts" - }, - "eventarc": { - "component": "eventarc" - }, - "filestore": { - "component": "filestore" - }, - "functions": { - "component": "functions" - }, - "gaming": { - "component": "gaming" - }, - "gkebackup": { - "component": "gkebackup" - }, - "gkeconnect": { - "component": "gkeconnect" - }, - "gkehub": { - "component": "gkehub" - }, - "gkemulticloud": { - "component": "gkemulticloud" - }, - "grafeas": { - "component": "grafeas" - }, - "gsuiteaddons": { - "component": "gsuiteaddons" - }, - "iam": { - "component": "iam" - }, - "iap": { - "component": "iap" - }, - "ids": { - "component": "ids" - }, - "iot": { - "component": "iot" - }, - "kms": { - "component": "kms" - }, - "language": { - "component": "language" - }, - "lifesciences": { - "component": "lifesciences" - }, - "managedidentities": { - "component": "managedidentities" - }, - "mediatranslation": { - "component": "mediatranslation" - }, - "memcache": { - "component": "memcache" - }, - "metastore": { - "component": "metastore" - }, - "monitoring": { - "component": "monitoring" - }, - "networkconnectivity": { - "component": "networkconnectivity" - }, - "networkmanagement": { - "component": "networkmanagement" - }, - "networksecurity": { - "component": "networksecurity" - }, - "notebooks": { - "component": "notebooks" - }, - "optimization": { - "component": "optimization" - }, - "orchestration": { - "component": "orchestration" - }, - "orgpolicy": { - "component": "orgpolicy" - }, - "osconfig": { - "component": "osconfig" - }, - "oslogin": { - "component": "oslogin" - }, - "phishingprotection": { - "component": "phishingprotection" - }, - "policytroubleshooter": { - "component": "policytroubleshooter" - }, - "privatecatalog": { - "component": "privatecatalog" - }, - "recaptchaenterprise/v2": { - "component": "recaptchaenterprise" - }, - "recommendationengine": { - "component": "recommendationengine" - }, - "recommender": { - "component": "recommender" - }, - "redis": { - "component": "redis" - }, - "resourcemanager": { - "component": "resourcemanager" - }, - "resourcesettings": { - "component": "resourcesettings" - }, - "retail": { - "component": "retail" - }, - "run": { - "component": "run" - }, - "scheduler": { - "component": "scheduler" - }, - "secretmanager": { - "component": "secretmanager" - }, - "security": { - "component": "security" - }, - "securitycenter": { - "component": "securitycenter" - }, - "servicecontrol": { - "component": "servicecontrol" - }, - "servicedirectory": { - "component": "servicedirectory" - }, - "servicemanagement": { - "component": 
"servicemanagement" - }, - "serviceusage": { - "component": "serviceusage" - }, - "shell": { - "component": "shell" - }, - "speech": { - "component": "speech" - }, - "storagetransfer": { - "component": "storagetransfer" - }, - "talent": { - "component": "talent" - }, - "texttospeech": { - "component": "texttospeech" - }, - "tpu": { - "component": "tpu" - }, - "trace": { - "component": "trace" - }, - "translate": { - "component": "translate" - }, - "video": { - "component": "video" - }, - "videointelligence": { - "component": "videointelligence" - }, - "vision/v2": { - "component": "vision" - }, - "vmmigration": { - "component": "vmmigration" - }, - "vpcaccess": { - "component": "vpcaccess" - }, - "webrisk": { - "component": "webrisk" - }, - "websecurityscanner": { - "component": "websecurityscanner" - }, - "workflows": { - "component": "workflows" - } - } -} diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json deleted file mode 100644 index 546e7c31ad5..00000000000 --- a/vendor/cloud.google.com/go/release-please-config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "release-type": "go-yoshi", - "separate-pull-requests": true, - "include-component-in-tag": false, - "packages": { - ".": { - "component": "main" - } - } -} diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md deleted file mode 100644 index bcca0604dbb..00000000000 --- a/vendor/cloud.google.com/go/testing.md +++ /dev/null @@ -1,236 +0,0 @@ -# Testing Code that depends on Go Client Libraries - -The Go client libraries generated as a part of `cloud.google.com/go` all take -the approach of returning concrete types instead of interfaces. That way, new -fields and methods can be added to the libraries without breaking users. This -document will go over some patterns that can be used to test code that depends -on the Go client libraries. - -## Testing gRPC services using fakes - -*Note*: You can see the full -[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake). - -The clients found in `cloud.google.com/go` are gRPC based, with a couple of -notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage) -and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients. -Interactions with gRPC services can be faked by serving up your own in-memory -server within your test. One benefit of using this approach is that you don’t -need to define an interface in your runtime code; you can keep using -concrete struct types. You instead define a fake server in your test code. 
For -example, take a look at the following function: - -```go -import ( - "context" - "fmt" - "log" - "os" - - translate "cloud.google.com/go/translate/apiv3" - "github.com/googleapis/gax-go/v2" - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" -) - -func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) { - ctx := context.Background() - log.Printf("Translating %q to %q", text, targetLang) - req := &translatepb.TranslateTextRequest{ - Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")), - TargetLanguageCode: "en-US", - Contents: []string{text}, - } - resp, err := client.TranslateText(ctx, req) - if err != nil { - return "", fmt.Errorf("unable to translate text: %v", err) - } - translations := resp.GetTranslations() - if len(translations) != 1 { - return "", fmt.Errorf("expected only one result, got %d", len(translations)) - } - return translations[0].TranslatedText, nil -} -``` - -Here is an example of what a fake server implementation would look like for -faking the interactions above: - -```go -import ( - "context" - - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" -) - -type fakeTranslationServer struct { - translatepb.UnimplementedTranslationServiceServer -} - -func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) { - resp := &translatepb.TranslateTextResponse{ - Translations: []*translatepb.Translation{ - &translatepb.Translation{ - TranslatedText: "Hello World", - }, - }, - } - return resp, nil -} -``` - -All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto) -contains a similar `package.UnimplmentedFooServer` type that is useful for -creating fakes. By embedding the unimplemented server in the -`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server -exposes. Then, by providing our own `fakeTranslationServer.TranslateText` -method you can “override” the default unimplemented behavior of the one RPC that -you would like to be faked. - -The test itself does require a little bit of setup: start up a `net.Listener`, -register the server, and tell the client library to call the server: - -```go -import ( - "context" - "net" - "testing" - - translate "cloud.google.com/go/translate/apiv3" - "google.golang.org/api/option" - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" - "google.golang.org/grpc" -) - -func TestTranslateTextWithConcreteClient(t *testing.T) { - ctx := context.Background() - - // Setup the fake server. - fakeTranslationServer := &fakeTranslationServer{} - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - gsrv := grpc.NewServer() - translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer) - fakeServerAddr := l.Addr().String() - go func() { - if err := gsrv.Serve(l); err != nil { - panic(err) - } - }() - - // Create a client. - client, err := translate.NewTranslationClient(ctx, - option.WithEndpoint(fakeServerAddr), - option.WithoutAuthentication(), - option.WithGRPCDialOption(grpc.WithInsecure()), - ) - if err != nil { - t.Fatal(err) - } - - // Run the test. 
- text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US") - if err != nil { - t.Fatal(err) - } - if text != "Hello World" { - t.Fatalf("got %q, want Hello World", text) - } -} -``` - -## Testing using mocks - -*Note*: You can see the full -[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock). - -When mocking code you need to work with interfaces. Let’s create an interface -for the `cloud.google.com/go/translate/apiv3` client used in the -`TranslateTextWithConcreteClient` function mentioned in the previous section. -The `translate.Client` has over a dozen methods but this code only uses one of -them. Here is an interface that satisfies the interactions of the -`translate.Client` in this function. - -```go -type TranslationClient interface { - TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) -} -``` - -Now that we have an interface that satisfies the method being used we can -rewrite the function signature to take the interface instead of the concrete -type. - -```go -func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) { -// ... -} -``` - -This allows a real `translate.Client` to be passed to the method in production -and for a mock implementation to be passed in during testing. This pattern can -be applied to any Go code, not just `cloud.google.com/go`. This is because -interfaces in Go are implicitly satisfied. Structs in the client libraries can -implicitly implement interfaces defined in your codebase. Let’s take a look at -what it might look like to define a lightweight mock for the `TranslationClient` -interface. - -```go -import ( - "context" - "testing" - - "github.com/googleapis/gax-go/v2" - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" -) - -type mockClient struct{} - -func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) { - resp := &translatepb.TranslateTextResponse{ - Translations: []*translatepb.Translation{ - &translatepb.Translation{ - TranslatedText: "Hello World", - }, - }, - } - return resp, nil -} - -func TestTranslateTextWithAbstractClient(t *testing.T) { - client := &mockClient{} - text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US") - if err != nil { - t.Fatal(err) - } - if text != "Hello World" { - t.Fatalf("got %q, want Hello World", text) - } -} -``` - -If you prefer to not write your own mocks there are mocking frameworks such as -[golang/mock](https://github.com/golang/mock) which can generate mocks for you -from an interface. As a word of caution though, try to not -[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html). - -## Testing using emulators - -Some of the client libraries provided in `cloud.google.com/go` support running -against a service emulator. The concept is similar to that of using fakes, -mentioned above, but the server is managed for you. You just need to start it up -and instruct the client library to talk to the emulator by setting a service -specific emulator environment variable. 
Current services/environment-variables -are: - -- bigtable: `BIGTABLE_EMULATOR_HOST` -- datastore: `DATASTORE_EMULATOR_HOST` -- firestore: `FIRESTORE_EMULATOR_HOST` -- pubsub: `PUBSUB_EMULATOR_HOST` -- spanner: `SPANNER_EMULATOR_HOST` -- storage: `STORAGE_EMULATOR_HOST` - - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud. - -For more information on emulators please refer to the -[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators). diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 1ec4a1ee867..521b3219800 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -23,13 +23,16 @@ const ( ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuSouth2RegionID = "eu-south-2" // Europe (Spain). EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). 
@@ -157,6 +160,9 @@ var awsPartition = partition{ "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, "ap-southeast-1": region{ Description: "Asia Pacific (Singapore)", }, @@ -172,12 +178,18 @@ var awsPartition = partition{ "eu-central-1": region{ Description: "Europe (Frankfurt)", }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, "eu-north-1": region{ Description: "Europe (Stockholm)", }, "eu-south-1": region{ Description: "Europe (Milan)", }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, "eu-west-1": region{ Description: "Europe (Ireland)", }, @@ -237,6 +249,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -258,12 +273,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -399,6 +420,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -429,12 +453,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -633,6 +663,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -897,6 +930,25 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "aoss": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "api.detective": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -1084,6 +1136,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -1196,6 +1256,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -1212,6 +1280,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1402,6 +1478,26 @@ var awsPartition = partition{ }, }, }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.elastic-inference": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1593,6 +1689,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1601,6 +1705,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.iotwireless.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -1960,6 +2072,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1981,12 +2096,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2142,6 +2263,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2157,12 +2281,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2339,6 +2469,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2354,12 +2487,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2919,6 +3058,9 @@ var awsPartition = partition{ }, "appsync": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -2940,6 +3082,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3020,56 +3165,183 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3109,12 +3381,30 @@ var awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"athena.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -3124,6 +3414,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -3133,6 +3429,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -3142,6 +3444,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -3215,6 +3523,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3230,12 +3541,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3678,6 +3995,16 @@ var awsPartition = partition{ }, }, }, + "cases": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "cassandra": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -3894,6 +4221,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3915,12 +4245,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3975,6 +4311,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4070,6 +4409,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4085,12 +4427,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4262,6 +4610,9 @@ var 
awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4336,6 +4687,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4351,12 +4705,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4622,6 +4982,17 @@ var awsPartition = partition{ }, }, }, + "codecatalyst": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "codecatalyst.global.api.aws", + }, + }, + }, "codecommit": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4793,6 +5164,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4808,12 +5182,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5252,6 +5632,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5352,6 +5735,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5830,6 +6216,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5845,12 +6234,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6264,6 +6659,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6750,6 +7148,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6966,6 +7367,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6981,12 +7385,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: 
endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7154,6 +7564,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7194,6 +7607,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7236,12 +7652,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7621,6 +8043,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7690,6 +8115,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7720,12 +8148,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7928,6 +8362,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8003,6 +8440,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8024,12 +8464,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8181,6 +8627,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8196,12 +8645,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8413,6 +8868,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8477,6 +8935,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8492,12 +8953,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + 
Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9019,6 +9486,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -9073,6 +9549,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9154,6 +9639,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9169,12 +9657,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9293,6 +9787,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9316,12 +9813,18 @@ var awsPartition = partition{ }: endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9477,6 +9980,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9836,6 +10342,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9851,12 +10360,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9978,6 +10493,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9993,12 +10511,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10264,6 +10788,9 @@ var awsPartition = 
partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10847,15 +11374,33 @@ var awsPartition = partition{ }, "fsx": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10868,12 +11413,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -10910,6 +11464,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-west-2", }: endpoint{ @@ -10937,6 +11500,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -10946,6 +11518,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "prod-ca-central-1", }: endpoint{ @@ -11000,6 +11575,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-west-2", }: endpoint{ @@ -11018,6 +11611,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11036,6 +11632,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -11116,6 +11721,9 @@ var awsPartition = partition{ }, "gamesparks": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11703,6 +12311,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, 
endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11998,6 +12609,112 @@ var awsPartition = partition{ }, }, }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "ingest-fips-us-east-1", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-east-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-west-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "inspector": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12255,6 +12972,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12609,6 +13329,16 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "iotroborunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -13204,6 +13934,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13219,12 +13952,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: 
endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13441,6 +14180,15 @@ var awsPartition = partition{ }, "kms": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "af-south-1", }: endpoint{}, @@ -13549,6 +14297,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13639,6 +14405,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -13675,6 +14459,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14040,6 +14842,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14085,6 +14896,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14103,6 +14923,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14169,6 +14998,12 @@ var awsPartition = partition{ endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14572,6 +15407,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, 
@@ -14587,12 +15425,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14756,6 +15600,15 @@ var awsPartition = partition{ }, "m2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -14768,6 +15621,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -15505,12 +16364,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips", }: endpoint{ @@ -15588,6 +16453,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15603,12 +16471,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -15928,6 +16802,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15943,12 +16820,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16510,6 +17393,88 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "oidc": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17044,6 +18009,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17059,12 +18027,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17123,7 +18097,21 @@ var awsPartition = partition{ }: endpoint{}, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Hostname: "pinpoint.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17133,6 +18121,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17142,6 +18139,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -17168,6 +18174,23 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "pinpoint.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -17187,6 +18210,79 @@ var awsPartition = partition{ }, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17528,9 +18624,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -17786,6 +18900,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17871,12 +18988,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17998,6 +19121,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18028,12 +19154,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18390,6 +19522,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18411,12 +19546,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18872,6 +20013,126 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "resource-explorer-2.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-3.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "resource-explorer-2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "resource-explorer-2.us-west-2.api.aws", + }, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18958,6 +20219,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19589,6 +20853,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -19663,6 +20936,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -19681,6 +20963,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.eu-south-1.amazonaws.com", }, + 
endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -20426,6 +21717,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -20517,6 +21811,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sagemaker-geospatial": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "savingsplans": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -20531,6 +21832,37 @@ var awsPartition = partition{ }, }, }, + "scheduler": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "schemas": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20642,6 +21974,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20672,12 +22007,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -20900,6 +22241,31 @@ var awsPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -21339,6 +22705,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21641,6 +23010,34 @@ var awsPartition = partition{ }, }, }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -22131,6 +23528,9 @@ var awsPartition = 
partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22146,12 +23546,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22270,6 +23676,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22285,12 +23694,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22406,6 +23821,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22427,12 +23845,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22667,6 +24091,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22682,12 +24109,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22854,6 +24287,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22962,6 +24398,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22977,12 +24416,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23045,6 +24490,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23068,12 +24516,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + 
endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23212,6 +24666,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23227,12 +24684,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23345,6 +24808,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23360,12 +24826,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23478,6 +24950,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23493,12 +24968,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23967,6 +25448,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24185,6 +25669,67 @@ var awsPartition = partition{ }, }, }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.voice-chime.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "fips.voice-chime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.voice-chime.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "fips.voice-chime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "voiceid": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24196,22 +25741,46 @@ var awsPartition = partition{ endpointKey{ 
Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ - + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, Deprecated: boxedTrue, }, endpointKey{ @@ -24220,14 +25789,18 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, }, }, "waf": service{ @@ -25513,12 +27086,50 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ui-ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "ui-us-east-1", + }: endpoint{}, + endpointKey{ + Region: "ui-us-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "workdocs": service{ @@ -25718,6 +27329,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25733,12 +27347,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -26055,9 +27675,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "autoscaling": service{ @@ -27004,6 +28636,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: 
endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28150,6 +29807,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -28159,6 +29822,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -28388,6 +30057,14 @@ var awsusgovPartition = partition{ }, }, "cloudtrail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", @@ -28726,6 +30403,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "data-ats.iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -29094,6 +30781,15 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-east-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -29102,6 +30798,15 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-west-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "ecs": service{ @@ -30000,6 +31705,13 @@ var awsusgovPartition = partition{ }, }, }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "inspector": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31135,6 +32847,31 @@ var awsusgovPartition = partition{ }, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-west-1.api.aws", + }, + }, + }, "resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -32971,6 +34708,9 @@ var awsisoPartition = 
partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "dynamodb": service{ @@ -33045,6 +34785,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, @@ -33054,6 +34803,15 @@ var awsisoPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "elasticloadbalancing": service{ @@ -33105,6 +34863,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "glacier": service{ @@ -33262,6 +35023,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "rds": service{ @@ -33373,6 +35137,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "states": service{ @@ -33440,6 +35207,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "tagging": service{ @@ -33934,6 +35704,13 @@ var awsisobPartition = partition{ }, }, }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "s3": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index c09688d1c04..ee4c2d9e987 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.109" +const SDKVersion = "1.44.156" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go index ebcbc2b40a3..34fea49ca81 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -1,9 +1,8 @@ package shareddefaults import ( - "os" + "os/user" "path/filepath" - "runtime" ) // SharedCredentialsFilename returns the SDK's default file path @@ -31,10 +30,17 @@ func SharedConfigFilename() string { // UserHomeDir returns the home directory for the user the process is // running under. 
func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") + var home string + + home = userHomeDir() + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir } - // *nix - return os.Getenv("HOME") + return home } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go new file mode 100644 index 00000000000..eb298ae0fc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go @@ -0,0 +1,18 @@ +//go:build !go1.12 +// +build !go1.12 + +package shareddefaults + +import ( + "os" + "runtime" +) + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go new file mode 100644 index 00000000000..51541b50876 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go @@ -0,0 +1,13 @@ +//go:build go1.12 +// +build go1.12 + +package shareddefaults + +import ( + "os" +) + +func userHomeDir() string { + home, _ := os.UserHomeDir() + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 2aec80661a4..12e814ddf25 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -4,7 +4,6 @@ package jsonutil import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "math" "reflect" @@ -16,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + var timeType = reflect.ValueOf(time.Time{}).Type() var byteSliceType = reflect.ValueOf([]byte{}).Type() @@ -211,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) case reflect.Float64: f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + switch { + case math.IsNaN(f): + writeString(floatNaN, buf) + case math.IsInf(f, 1): + writeString(floatInf, buf) + case math.IsInf(f, -1): + writeString(floatNegInf, buf) + default: + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) default: switch converted := value.Interface().(type) { case time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index 8b2c9bbeba0..f9334879b80 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "math" "math/big" "reflect" "strings" @@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag return err } value.Set(reflect.ValueOf(v)) + 
case *float64: + // These are regular strings when parsed by encoding/json's unmarshaler. + switch { + case strings.EqualFold(d, floatNaN): + value.Set(reflect.ValueOf(aws.Float64(math.NaN()))) + case strings.EqualFold(d, floatInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(1)))) + case strings.EqualFold(d, floatNegInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1)))) + default: + return fmt.Errorf("unknown JSON number value: %s", d) + } default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 75866d01218..058334053c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -3,6 +3,7 @@ package queryutil import ( "encoding/base64" "fmt" + "math" "net/url" "reflect" "sort" @@ -13,6 +14,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Parse parses an object i and fills a url.Values object. The isEC2 flag // indicates if this is the EC2 Query sub-protocol. func Parse(body url.Values, i interface{}, isEC2 bool) error { @@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta case int: v.Set(name, strconv.Itoa(value)) case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + var str string + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + v.Set(name, str) case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + asFloat64 := float64(value) + var str string + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + v.Set(name, str) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" format := tag.Get("timestampFormat") diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 63f66af2c62..1d273ff0ec6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "fmt" "io" + "math" "net/http" "net/url" "path" @@ -20,6 +21,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -302,7 +309,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) case int64: str = strconv.FormatInt(value, 10) case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index cdef403e219..79fcf1699b7 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "reflect" "strconv" @@ -231,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&i)) case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err + var f float64 + switch { + case strings.EqualFold(header, floatNaN): + f = math.NaN() + case strings.EqualFold(header, floatInf): + f = math.Inf(1) + case strings.EqualFold(header, floatNegInf): + f = math.Inf(-1) + default: + var err error + f, err = strconv.ParseFloat(header, 64) + if err != nil { + return err + } } v.Set(reflect.ValueOf(&f)) case *time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 2fbb93ae76a..58c12bd8ccb 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "encoding/xml" "fmt" + "math" "reflect" "sort" "strconv" @@ -14,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // BuildXML will serialize params into an xml.Encoder. Error will be returned // if the serialization of any of the params or nested values fails. func BuildXML(params interface{}, e *xml.Encoder) error { @@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect // Error will be returned if the value type is unsupported. func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { var str string + switch converted := value.Interface().(type) { case string: str = converted @@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case int: str = strconv.Itoa(converted) case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) + switch { + case math.IsNaN(converted): + str = floatNaN + case math.IsInf(converted, 1): + str = floatInf + case math.IsInf(converted, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(converted, 'f', -1, 64) + } case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently. 
+ asFloat64 := float64(converted) + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 107c053f8ac..44a580a940b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/xml" "fmt" "io" + "math" "reflect" "strconv" "strings" @@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err + var v float64 + switch { + case strings.EqualFold(node.Text, floatNaN): + v = math.NaN() + case strings.EqualFold(node.Text, floatInf): + v = math.Inf(1) + case strings.EqualFold(node.Text, floatNegInf): + v = math.Inf(-1) + default: + var err error + v, err = strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } } r.Set(reflect.ValueOf(&v)) case *time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index 273b2da221d..12fcef703f0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -60,6 +60,10 @@ func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // Adds a statement to a topic's access control policy, granting access for // the specified Amazon Web Services accounts to the specified actions. // +// To remove the ability to change topic permissions, you must deny permissions +// to the AddPermission, RemovePermission, and SetTopicAttributes actions in +// your IAM policy. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3634,6 +3638,10 @@ func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *reques // // Removes a statement from a topic's access control policy. // +// To remove the ability to change topic permissions, you must deny permissions +// to the AddPermission, RemovePermission, and SetTopicAttributes actions in +// your IAM policy. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4101,6 +4109,10 @@ func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *re // // Allows a topic owner to set an attribute of the topic to a new value. // +// To remove the ability to change topic permissions, you must deny permissions +// to the AddPermission, RemovePermission, and SetTopicAttributes actions in +// your IAM policy. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
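Editorial note (not part of the patch): the jsonutil, queryutil, rest, and xmlutil hunks above all adopt the same convention for IEEE-754 special values, serializing them as the sentinel strings "NaN", "Infinity", and "-Infinity" and mapping those strings back on the unmarshal side. The following standalone Go sketch illustrates that round-trip under the same constants; the helper names formatSpecialFloat and parseSpecialFloat are illustrative only and do not appear in the SDK.

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// Sentinel strings used for IEEE-754 special values, mirroring the
// constants added in the protocol marshaler hunks above.
const (
	floatNaN    = "NaN"
	floatInf    = "Infinity"
	floatNegInf = "-Infinity"
)

// formatSpecialFloat renders f the way the updated marshalers do:
// special values become sentinel strings, everything else plain decimal.
func formatSpecialFloat(f float64) string {
	switch {
	case math.IsNaN(f):
		return floatNaN
	case math.IsInf(f, 1):
		return floatInf
	case math.IsInf(f, -1):
		return floatNegInf
	default:
		return strconv.FormatFloat(f, 'f', -1, 64)
	}
}

// parseSpecialFloat is the inverse used on the unmarshal side: sentinel
// strings map back to NaN/±Inf, anything else goes through ParseFloat.
func parseSpecialFloat(s string) (float64, error) {
	switch {
	case strings.EqualFold(s, floatNaN):
		return math.NaN(), nil
	case strings.EqualFold(s, floatInf):
		return math.Inf(1), nil
	case strings.EqualFold(s, floatNegInf):
		return math.Inf(-1), nil
	default:
		return strconv.ParseFloat(s, 64)
	}
}

func main() {
	for _, f := range []float64{math.NaN(), math.Inf(1), math.Inf(-1), 1.25} {
		s := formatSpecialFloat(f)
		back, _ := parseSpecialFloat(s)
		fmt.Printf("%v -> %q -> %v\n", f, s, back)
	}
}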
@@ -4428,6 +4440,10 @@ func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, // the endpoint owner can easily resubscribe to the topic if the Unsubscribe // request was unintended. // +// Amazon SQS queue subscriptions require authentication for deletion. Only +// the owner of the subscription, or the owner of the topic can unsubscribe +// using the required Amazon Web Services signature. +// // This action is throttled at 100 transactions per second (TPS). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5385,6 +5401,18 @@ type CreateTopicInput struct { // * Policy – The policy that defines who can access your topic. By default, // only the topic owner can publish or subscribe to the topic. // + // * SignatureVersion – The signature version corresponds to the hashing + // algorithm used while creating the signature of the notifications, subscription + // confirmations, or unsubscribe confirmation messages sent by Amazon SNS. + // By default, SignatureVersion is set to 1. + // + // * TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig + // is set to PassThrough, and the topic passes through the tracing header + // it receives from an Amazon SNS publisher to its subscriptions. If set + // to Active, Amazon SNS will vend X-Ray segment data to topic owner account + // if the sampled flag in the tracing header is true. This is only supported + // on standard topics. + // // The following attribute applies only to server-side encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // // * KmsMasterKeyId – The ID of an Amazon Web Services managed customer @@ -6313,6 +6341,11 @@ type GetSubscriptionAttributesOutput struct { // For more information, see Amazon SNS Message Filtering (https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html) // in the Amazon SNS Developer Guide. // + // * FilterPolicyScope – This attribute lets you choose the filtering scope + // by using one of the following string value types: MessageAttributes (default) + // – The filter is applied on the message attributes. MessageBody – The + // filter is applied on the message body. + // // * Owner – The Amazon Web Services account ID of the subscription's owner. // // * PendingConfirmation – true if the subscription hasn't been confirmed. @@ -6429,10 +6462,20 @@ type GetTopicAttributesOutput struct { // * DisplayName – The human-readable name used in the From field for notifications // to email and email-json endpoints. // + // * EffectiveDeliveryPolicy – The JSON serialization of the effective + // delivery policy, taking system defaults into account. + // // * Owner – The Amazon Web Services account ID of the topic's owner. // // * Policy – The JSON serialization of the topic's access control policy. // + // * SignatureVersion – The version of the Amazon SNS signature used for + // the topic. By default, SignatureVersion is set to 1. The signature is + // a Base64-encoded SHA1withRSA signature. When you set SignatureVersion + // to 2. Amazon SNS uses a Base64-encoded SHA256withRSA signature. If the + // API response does not include the SignatureVersion attribute, it means + // that the SignatureVersion for the topic has value 1. + // // * SubscriptionsConfirmed – The number of confirmed subscriptions for // the topic. // @@ -6444,8 +6487,12 @@ type GetTopicAttributesOutput struct { // // * TopicArn – The topic's ARN. 
// - // * EffectiveDeliveryPolicy – The JSON serialization of the effective - // delivery policy, taking system defaults into account. + // * TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig + // is set to PassThrough, and the topic passes through the tracing header + // it receives from an Amazon SNS publisher to its subscriptions. If set + // to Active, Amazon SNS will vend X-Ray segment data to topic owner account + // if the sampled flag in the tracing header is true. This is only supported + // on standard topics. // // The following attribute applies only to server-side-encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // @@ -8756,6 +8803,11 @@ type SetSubscriptionAttributesInput struct { // only a subset of messages, rather than receiving every message published // to the topic. // + // * FilterPolicyScope – This attribute lets you choose the filtering scope + // by using one of the following string value types: MessageAttributes (default) + // – The filter is applied on the message attributes. MessageBody – The + // filter is applied on the message body. + // // * RawMessageDelivery – When set to true, enables raw message delivery // to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints // to process JSON formatting, which is otherwise created for Amazon SNS @@ -8882,6 +8934,13 @@ type SetTopicAttributesInput struct { // * Policy – The policy that defines who can access your topic. By default, // only the topic owner can publish or subscribe to the topic. // + // * TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig + // is set to PassThrough, and the topic passes through the tracing header + // it receives from an Amazon SNS publisher to its subscriptions. If set + // to Active, Amazon SNS will vend X-Ray segment data to topic owner account + // if the sampled flag in the tracing header is true. This is only supported + // on standard topics. + // // The following attribute applies only to server-side-encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // // * KmsMasterKeyId – The ID of an Amazon Web Services managed customer @@ -8890,6 +8949,10 @@ type SetTopicAttributesInput struct { // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the Key Management Service API Reference. // + // * SignatureVersion – The signature version corresponds to the hashing + // algorithm used while creating the signature of the notifications, subscription + // confirmations, or unsubscribe confirmation messages sent by Amazon SNS. + // // The following attribute applies only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): // // * ContentBasedDeduplication – Enables content-based deduplication for @@ -9004,6 +9067,11 @@ type SubscribeInput struct { // only a subset of messages, rather than receiving every message published // to the topic. // + // * FilterPolicyScope – This attribute lets you choose the filtering scope + // by using one of the following string value types: MessageAttributes (default) + // – The filter is applied on the message attributes. MessageBody – The + // filter is applied on the message body. + // // * RawMessageDelivery – When set to true, enables raw message delivery // to Amazon SQS or HTTP/S endpoints. 
This eliminates the need for the endpoints // to process JSON formatting, which is otherwise created for Amazon SNS diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 2b7e675ab86..c0706bec93d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -74,16 +74,16 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // When you create a role, you create two policies: A role trust policy that @@ -307,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. 
You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Calling AssumeRoleWithSAML does not require the use of Amazon Web Services @@ -343,11 +343,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -563,16 +564,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. 
You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // # Tags @@ -588,11 +589,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -1110,9 +1112,9 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass @@ -1424,11 +1426,12 @@ type AssumeRoleInput struct { // \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1441,11 +1444,12 @@ type AssumeRoleInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1520,11 +1524,12 @@ type AssumeRoleInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. 
When you do, session tags override a role tag with the same @@ -1843,11 +1848,12 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1860,11 +1866,12 @@ type AssumeRoleWithSAMLInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2190,11 +2197,12 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. 
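Aside: a rough sketch of how the packed-policy-size behaviour described in these doc comments surfaces when calling AssumeRole. The role ARN, inline policy document, and managed policy ARN below are placeholders for illustration, not values from this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		// Inline session policy plus one managed policy ARN; both count
		// toward the packed binary size limit described above.
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// PackedPolicySize reports, as a percentage, how close the request came to the limit.
	if out.PackedPolicySize != nil {
		fmt.Printf("packed policy size: %d%%\n", *out.PackedPolicySize)
	}
}
```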
Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2207,11 +2215,12 @@ type AssumeRoleWithWebIdentityInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2934,8 +2943,8 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, // then the resulting federated user session has no permissions. @@ -2960,11 +2969,12 @@ type GetFederationTokenInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2973,11 +2983,12 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. 
You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // // This parameter is optional. However, if you do not pass any session policies, @@ -2997,11 +3008,12 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -3015,11 +3027,12 @@ type GetFederationTokenInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. 
When you do, session tags override a user diff --git a/vendor/github.com/benbjohnson/clock/LICENSE b/vendor/github.com/benbjohnson/clock/LICENSE new file mode 100644 index 00000000000..ce212cb1cee --- /dev/null +++ b/vendor/github.com/benbjohnson/clock/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/benbjohnson/clock/README.md b/vendor/github.com/benbjohnson/clock/README.md new file mode 100644 index 00000000000..4f1f82fc6d7 --- /dev/null +++ b/vendor/github.com/benbjohnson/clock/README.md @@ -0,0 +1,105 @@ +clock +===== + +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/benbjohnson/clock) + +Clock is a small library for mocking time in Go. It provides an interface +around the standard library's [`time`][time] package so that the application +can use the realtime clock while tests can use the mock clock. + +The module is currently maintained by @djmitche. + +[time]: https://pkg.go.dev/github.com/benbjohnson/clock + +## Usage + +### Realtime Clock + +Your application can maintain a `Clock` variable that will allow realtime and +mock clocks to be interchangeable. For example, if you had an `Application` type: + +```go +import "github.com/benbjohnson/clock" + +type Application struct { + Clock clock.Clock +} +``` + +You could initialize it to use the realtime clock like this: + +```go +var app Application +app.Clock = clock.New() +... +``` + +Then all timers and time-related functionality should be performed from the +`Clock` variable. + + +### Mocking time + +In your tests, you will want to use a `Mock` clock: + +```go +import ( + "testing" + + "github.com/benbjohnson/clock" +) + +func TestApplication_DoSomething(t *testing.T) { + mock := clock.NewMock() + app := Application{Clock: mock} + ... +} +``` + +Now that you've initialized your application to use the mock clock, you can +adjust the time programmatically. The mock clock always starts from the Unix +epoch (midnight UTC on Jan 1, 1970). + + +### Controlling time + +The mock clock provides the same functions that the standard library's `time` +package provides. For example, to find the current time, you use the `Now()` +function: + +```go +mock := clock.NewMock() + +// Find the current time. +mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC + +// Move the clock forward. +mock.Add(2 * time.Hour) + +// Check the time again. It's 2 hours later! 
+mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC +``` + +Timers and Tickers are also controlled by this same mock clock. They will only +execute when the clock is moved forward: + +```go +mock := clock.NewMock() +count := 0 + +// Kick off a timer to increment every 1 mock second. +go func() { + ticker := mock.Ticker(1 * time.Second) + for { + <-ticker.C + count++ + } +}() +runtime.Gosched() + +// Move the clock forward 10 seconds. +mock.Add(10 * time.Second) + +// This prints 10. +fmt.Println(count) +``` diff --git a/vendor/github.com/benbjohnson/clock/clock.go b/vendor/github.com/benbjohnson/clock/clock.go new file mode 100644 index 00000000000..40555b3037b --- /dev/null +++ b/vendor/github.com/benbjohnson/clock/clock.go @@ -0,0 +1,378 @@ +package clock + +import ( + "context" + "sort" + "sync" + "time" +) + +// Re-export of time.Duration +type Duration = time.Duration + +// Clock represents an interface to the functions in the standard library time +// package. Two implementations are available in the clock package. The first +// is a real-time clock which simply wraps the time package's functions. The +// second is a mock clock which will only change when +// programmatically adjusted. +type Clock interface { + After(d time.Duration) <-chan time.Time + AfterFunc(d time.Duration, f func()) *Timer + Now() time.Time + Since(t time.Time) time.Duration + Until(t time.Time) time.Duration + Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time + Ticker(d time.Duration) *Ticker + Timer(d time.Duration) *Timer + WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc) + WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc) +} + +// New returns an instance of a real-time clock. +func New() Clock { + return &clock{} +} + +// clock implements a real-time clock by simply wrapping the time package functions. +type clock struct{} + +func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) } + +func (c *clock) AfterFunc(d time.Duration, f func()) *Timer { + return &Timer{timer: time.AfterFunc(d, f)} +} + +func (c *clock) Now() time.Time { return time.Now() } + +func (c *clock) Since(t time.Time) time.Duration { return time.Since(t) } + +func (c *clock) Until(t time.Time) time.Duration { return time.Until(t) } + +func (c *clock) Sleep(d time.Duration) { time.Sleep(d) } + +func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) } + +func (c *clock) Ticker(d time.Duration) *Ticker { + t := time.NewTicker(d) + return &Ticker{C: t.C, ticker: t} +} + +func (c *clock) Timer(d time.Duration) *Timer { + t := time.NewTimer(d) + return &Timer{C: t.C, timer: t} +} + +func (c *clock) WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc) { + return context.WithDeadline(parent, d) +} + +func (c *clock) WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc) { + return context.WithTimeout(parent, t) +} + +// Mock represents a mock clock that only moves forward programmically. +// It can be preferable to a real-time clock when testing time-based functionality. +type Mock struct { + mu sync.Mutex + now time.Time // current time + timers clockTimers // tickers & timers +} + +// NewMock returns an instance of a mock clock. +// The current time of the mock clock on initialization is the Unix epoch. 
+func NewMock() *Mock { + return &Mock{now: time.Unix(0, 0)} +} + +// Add moves the current time of the mock clock forward by the specified duration. +// This should only be called from a single goroutine at a time. +func (m *Mock) Add(d time.Duration) { + // Calculate the final current time. + t := m.now.Add(d) + + // Continue to execute timers until there are no more before the new time. + for { + if !m.runNextTimer(t) { + break + } + } + + // Ensure that we end with the new time. + m.mu.Lock() + m.now = t + m.mu.Unlock() + + // Give a small buffer to make sure that other goroutines get handled. + gosched() +} + +// Set sets the current time of the mock clock to a specific one. +// This should only be called from a single goroutine at a time. +func (m *Mock) Set(t time.Time) { + // Continue to execute timers until there are no more before the new time. + for { + if !m.runNextTimer(t) { + break + } + } + + // Ensure that we end with the new time. + m.mu.Lock() + m.now = t + m.mu.Unlock() + + // Give a small buffer to make sure that other goroutines get handled. + gosched() +} + +// runNextTimer executes the next timer in chronological order and moves the +// current time to the timer's next tick time. The next time is not executed if +// its next time is after the max time. Returns true if a timer was executed. +func (m *Mock) runNextTimer(max time.Time) bool { + m.mu.Lock() + + // Sort timers by time. + sort.Sort(m.timers) + + // If we have no more timers then exit. + if len(m.timers) == 0 { + m.mu.Unlock() + return false + } + + // Retrieve next timer. Exit if next tick is after new time. + t := m.timers[0] + if t.Next().After(max) { + m.mu.Unlock() + return false + } + + // Move "now" forward and unlock clock. + m.now = t.Next() + m.mu.Unlock() + + // Execute timer. + t.Tick(m.now) + return true +} + +// After waits for the duration to elapse and then sends the current time on the returned channel. +func (m *Mock) After(d time.Duration) <-chan time.Time { + return m.Timer(d).C +} + +// AfterFunc waits for the duration to elapse and then executes a function. +// A Timer is returned that can be stopped. +func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer { + t := m.Timer(d) + t.C = nil + t.fn = f + return t +} + +// Now returns the current wall time on the mock clock. +func (m *Mock) Now() time.Time { + m.mu.Lock() + defer m.mu.Unlock() + return m.now +} + +// Since returns time since `t` using the mock clock's wall time. +func (m *Mock) Since(t time.Time) time.Duration { + return m.Now().Sub(t) +} + +// Until returns time until `t` using the mock clock's wall time. +func (m *Mock) Until(t time.Time) time.Duration { + return t.Sub(m.Now()) +} + +// Sleep pauses the goroutine for the given duration on the mock clock. +// The clock must be moved forward in a separate goroutine. +func (m *Mock) Sleep(d time.Duration) { + <-m.After(d) +} + +// Tick is a convenience function for Ticker(). +// It will return a ticker channel that cannot be stopped. +func (m *Mock) Tick(d time.Duration) <-chan time.Time { + return m.Ticker(d).C +} + +// Ticker creates a new instance of Ticker. +func (m *Mock) Ticker(d time.Duration) *Ticker { + m.mu.Lock() + defer m.mu.Unlock() + ch := make(chan time.Time, 1) + t := &Ticker{ + C: ch, + c: ch, + mock: m, + d: d, + next: m.now.Add(d), + } + m.timers = append(m.timers, (*internalTicker)(t)) + return t +} + +// Timer creates a new instance of Timer. 
+func (m *Mock) Timer(d time.Duration) *Timer { + m.mu.Lock() + defer m.mu.Unlock() + ch := make(chan time.Time, 1) + t := &Timer{ + C: ch, + c: ch, + mock: m, + next: m.now.Add(d), + stopped: false, + } + m.timers = append(m.timers, (*internalTimer)(t)) + return t +} + +func (m *Mock) removeClockTimer(t clockTimer) { + for i, timer := range m.timers { + if timer == t { + copy(m.timers[i:], m.timers[i+1:]) + m.timers[len(m.timers)-1] = nil + m.timers = m.timers[:len(m.timers)-1] + break + } + } + sort.Sort(m.timers) +} + +// clockTimer represents an object with an associated start time. +type clockTimer interface { + Next() time.Time + Tick(time.Time) +} + +// clockTimers represents a list of sortable timers. +type clockTimers []clockTimer + +func (a clockTimers) Len() int { return len(a) } +func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) } + +// Timer represents a single event. +// The current time will be sent on C, unless the timer was created by AfterFunc. +type Timer struct { + C <-chan time.Time + c chan time.Time + timer *time.Timer // realtime impl, if set + next time.Time // next tick time + mock *Mock // mock clock, if set + fn func() // AfterFunc function, if set + stopped bool // True if stopped, false if running +} + +// Stop turns off the ticker. +func (t *Timer) Stop() bool { + if t.timer != nil { + return t.timer.Stop() + } + + t.mock.mu.Lock() + registered := !t.stopped + t.mock.removeClockTimer((*internalTimer)(t)) + t.stopped = true + t.mock.mu.Unlock() + return registered +} + +// Reset changes the expiry time of the timer +func (t *Timer) Reset(d time.Duration) bool { + if t.timer != nil { + return t.timer.Reset(d) + } + + t.mock.mu.Lock() + t.next = t.mock.now.Add(d) + defer t.mock.mu.Unlock() + + registered := !t.stopped + if t.stopped { + t.mock.timers = append(t.mock.timers, (*internalTimer)(t)) + } + + t.stopped = false + return registered +} + +type internalTimer Timer + +func (t *internalTimer) Next() time.Time { return t.next } +func (t *internalTimer) Tick(now time.Time) { + // a gosched() after ticking, to allow any consequences of the + // tick to complete + defer gosched() + + t.mock.mu.Lock() + if t.fn != nil { + // defer function execution until the lock is released, and + defer t.fn() + } else { + t.c <- now + } + t.mock.removeClockTimer((*internalTimer)(t)) + t.stopped = true + t.mock.mu.Unlock() +} + +// Ticker holds a channel that receives "ticks" at regular intervals. +type Ticker struct { + C <-chan time.Time + c chan time.Time + ticker *time.Ticker // realtime impl, if set + next time.Time // next tick time + mock *Mock // mock clock, if set + d time.Duration // time between ticks +} + +// Stop turns off the ticker. +func (t *Ticker) Stop() { + if t.ticker != nil { + t.ticker.Stop() + } else { + t.mock.mu.Lock() + t.mock.removeClockTimer((*internalTicker)(t)) + t.mock.mu.Unlock() + } +} + +// Reset resets the ticker to a new duration. +func (t *Ticker) Reset(dur time.Duration) { + if t.ticker != nil { + t.ticker.Reset(dur) + return + } + + t.mock.mu.Lock() + defer t.mock.mu.Unlock() + + t.d = dur + t.next = t.mock.now.Add(dur) +} + +type internalTicker Ticker + +func (t *internalTicker) Next() time.Time { return t.next } +func (t *internalTicker) Tick(now time.Time) { + select { + case t.c <- now: + default: + } + t.next = now.Add(t.d) + gosched() +} + +// Sleep momentarily so that other goroutines can process. 
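Aside: a small sketch of the Mock timer behaviour implemented above. Timers only fire when the mock clock is advanced with Add or Set, and the tick is delivered on the timer's buffered channel at the timer's deadline:

```go
package main

import (
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

func main() {
	mock := clock.NewMock() // starts at the Unix epoch
	timer := mock.Timer(5 * time.Second)

	// Nothing fires until the mock clock is moved past the deadline.
	mock.Add(10 * time.Second)

	// The tick carries the timer's deadline, not the new "now".
	fmt.Println((<-timer.C).UTC()) // 1970-01-01 00:00:05 +0000 UTC
}
```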
+func gosched() { time.Sleep(1 * time.Millisecond) } + +var ( + // type checking + _ Clock = &Mock{} +) diff --git a/vendor/github.com/benbjohnson/clock/context.go b/vendor/github.com/benbjohnson/clock/context.go new file mode 100644 index 00000000000..eb67594f2c3 --- /dev/null +++ b/vendor/github.com/benbjohnson/clock/context.go @@ -0,0 +1,86 @@ +package clock + +import ( + "context" + "fmt" + "sync" + "time" +) + +func (m *Mock) WithTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + return m.WithDeadline(parent, m.Now().Add(timeout)) +} + +func (m *Mock) WithDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return context.WithCancel(parent) + } + ctx := &timerCtx{clock: m, parent: parent, deadline: deadline, done: make(chan struct{})} + propagateCancel(parent, ctx) + dur := m.Until(deadline) + if dur <= 0 { + ctx.cancel(context.DeadlineExceeded) // deadline has already passed + return ctx, func() {} + } + ctx.Lock() + defer ctx.Unlock() + if ctx.err == nil { + ctx.timer = m.AfterFunc(dur, func() { + ctx.cancel(context.DeadlineExceeded) + }) + } + return ctx, func() { ctx.cancel(context.Canceled) } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent context.Context, child *timerCtx) { + if parent.Done() == nil { + return // parent is never canceled + } + go func() { + select { + case <-parent.Done(): + child.cancel(parent.Err()) + case <-child.Done(): + } + }() +} + +type timerCtx struct { + sync.Mutex + + clock Clock + parent context.Context + deadline time.Time + done chan struct{} + + err error + timer *Timer +} + +func (c *timerCtx) cancel(err error) { + c.Lock() + defer c.Unlock() + if c.err != nil { + return // already canceled + } + c.err = err + close(c.done) + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { return c.deadline, true } + +func (c *timerCtx) Done() <-chan struct{} { return c.done } + +func (c *timerCtx) Err() error { return c.err } + +func (c *timerCtx) Value(key interface{}) interface{} { return c.parent.Value(key) } + +func (c *timerCtx) String() string { + return fmt.Sprintf("clock.WithDeadline(%s [%s])", c.deadline, c.deadline.Sub(c.clock.Now())) +} diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b34..8bf0e5b7815 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. 
+The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 00000000000..94b9c443987 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d5417..a9e0d45c9dc 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. 
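Aside: the exported xxhash API touched by this vendor update is unchanged; a minimal usage sketch (the input strings are arbitrary examples), showing that streaming writes through a Digest match the one-shot functions:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%x\n", xxhash.Sum64String("hello, world"))

	// Streaming hashing with a Digest; feeding the same bytes in pieces
	// yields the same 64-bit result.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%x\n", d.Sum64())
}
```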
func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. - copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf796..3e8b132579e 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. 
+#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. 
- // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 00000000000..7e3145a2218 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. 
+ STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f4d..9216e0a40c1 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821603e..26df13bba4b 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a31f..e86f1b5fd8e 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e49..1c1638fd88a 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go new file mode 100644 index 00000000000..bf7671dd2b2 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go @@ -0,0 +1,70 @@ +// Copyright 2015 CoreOS, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +// Package activation implements primitives for systemd socket activation. +package activation + +import ( + "os" + "strconv" + "strings" + "syscall" +) + +const ( + // listenFdsStart corresponds to `SD_LISTEN_FDS_START`. + listenFdsStart = 3 +) + +// Files returns a slice containing a `os.File` object for each +// file descriptor passed to this process via systemd fd-passing protocol. +// +// The order of the file descriptors is preserved in the returned slice. +// `unsetEnv` is typically set to `true` in order to avoid clashes in +// fd usage and to avoid leaking environment flags to child processes. +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + defer os.Unsetenv("LISTEN_PID") + defer os.Unsetenv("LISTEN_FDS") + defer os.Unsetenv("LISTEN_FDNAMES") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + names := strings.Split(os.Getenv("LISTEN_FDNAMES"), ":") + + files := make([]*os.File, 0, nfds) + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + name := "LISTEN_FD_" + strconv.Itoa(fd) + offset := fd - listenFdsStart + if offset < len(names) && len(names[offset]) > 0 { + name = names[offset] + } + files = append(files, os.NewFile(uintptr(fd), name)) + } + + return files +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go new file mode 100644 index 00000000000..d391bf00c5e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go @@ -0,0 +1,21 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import "os" + +func Files(unsetEnv bool) []*os.File { + return nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go b/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go new file mode 100644 index 00000000000..3dbe2b08776 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go @@ -0,0 +1,103 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "crypto/tls" + "net" +) + +// Listeners returns a slice containing a net.Listener for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} +func Listeners() ([]net.Listener, error) { + files := Files(true) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + if pc, err := net.FileListener(f); err == nil { + listeners[i] = pc + f.Close() + } + } + return listeners, nil +} + +// ListenersWithNames maps a listener name to a set of net.Listener instances. +func ListenersWithNames() (map[string][]net.Listener, error) { + files := Files(true) + listeners := map[string][]net.Listener{} + + for _, f := range files { + if pc, err := net.FileListener(f); err == nil { + current, ok := listeners[f.Name()] + if !ok { + listeners[f.Name()] = []net.Listener{pc} + } else { + listeners[f.Name()] = append(current, pc) + } + f.Close() + } + } + return listeners, nil +} + +// TLSListeners returns a slice containing a net.listener for each matching TCP socket type +// passed to this process. +// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. +func TLSListeners(tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil { + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} + +// TLSListenersWithNames maps a listener name to a net.Listener with +// the associated TLS configuration. +func TLSListenersWithNames(tlsConfig *tls.Config) (map[string][]net.Listener, error) { + listeners, err := ListenersWithNames() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil { + for _, ll := range listeners { + // Activate TLS only for TCP sockets + for i, l := range ll { + if l.Addr().Network() == "tcp" { + ll[i] = tls.NewListener(l, tlsConfig) + } + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go new file mode 100644 index 00000000000..a97206785a4 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go @@ -0,0 +1,38 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns() ([]net.PacketConn, error) { + files := Files(true) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + f.Close() + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go index 8d58ca0fbca..439ad287466 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows // Package journal provides write bindings to the local systemd journal. @@ -53,15 +54,9 @@ var ( onceConn sync.Once ) -func init() { - onceConn.Do(initConn) -} - // Enabled checks whether the local systemd journal is available for logging. func Enabled() bool { - onceConn.Do(initConn) - - if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + if c := getOrInitConn(); c == nil { return false } @@ -82,7 +77,7 @@ func Enabled() bool { // (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) // for more details. vars may be nil. func Send(message string, priority Priority, vars map[string]string) error { - conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + conn := getOrInitConn() if conn == nil { return errors.New("could not initialize socket to journald") } @@ -126,6 +121,16 @@ func Send(message string, priority Priority, vars map[string]string) error { return nil } +// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary +func getOrInitConn() *net.UnixConn { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn != nil { + return conn + } + onceConn.Do(initConn) + return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) +} + func appendVariable(w io.Writer, name, value string) { if err := validVarName(name); err != nil { fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) @@ -194,7 +199,7 @@ func tempFd() (*os.File, error) { } // initConn initializes the global `unixConnPtr` socket. -// It is meant to be called exactly once, at program startup. +// It is automatically called when needed. 
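The activation package added above exposes Files, Listeners, and PacketConns as the entry points for systemd socket activation. The sketch below is editorial illustration only, not part of the vendored code or of this patch: it assumes a unit file that passes a single TCP stream socket, and the ":8080" fallback address is invented for the example.

```go
// Illustrative only: consume a systemd-passed listener via the vendored
// activation package, falling back to a plain listener when the process
// is not socket-activated. The ":8080" fallback is an assumption.
package main

import (
	"log"
	"net"
	"net/http"

	"github.com/coreos/go-systemd/v22/activation"
)

func main() {
	listeners, err := activation.Listeners()
	if err != nil {
		log.Fatal(err)
	}

	var ln net.Listener
	if len(listeners) > 0 && listeners[0] != nil {
		ln = listeners[0] // fd 3, handed over by systemd
	} else {
		// Not started by systemd (or no stream socket was passed).
		ln, err = net.Listen("tcp", ":8080")
		if err != nil {
			log.Fatal(err)
		}
	}

	log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})))
}
```

Listeners() leaves nil entries for file descriptors that are not stream sockets, which is why the element is checked before use.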
func initConn() { autobind, err := net.ResolveUnixAddr("unixgram", "") if err != nil { diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 85f6ab07155..c245a89513f 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -2,7 +2,6 @@ package units import ( "fmt" - "regexp" "strconv" "strings" ) @@ -26,16 +25,17 @@ const ( PiB = 1024 * TiB ) -type unitMap map[string]int64 +type unitMap map[byte]int64 var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} ) -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { i := 0 @@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) { // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } - size, err := strconv.ParseFloat(matches[1], 64) + size, err := strconv.ParseFloat(num, 64) if err != nil { return -1, err } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). 
+ switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix } return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) } diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf432..1d89d85ce4f 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,6 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5ef50..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f56d..77f9593bd58 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,6 +7,95 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. 
+ +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + ## [1.5.4] - 2022-04-25 * Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) @@ -40,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. 
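The 1.6.0 entry above introduces `Event.Has()` and `Op.Has()`. As a purely editorial sketch, not part of the vendored changelog or code, here is how those helpers read in a handler; the reload policy and the file name are invented for illustration.

```go
// Illustrative only: event handling with the Has() helpers added in
// fsnotify 1.6.0 (see the changelog entry above). The reload policy
// below is an assumption made for the example.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func handle(ev fsnotify.Event) {
	// The watched path is gone (or moved away); stop acting on it.
	if ev.Has(fsnotify.Remove) || ev.Has(fsnotify.Rename) {
		log.Println("no longer present:", ev.Name)
		return
	}
	// Treat both writes and newly created files as "changed".
	if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
		log.Println("changed:", ev.Name)
	}
}

func main() {
	handle(fsnotify.Event{Name: "/tmp/config.yaml", Op: fsnotify.Write})
}
```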
+ +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 8a642563d71..ea379759d51 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,60 +1,26 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. 
-* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb059..fb03ade7506 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. 
nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef8ad..d4e6080feb2 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,161 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. -[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** -Cross platform: Windows, Linux, BSD and macOS. 
+--- -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +Platform support: -\* Android and iOS are untested. +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +Linux and macOS should include Android and iOS, but these are currently untested. -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). - -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. 
+ <-make(chan struct{}) } ``` -## Contributing +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: -Please refer to [CONTRIBUTING][] before opening an issue or pull request. + % go run ./cmd/fsnotify -## FAQ +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. -**When a file is moved to another directory is it still being watched?** +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). -No (it shouldn't be, unless you are watching where it was moved to). +[#18]: https://github.com/fsnotify/fsnotify/issues/18 -**When I watch a directory, are all subdirectories watched as well?** +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. -**Do I have to watch the Error and Event channels in a separate goroutine?** +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) +[#9]: https://github.com/fsnotify/fsnotify/issues/9 -**Why am I receiving multiple events for the same file on OS X?** +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE -**How many files can be watched at once?** +This is the event that inotify sends, so not much can be changed about this. -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
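The Linux notes above mention that the inotify limits are exposed under /proc. The snippet below is an editorial sketch, not part of the vendored README: it reads /proc/sys/fs/inotify/max_user_watches so an application can log the limit before it starts adding watches; the helper name is invented for the example.

```go
// Illustrative only (Linux): read the inotify watch limit mentioned in the
// README notes above, so hitting "no space left on device" is less surprising.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// maxUserWatches is a hypothetical helper; the /proc path comes from the text above.
func maxUserWatches() (int, error) {
	b, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	n, err := maxUserWatches()
	if err != nil {
		fmt.Println("could not read inotify limit:", err)
		return
	}
	fmt.Println("fs.inotify.max_user_watches =", n)
}
```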
+To increase them you can use `sysctl` or write the value to proc file: -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +Reaching the limit will result in a "no space left on device" or "too many open +files" error. -## Related Projects +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 00000000000..1a95ad8e7ce --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. 
+// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 00000000000..54c77fbb0ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). 
See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. 
There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). 
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 00000000000..29087469bf8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). 
+// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. 
+func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. 
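// Editor's aside (illustration only; not part of the vendored fsnotify sources):
// the Add documentation above recommends watching a file's parent directory and
// filtering on Event.Name, rather than watching the file itself, because
// atomic-rename style writes silently drop a per-file watch. A hedged sketch of
// that pattern follows; the config path is a hypothetical example.
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	const target = "/etc/myapp/config.yaml" // hypothetical file we care about

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Drain Errors so the watcher's reader goroutine never blocks on it.
	go func() {
		for err := range w.Errors {
			log.Println("watch error:", err)
		}
	}()

	// Watch the directory, not the file, so atomic replaces are still observed.
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		if ev.Name != target {
			continue // event for some other file in the directory
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			log.Println("config changed:", ev)
		}
	}
}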
For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. + for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. 
+ // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 00000000000..a9bb1c3c4d0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 00000000000..ae392867c04 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f55f..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0f4ee52e8aa..30a5bf0f07a 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,29 +1,37 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build !plan9 // +build !plan9 -// Package fsnotify provides a platform-independent interface for file system notifications. +// Package fsnotify provides a cross-platform interface for file system +// notifications. package fsnotify import ( - "bytes" "errors" "fmt" + "strings" ) -// Event represents a single file system notification. +// Event represents a file system notification. type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. + // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. + Op Op } // Op describes a set of file operations. type Op uint32 -// These are the generalized file operations that can trigger a notification. +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. const ( Create Op = 1 << iota Write @@ -32,38 +40,42 @@ const ( Chmod ) -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) - if op&Create == Create { - buffer.WriteString("|CREATE") +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") + if op.Has(Remove) { + b.WriteString("|REMOVE") } - if op&Write == Write { - buffer.WriteString("|WRITE") + if op.Has(Write) { + b.WriteString("|WRITE") } - if op&Rename == Rename { - buffer.WriteString("|RENAME") + if op.Has(Rename) { + b.WriteString("|RENAME") } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") + if op.Has(Chmod) { + b.WriteString("|CHMOD") } - if buffer.Len() == 0 { - return "" + if b.Len() == 0 { + return "[no events]" } - return buffer.String()[1:] // Strip leading pipe + return b.String()[1:] } -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. 
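// Editor's aside (illustration only; not part of the vendored fsnotify sources):
// Op is a bitmask and a single event may carry several operations at once,
// which is why the Event.Has/Op.Has helpers introduced above are preferred
// over comparing Op with ==. A small self-contained sketch of that behaviour;
// the event values are made up for demonstration.
package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// An event reporting both a create and a write on the same path.
	ev := fsnotify.Event{Name: "dir/file", Op: fsnotify.Create | fsnotify.Write}

	fmt.Println(ev.Has(fsnotify.Create)) // true
	fmt.Println(ev.Has(fsnotify.Write))  // true
	fmt.Println(ev.Op == fsnotify.Write) // false: the bitmask also has Create set
	fmt.Println(ev)                      // prints the ops and quoted path via Event.String
}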
func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 59688559836..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec8c1..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. 
-func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c3f1..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d8532e7..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 00000000000..b09ef768340 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 57% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 36cc3845b6e..4322b0b8855 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 52% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 98cd8476ffb..5da5ffa78fe 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build darwin // +build darwin diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb0bb..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = 
filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml index 8cad2987919..e24a6c14e6b 100644 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -51,3 +51,6 @@ linters: - forbidigo - cyclop - varnamelen + - exhaustruct + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml index 449a43c2bc8..4e1fc0c7d48 100644 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -44,3 +44,5 @@ linters: - cyclop - errname - varnamelen + - exhaustruct + - maintidx diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index 854d6eec1e1..77f1f92c5e3 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -99,6 +99,7 @@ func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) { } func errorAsJSON(err Error) []byte { + //nolint:errchkjson b, _ := json.Marshal(struct { Code int32 `json:"code"` Message string `json:"message"` @@ -146,7 +147,7 @@ func ServeError(rw http.ResponseWriter, r *http.Request, err error) { ServeError(rw, r, nil) } case *MethodNotAllowedError: - rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ",")) + rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) rw.WriteHeader(asHTTPCode(int(e.Code()))) if r == nil || r.Method != http.MethodHead { _, _ = rw.Write(errorAsJSON(e)) diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go index 963d4274078..af01190ce61 100644 --- a/vendor/github.com/go-openapi/errors/doc.go +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -13,7 +13,6 @@ // limitations under the License. /* - Package errors provides an Error interface and several concrete types implementing this interface to manage API errors and JSON-schema validation errors. @@ -23,6 +22,5 @@ it defines. It is used throughout the various go-openapi toolkit libraries (https://github.com/go-openapi). 
- */ package errors diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go index c26ad484ebc..963472d1f34 100644 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -28,7 +28,6 @@ type APIVerificationFailed struct { MissingRegistration []string `json:"missingRegistration,omitempty"` } -// func (v *APIVerificationFailed) Error() string { buf := bytes.NewBuffer(nil) diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go index 3efda348216..d4d2b58f2bb 100644 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -16,7 +16,6 @@ package runtime import ( "io" - "io/ioutil" "net/http" "net/url" "time" @@ -79,7 +78,7 @@ type NamedReadCloser interface { func NamedReader(name string, rdr io.Reader) NamedReadCloser { rc, ok := rdr.(io.ReadCloser) if !ok { - rc = ioutil.NopCloser(rdr) + rc = io.NopCloser(rdr) } return &namedReadCloser{ name: name, diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go index 0b7e3824657..0d1691149d4 100644 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -15,10 +15,9 @@ package runtime import ( + "encoding/json" "fmt" "io" - - "encoding/json" ) // A ClientResponse represents a client response @@ -61,13 +60,18 @@ type APIError struct { Code int } -func (a *APIError) Error() string { - resp, _ := json.Marshal(a.Response) - return fmt.Sprintf("%s (status %d): %s", a.OperationName, a.Code, resp) +func (o *APIError) Error() string { + var resp []byte + if err, ok := o.Response.(error); ok { + resp = []byte("'" + err.Error() + "'") + } else { + resp, _ = json.Marshal(o.Response) + } + return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) } -func (a *APIError) String() string { - return a.Error() +func (o *APIError) String() string { + return o.Error() } // IsSuccess returns true when this elapse o k response returns a 2xx status code diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go index a4de897adcd..515969242ca 100644 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ b/vendor/github.com/go-openapi/runtime/constants.go @@ -23,6 +23,8 @@ const ( // HeaderAccept the Accept header HeaderAccept = "Accept" + // HeaderAuthorization the Authorization header + HeaderAuthorization = "Authorization" charsetKey = "charset" diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go index 250e35fb0e1..d21ae4e870c 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/context.go +++ b/vendor/github.com/go-openapi/runtime/middleware/context.go @@ -195,6 +195,17 @@ func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Ro if spec != nil { an = analysis.New(spec.Spec()) } + + return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes) +} + +// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes in input the analysed spec too +func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context { + // Either there are no spec doc and analysis, or both of them. 
+ if !((spec == nil && an == nil) || (spec != nil && an != nil)) { + panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them")) + } + ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} return ctx } @@ -498,7 +509,9 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st if resp, ok := data.(Responder); ok { producers := route.Producers - prod, ok := producers[format] + // producers contains keys with normalized format, if a format has MIME type parameter such as `text/plain; charset=utf-8` + // then you must provide `text/plain` to get the correct producer. HOWEVER, format here is not normalized. + prod, ok := producers[normalizeOffer(format)] if !ok { prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) pr, ok := prods[c.api.DefaultProduces()] diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go index 8fa0cf4e464..9aaf65958ad 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/parameter.go +++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go @@ -206,7 +206,11 @@ func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams if p.parameter.Type == "file" { file, header, ffErr := request.FormFile(p.parameter.Name) if ffErr != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) + if p.parameter.Required { + return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) + } else { + return nil + } } target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) return nil diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go index 2c92f5c91f3..b4dea29e4bc 100644 --- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go @@ -16,6 +16,8 @@ type SwaggerUIOpts struct { Path string // SpecURL the url to find the spec for SpecURL string + // OAuthCallbackURL the url called after OAuth2 login + OAuthCallbackURL string // The three components needed to embed swagger-ui SwaggerURL string @@ -40,6 +42,9 @@ func (r *SwaggerUIOpts) EnsureDefaults() { if r.SpecURL == "" { r.SpecURL = "/swagger.json" } + if r.OAuthCallbackURL == "" { + r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback") + } if r.SwaggerURL == "" { r.SwaggerURL = swaggerLatest } @@ -73,7 +78,7 @@ func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { b := buf.Bytes() return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { + if path.Join(r.URL.Path) == pth { rw.Header().Set("Content-Type", "text/html; charset=utf-8") rw.WriteHeader(http.StatusOK) @@ -149,7 +154,8 @@ const ( plugins: [ SwaggerUIBundle.plugins.DownloadUrl ], - layout: "StandaloneLayout" + layout: "StandaloneLayout", + oauth2RedirectUrl: '{{ .OAuthCallbackURL }}' }) // End Swagger UI call region diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go new file mode 100644 index 00000000000..576f6003f7b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go @@ -0,0 +1,122 @@ +package middleware + +import ( + "bytes" + "fmt" + "net/http" + "path" + "text/template" +) + +func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next 
http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := opts.OAuthCallbackURL + tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, &opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Join(r.URL.Path) == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + swaggerOAuthTemplate = ` + + + + {{ .Title }} + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go index 476d26c3e77..c3ffdac7e87 100644 --- a/vendor/github.com/go-openapi/runtime/security/authenticator.go +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -220,7 +220,7 @@ func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Aut const prefix = "Bearer " return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { var token string - hdr := r.Request.Header.Get("Authorization") + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) if strings.HasPrefix(hdr, prefix) { token = strings.TrimPrefix(hdr, prefix) } @@ -250,7 +250,7 @@ func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runti const prefix = "Bearer " return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { var token string - hdr := r.Request.Header.Get("Authorization") + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) if strings.HasPrefix(hdr, prefix) { token = strings.TrimPrefix(hdr, prefix) } diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index 3e0d8c770d3..bd14c2a269f 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -92,7 +92,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { res := new(Result) s := d.SpecValidator - for method, pathItem := range s.analyzer.Operations() { + for method, pathItem := range s.expandedAnalyzer().Operations() { for path, op := range pathItem { // parameters for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index f4b7a2dfe98..c8bffd78e5a 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -68,7 +68,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { res := new(Result) s := ex.SpecValidator - for method, pathItem := range s.analyzer.Operations() { + for method, pathItem := range s.expandedAnalyzer().Operations() { for path, op := range pathItem { // parameters for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 5d901dda717..48ebfab58e5 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ 
b/vendor/github.com/go-openapi/validate/helpers.go @@ -210,7 +210,7 @@ type paramHelper struct { } func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, res *Result, s *SpecValidator) (params []spec.Parameter) { - operation, ok := s.analyzer.OperationFor(method, path) + operation, ok := s.expandedAnalyzer().OperationFor(method, path) if ok { // expand parameters first if necessary resolvedParams := []spec.Parameter{} @@ -224,7 +224,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re // remove params with invalid expansion from Slice operation.Parameters = resolvedParams - for _, ppr := range s.analyzer.SafeParamsFor(method, path, + for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, func(p spec.Parameter, err error) bool { // since params have already been expanded, there are few causes for error res.AddErrors(someParametersBrokenMsg(path, method, operationID)) diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index cdf5627a2c8..dff01f00be7 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -624,7 +624,7 @@ func (s *SpecValidator) validateParameters() *Result { // - path param must be required res := new(Result) rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) - for method, pi := range s.analyzer.Operations() { + for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) for path, op := range pi { pathToAdd := pathHelp.stripParametersInPath(path) @@ -793,3 +793,12 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio func (s *SpecValidator) SetContinueOnErrors(c bool) { s.Options.ContinueOnErrors = c } + +// expandedAnalyzer returns expanded.Analyzer when it is available. +// otherwise just analyzer. +func (s *SpecValidator) expandedAnalyzer() *analysis.Spec { + if s.expanded != nil && s.expanded.Analyzer != nil { + return s.expanded.Analyzer + } + return s.analyzer +} diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md index 48303001b7c..f5db14f07f8 100644 --- a/vendor/github.com/gofrs/uuid/README.md +++ b/vendor/github.com/gofrs/uuid/README.md @@ -16,6 +16,14 @@ This package supports the following UUID versions: * Version 4, based on random numbers (RFC-4122) * Version 5, based on SHA-1 hashing of a named value (RFC-4122) +This package also supports experimental Universally Unique Identifier implementations based on a +[draft RFC](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) that updates RFC-4122 +* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122) +* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122) + +The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases +to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted. 
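A minimal usage sketch of the constructors described above, assuming the vendored github.com/gofrs/uuid v4 import path; per this bump, NewV7 no longer takes a Precision argument and both draft-RFC constructors return (UUID, error):

package main

import (
	"fmt"

	"github.com/gofrs/uuid"
)

func main() {
	// V7: 48-bit millisecond Unix timestamp up front, the rest pseudorandom
	// (aside from the version/variant bits), so values sort by creation time.
	v7, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}

	// V6: the V1 timestamp fields reordered so the textual form is k-sortable.
	v6, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}

	fmt.Println(v7.String(), v6.String())
}
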
+ ## Project History This project was originally forked from the @@ -106,4 +114,4 @@ func main() { * [RFC-4122](https://tools.ietf.org/html/rfc4122) * [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) -* [New UUID Formats RFC Draft (Peabody) Rev 02](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-02) +* [New UUID Formats RFC Draft (Peabody) Rev 03](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go index 38bf685029e..4550bc6b346 100644 --- a/vendor/github.com/gofrs/uuid/generator.go +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -26,7 +26,6 @@ import ( "crypto/rand" "crypto/sha1" "encoding/binary" - "errors" "fmt" "hash" "io" @@ -71,7 +70,7 @@ func NewV5(ns UUID, name string) UUID { // pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit // order being adjusted to allow the UUID to be k-sortable. // -// This is implemented based on revision 02 of the Peabody UUID draft, and may +// This is implemented based on revision 03 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version @@ -80,22 +79,16 @@ func NewV6() (UUID, error) { return DefaultGenerator.NewV6() } -// NewV7 returns a k-sortable UUID based on the current UNIX epoch, with the -// ability to configure the timestamp's precision from millisecond all the way -// to nanosecond. The additional precision is supported by reducing the amount -// of pseudorandom data that makes up the rest of the UUID. +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. // -// If an unknown Precision argument is passed to this method it will panic. As -// such it's strongly encouraged to use the package-provided constants for this -// value. -// -// This is implemented based on revision 02 of the Peabody UUID draft, and may +// This is implemented based on revision 03 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version // releases until the spec is final. -func NewV7(p Precision) (UUID, error) { - return DefaultGenerator.NewV7(p) +func NewV7() (UUID, error) { + return DefaultGenerator.NewV7() } // Generator provides an interface for generating UUIDs. @@ -105,7 +98,7 @@ type Generator interface { NewV4() (UUID, error) NewV5(ns UUID, name string) UUID NewV6() (UUID, error) - NewV7(Precision) (UUID, error) + NewV7() (UUID, error) } // Gen is a reference UUID generator based on the specifications laid out in @@ -131,10 +124,6 @@ type Gen struct { lastTime uint64 clockSequence uint16 hardwareAddr [6]byte - - v7LastTime uint64 - v7LastSubsec uint64 - v7ClockSequence uint16 } // interface check -- build will fail if *Gen doesn't satisfy Generator @@ -224,7 +213,7 @@ func (g *Gen) NewV5(ns UUID, name string) UUID { // pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit // order being adjusted to allow the UUID to be k-sortable. 
// -// This is implemented based on revision 02 of the Peabody UUID draft, and may +// This is implemented based on revision 03 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version @@ -280,244 +269,36 @@ func (g *Gen) getClockSequence() (uint64, uint16, error) { return timeNow, g.clockSequence, nil } -// Precision is used to configure the V7 generator, to specify how precise the -// timestamp within the UUID should be. -type Precision byte - -const ( - NanosecondPrecision Precision = iota - MicrosecondPrecision - MillisecondPrecision -) - -func (p Precision) String() string { - switch p { - case NanosecondPrecision: - return "nanosecond" - - case MicrosecondPrecision: - return "microsecond" - - case MillisecondPrecision: - return "millisecond" - - default: - return "unknown" - } -} - -// Duration returns the time.Duration for a specific precision. If the Precision -// value is not known, this returns 0. -func (p Precision) Duration() time.Duration { - switch p { - case NanosecondPrecision: - return time.Nanosecond - - case MicrosecondPrecision: - return time.Microsecond - - case MillisecondPrecision: - return time.Millisecond - - default: - return 0 - } -} - -// NewV7 returns a k-sortable UUID based on the current UNIX epoch, with the -// ability to configure the timestamp's precision from millisecond all the way -// to nanosecond. The additional precision is supported by reducing the amount -// of pseudorandom data that makes up the rest of the UUID. +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. // -// If an unknown Precision argument is passed to this method it will panic. As -// such it's strongly encouraged to use the package-provided constants for this -// value. -// -// This is implemented based on revision 02 of the Peabody UUID draft, and may +// This is implemented based on revision 03 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version // releases until the spec is final. 
-func (g *Gen) NewV7(p Precision) (UUID, error) { +func (g *Gen) NewV7() (UUID, error) { var u UUID - var err error - - switch p { - case NanosecondPrecision: - u, err = g.newV7Nano() - - case MicrosecondPrecision: - u, err = g.newV7Micro() - - case MillisecondPrecision: - u, err = g.newV7Milli() - default: - panic(fmt.Sprintf("unknown precision value %d", p)) - } - - if err != nil { + if _, err := io.ReadFull(g.rand, u[6:]); err != nil { return Nil, err } + tn := g.epochFunc() + ms := uint64(tn.Unix())*1e3 + uint64(tn.Nanosecond())/1e6 + u[0] = byte(ms >> 40) + u[1] = byte(ms >> 32) + u[2] = byte(ms >> 24) + u[3] = byte(ms >> 16) + u[4] = byte(ms >> 8) + u[5] = byte(ms) + u.SetVersion(V7) u.SetVariant(VariantRFC4122) return u, nil } -func (g *Gen) newV7Milli() (UUID, error) { - var u UUID - - if _, err := io.ReadFull(g.rand, u[8:]); err != nil { - return Nil, err - } - - sec, nano, seq, err := g.getV7ClockSequence(MillisecondPrecision) - if err != nil { - return Nil, err - } - - msec := (nano / 1000000) & 0xfff - - d := (sec << 28) // set unixts field - d |= (msec << 16) // set msec field - d |= (uint64(seq) & 0xfff) // set seq field - - binary.BigEndian.PutUint64(u[:], d) - - return u, nil -} - -func (g *Gen) newV7Micro() (UUID, error) { - var u UUID - - if _, err := io.ReadFull(g.rand, u[10:]); err != nil { - return Nil, err - } - - sec, nano, seq, err := g.getV7ClockSequence(MicrosecondPrecision) - if err != nil { - return Nil, err - } - - usec := nano / 1000 - usech := (usec << 4) & 0xfff0000 - usecl := usec & 0xfff - - d := (sec << 28) // set unixts field - d |= usech | usecl // set usec fields - - binary.BigEndian.PutUint64(u[:], d) - binary.BigEndian.PutUint16(u[8:], seq) - - return u, nil -} - -func (g *Gen) newV7Nano() (UUID, error) { - var u UUID - - if _, err := io.ReadFull(g.rand, u[11:]); err != nil { - return Nil, err - } - - sec, nano, seq, err := g.getV7ClockSequence(NanosecondPrecision) - if err != nil { - return Nil, err - } - - nano &= 0x3fffffffff - nanoh := nano >> 26 - nanom := (nano >> 14) & 0xfff - nanol := uint16(nano & 0x3fff) - - d := (sec << 28) // set unixts field - d |= (nanoh << 16) | nanom // set nsec high and med fields - - binary.BigEndian.PutUint64(u[:], d) - binary.BigEndian.PutUint16(u[8:], nanol) // set nsec low field - - u[10] = byte(seq) // set seq field - - return u, nil -} - -const ( - maxSeq14 = (1 << 14) - 1 - maxSeq12 = (1 << 12) - 1 - maxSeq8 = (1 << 8) - 1 -) - -// getV7ClockSequence returns the unix epoch, nanoseconds of current second, and -// the sequence for V7 UUIDs. -func (g *Gen) getV7ClockSequence(p Precision) (epoch uint64, nano uint64, seq uint16, err error) { - g.storageMutex.Lock() - defer g.storageMutex.Unlock() - - tn := g.epochFunc() - unix := uint64(tn.Unix()) - nsec := uint64(tn.Nanosecond()) - - // V7 UUIDs have more precise requirements around how the clock sequence - // value is generated and used. Specifically they require that the sequence - // be zero, unless we've already generated a UUID within this unit of time - // (millisecond, microsecond, or nanosecond) at which point you should - // increment the sequence. Likewise if time has warped backwards for some reason (NTP - // adjustment?), we also increment the clock sequence to reduce the risk of a - // collision. 
- switch { - case unix < g.v7LastTime: - g.v7ClockSequence++ - - case unix > g.v7LastTime: - g.v7ClockSequence = 0 - - case unix == g.v7LastTime: - switch p { - case NanosecondPrecision: - if nsec <= g.v7LastSubsec { - if g.v7ClockSequence >= maxSeq8 { - return 0, 0, 0, errors.New("generating nanosecond precision UUIDv7s too fast: internal clock sequence would roll over") - } - - g.v7ClockSequence++ - } else { - g.v7ClockSequence = 0 - } - - case MicrosecondPrecision: - if nsec/1000 <= g.v7LastSubsec/1000 { - if g.v7ClockSequence >= maxSeq14 { - return 0, 0, 0, errors.New("generating microsecond precision UUIDv7s too fast: internal clock sequence would roll over") - } - - g.v7ClockSequence++ - } else { - g.v7ClockSequence = 0 - } - - case MillisecondPrecision: - if nsec/1000000 <= g.v7LastSubsec/1000000 { - if g.v7ClockSequence >= maxSeq12 { - return 0, 0, 0, errors.New("generating millisecond precision UUIDv7s too fast: internal clock sequence would roll over") - } - - g.v7ClockSequence++ - } else { - g.v7ClockSequence = 0 - } - - default: - panic(fmt.Sprintf("unknown precision value %d", p)) - } - } - - g.v7LastTime = unix - g.v7LastSubsec = nsec - - return unix, nsec, g.v7ClockSequence, nil -} - // Returns the hardware address. func (g *Gen) getHardwareAddr() ([]byte, error) { var err error @@ -558,9 +339,11 @@ func newFromHash(h hash.Hash, ns UUID, name string) UUID { return u } +var netInterfaces = net.Interfaces + // Returns the hardware address. func defaultHWAddrFunc() (net.HardwareAddr, error) { - ifaces, err := net.Interfaces() + ifaces, err := netInterfaces() if err != nil { return []byte{}, err } diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go index f314b845736..e747e54125a 100644 --- a/vendor/github.com/gofrs/uuid/uuid.go +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -20,7 +20,7 @@ // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // Package uuid provides implementations of the Universally Unique Identifier -// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 02). +// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03). // // RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The // Peabody UUID RFC Draft[2] provides the specification for the new k-sortable @@ -36,7 +36,7 @@ // ensure we were understanding the specification correctly. // // [1] https://tools.ietf.org/html/rfc4122 -// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-02 +// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03 // [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 package uuid @@ -63,8 +63,8 @@ const ( V3 // Version 3 (namespace name-based) V4 // Version 4 (random) V5 // Version 5 (namespace name-based) - V6 // Version 6 (k-sortable timestamp and random data) [peabody draft] - V7 // Version 7 (k-sortable timestamp, with configurable precision, and random data) [peabody draft] + V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft] + V7 // Version 7 (k-sortable timestamp and random data) [peabody draft] _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented] ) @@ -116,7 +116,7 @@ func TimestampFromV1(u UUID) (Timestamp, error) { // TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This // function returns an error if the UUID is any version other than 6. 
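The new netInterfaces package variable introduced above for defaultHWAddrFunc exists so the hardware-address lookup can be stubbed in tests. A minimal sketch of a hypothetical in-package test using that seam (the stubbed interface data is invented, and it assumes defaultHWAddrFunc accepts the first interface that carries a usable hardware address):

package uuid

import (
	"net"
	"testing"
)

// Hypothetical in-package test: swap the netInterfaces seam for a stub so
// defaultHWAddrFunc can run without touching the real network stack.
func TestDefaultHWAddrFuncStubbed(t *testing.T) {
	orig := netInterfaces
	defer func() { netInterfaces = orig }()

	netInterfaces = func() ([]net.Interface, error) {
		return []net.Interface{
			{HardwareAddr: net.HardwareAddr{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01}},
		}, nil
	}

	addr, err := defaultHWAddrFunc()
	if err != nil {
		t.Fatal(err)
	}
	if len(addr) != 6 {
		t.Fatalf("unexpected hardware address: %v", addr)
	}
}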
// -// This is implemented based on revision 01 of the Peabody UUID draft, and may +// This is implemented based on revision 03 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 96aa271e54f..c8a1beb8a8c 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -184,12 +184,13 @@ var profileDecoder = []decoder{ // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) - x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + x.Line = b.tmpLines[:0] // Use shared space temporarily pp := m.(*Profile) pp.Location = append(pp.Location, x) err := decodeMessage(b, x) - var tmp []Line - x.Line = append(tmp, x.Line...) // Shrink to allocated size + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) return err }, // repeated Function function = 5 @@ -307,41 +308,52 @@ func (p *Profile) postDecode() error { st.Unit, err = getString(p.stringTable, &st.unitX, err) } + // Pre-allocate space for all locations. + numLocations := 0 for _, s := range p.Sample { - labels := make(map[string][]string, len(s.labelX)) - numLabels := make(map[string][]int64, len(s.labelX)) - numUnits := make(map[string][]string, len(s.labelX)) - for _, l := range s.labelX { - var key, value string - key, err = getString(p.stringTable, &l.keyX, err) - if l.strX != 0 { - value, err = getString(p.stringTable, &l.strX, err) - labels[key] = append(labels[key], value) - } else if l.numX != 0 || l.unitX != 0 { - numValues := numLabels[key] - units := numUnits[key] - if l.unitX != 0 { - var unit string - unit, err = getString(p.stringTable, &l.unitX, err) - units = padStringArray(units, len(numValues)) - numUnits[key] = append(units, unit) + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) } - numLabels[key] = append(numLabels[key], l.numX) } - } - if len(labels) > 0 { - s.Label = labels - } - if len(numLabels) > 0 { - s.NumLabel = numLabels - for key, units := range numUnits { - if len(units) > 0 { - numUnits[key] = padStringArray(units, len(numLabels[key])) + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } } + s.NumUnit = numUnits } - s.NumUnit = numUnits } - s.Location = 
make([]*Location, len(s.locationIDX)) + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = locationIds[lid] diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go index ea8e66c68d2..c794b939067 100644 --- a/vendor/github.com/google/pprof/profile/filter.go +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -22,6 +22,10 @@ import "regexp" // samples where at least one frame matches focus but none match ignore. // Returns true is the corresponding regexp matched at least one sample. func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 6fcd11de19a..90c5723d78f 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -15,6 +15,7 @@ package profile import ( + "encoding/binary" "fmt" "sort" "strconv" @@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) { for _, src := range srcs { // Clear the profile-specific hash tables - pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.locationsByID = makeLocationIDMap(len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) @@ -136,7 +137,7 @@ type profileMerger struct { p *Profile // Memoization tables within a profile. - locationsByID map[uint64]*Location + locationsByID locationIDMap functionsByID map[uint64]*Function mappingsByID map[uint64]mapInfo @@ -153,6 +154,16 @@ type mapInfo struct { } func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. s := &Sample{ Location: make([]*Location, len(src.Location)), Value: make([]int64, len(src.Value)), @@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.NumLabel[k] = vv s.NumUnit[k] = uu } - // Check memoization table. Must be done on the remapped location to - // account for the remapped mapping. Add current values to the - // existing sample. - k := s.key() - if ss, ok := pm.samples[k]; ok { - for i, v := range src.Value { - ss.Value[i] += v - } - return ss - } copy(s.Value, src.Value) pm.samples[k] = s pm.p.Sample = append(pm.p.Sample, s) return s } -// key generates sampleKey to be used as a key for maps. -func (sample *Sample) key() sampleKey { - ids := make([]string, len(sample.Location)) - for i, l := range sample.Location { - ids[i] = strconv.FormatUint(l.ID, 16) +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. 
+ var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) } - labels := make([]string, 0, len(sample.Label)) - for k, v := range sample.Label { - labels = append(labels, fmt.Sprintf("%q%q", k, v)) + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) } - sort.Strings(labels) - numlabels := make([]string, 0, len(sample.NumLabel)) - for k, v := range sample.NumLabel { - numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } } - sort.Strings(numlabels) - return sampleKey{ - strings.Join(ids, "|"), - strings.Join(labels, ""), - strings.Join(numlabels, ""), + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } -type sampleKey struct { - locations string - labels string - numlabels string +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } func (pm *profileMerger) mapLocation(src *Location) *Location { @@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { return nil } - if l, ok := pm.locationsByID[src.ID]; ok { + if l := pm.locationsByID.get(src.ID); l != nil { return l } @@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { // account for the remapped mapping ID. 
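The old key() built map keys from fmt.Sprintf/strconv strings joined together; the replacement above streams length-prefixed uvarints into a strings.Builder, which keeps components unambiguous while avoiding the per-sample string churn. A self-contained sketch of the same length-prefixed encoding idea (standalone code, not the pprof types themselves):

package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

// encodeKey length-prefixes every component with a uvarint, so "ab","c"
// and "a","bc" can never produce the same key, unlike a plain join.
func encodeKey(parts ...string) string {
	var buf strings.Builder
	var tmp [binary.MaxVarintLen64]byte
	for _, p := range parts {
		n := binary.PutUvarint(tmp[:], uint64(len(p)))
		buf.Write(tmp[:n])
		buf.WriteString(p)
	}
	return buf.String()
}

func main() {
	fmt.Println(encodeKey("ab", "c") == encodeKey("a", "bc")) // false: boundaries preserved
	fmt.Println("ab"+"c" == "a"+"bc")                         // true: why a delimiter-free join is unsafe
}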
k := l.key() if ll, ok := pm.locations[k]; ok { - pm.locationsByID[src.ID] = ll + pm.locationsByID.set(src.ID, ll) return ll } - pm.locationsByID[src.ID] = l + pm.locationsByID.set(src.ID, l) pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l @@ -480,3 +537,32 @@ func (p *Profile) compatible(pb *Profile) error { func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index 539ad3ab33f..a15696ba16f 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -39,11 +39,12 @@ import ( ) type buffer struct { - field int // field tag - typ int // proto wire type code for field - u64 uint64 - data []byte - tmp [16]byte + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". } type decoder func(*buffer, message) error @@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding data := b.data - tmp := make([]int64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, int64(u)) + *x = append(*x, int64(u)) } - *x = append(*x, tmp...) return nil } var i int64 @@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { data := b.data // Packed encoding - tmp := make([]uint64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, u) + *x = append(*x, u) } - *x = append(*x, tmp...) return nil } var u uint64 diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index 02d21a81846..b2f9fd54660 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
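locationIDMap, defined just above, keeps a dense slice for IDs below the preallocated size and falls back to a map only for larger IDs, since profile location IDs are typically numbered 1..N. A tiny self-contained analogue showing how the two paths behave (illustrative only, using int values instead of *Location):

package main

import "fmt"

// Mirror of the dense/sparse idea from locationIDMap.
type idMap struct {
	dense  []int          // hit for id < len(dense): a plain slice index, no hashing
	sparse map[uint64]int // fallback for large or out-of-range ids
}

func makeIDMap(n int) idMap {
	return idMap{dense: make([]int, n), sparse: map[uint64]int{}}
}

func (m idMap) set(id uint64, v int) {
	if id < uint64(len(m.dense)) {
		m.dense[id] = v
		return
	}
	m.sparse[id] = v
}

func (m idMap) get(id uint64) int {
	if id < uint64(len(m.dense)) {
		return m.dense[id]
	}
	return m.sparse[id]
}

func main() {
	m := makeIDMap(4)
	m.set(2, 20)         // dense path
	m.set(1_000_000, 99) // sparse path
	fmt.Println(m.get(2), m.get(1_000_000))
}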
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - funcName := simplifyFunc(fn.Name) - if dropRx.MatchString(funcName) { - if keepRx == nil || !keepRx.MatchString(funcName) { - break - } + if pruneFromHere(fn.Name) { + break } } } diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index 81f54d5ef2c..0ba9da7d6bf 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -1,13 +1,15 @@ // Copyright 2022 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). // -// Client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). // The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. package client import ( "crypto" + "crypto/ecdsa" "crypto/rsa" "crypto/x509" "encoding/gob" @@ -67,15 +69,17 @@ func (k *Key) CertificateChain() [][]byte { // Close closes the RPC connection and kills the signer subprocess. // Call this to free up resources when the Key object is no longer needed. func (k *Key) Close() error { - if err := k.client.Close(); err != nil { - return fmt.Errorf("failed to close RPC connection: %w", err) - } if err := k.cmd.Process.Kill(); err != nil { return fmt.Errorf("failed to kill signer process: %w", err) } if err := k.cmd.Wait(); err.Error() != "signal: killed" { return fmt.Errorf("signer process was not killed: %w", err) } + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) + } return nil } @@ -84,8 +88,11 @@ func (k *Key) Public() crypto.PublicKey { return k.publicKey } -// Sign signs a message by encrypting a message digest, using the specified signer options. +// Sign signs a message digest, using the specified signer options. 
func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) return } @@ -147,5 +154,15 @@ func Cred(configFilePath string) (*Key, error) { return nil, fmt.Errorf("invalid public key type: %T", publicKey) } + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + return k, nil } diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index 6b5f2806e6b..ccef5278a30 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -11,7 +11,7 @@ import ( "runtime" ) -const configFileName = "enterprise_certificate_config.json" +const configFileName = "certificate_config.json" // EnterpriseCertificateConfig contains parameters for initializing signer. type EnterpriseCertificateConfig struct { @@ -20,7 +20,7 @@ type EnterpriseCertificateConfig struct { // Libs specifies the locations of helper libraries. type Libs struct { - SignerBinary string `json:"signer_binary"` + ECP string `json:"ecp"` } // LoadSignerBinaryPath retrieves the path of the signer binary from the config file. @@ -39,9 +39,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { if err != nil { return "", err } - signerBinaryPath := config.Libs.SignerBinary + signerBinaryPath := config.Libs.ECP if signerBinaryPath == "" { - return "", errors.New("Signer binary path is missing.") + return "", errors.New("signer binary path is missing") } return signerBinaryPath, nil } @@ -61,9 +61,8 @@ func guessHomeDir() string { func getDefaultConfigFileDirectory() (directory string) { if runtime.GOOS == "windows" { return filepath.Join(os.Getenv("APPDATA"), "gcloud") - } else { - return filepath.Join(guessHomeDir(), ".config/gcloud") } + return filepath.Join(guessHomeDir(), ".config/gcloud") } // GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. 
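The new guards above reject digests whose length does not match opts.HashFunc().Size() and RSA keys below 2048 bits. A hedged sketch of a caller that satisfies those checks when signing with a Key obtained from Cred (the config path below is illustrative only):

package main

import (
	"crypto"
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	// Illustrative path; the real location is the gcloud certificate_config.json
	// resolved by the util package.
	key, err := client.Cred("/path/to/certificate_config.json")
	if err != nil {
		log.Fatal(err)
	}
	defer key.Close()

	// Hash first: Sign now insists that len(digest) == opts.HashFunc().Size(),
	// so passing the raw message would be rejected.
	digest := sha256.Sum256([]byte("payload to sign"))
	sig, err := key.Sign(nil, digest[:], crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature: %x\n", sig)
}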
diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 129685e3769..7ea75bfdc37 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.5.1" + "v2": "2.6.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 2a05e44572a..fb8463a2cb1 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + ## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 00000000000..1b53d0a3ac1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. 
+} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index a46ae25b0ee..065312e827c 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
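DetermineContentType above either trusts a ContentType() string method on the supplied reader or sniffs up to 512 bytes and hands back a replacement reader that replays them. A small usage sketch (the payload is invented):

package main

import (
	"bytes"
	"fmt"
	"io"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	media := bytes.NewReader([]byte("<html><body>hello</body></html>"))

	// After this call the original reader must not be read again: the sniffed
	// prefix now lives in the returned wrapper.
	wrapped, ctype := gax.DetermineContentType(media)
	fmt.Println("content type:", ctype) // e.g. "text/html; charset=utf-8"

	body, _ := io.ReadAll(wrapped)
	fmt.Println("payload still intact:", len(body), "bytes")
}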
-const Version = "2.5.1" +const Version = "2.6.0" diff --git a/vendor/github.com/hashicorp/golang-lru/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/.golangci.yml new file mode 100644 index 00000000000..49202fc41e6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - megacheck + - revive + - govet + - unconvert + - megacheck + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + fast: false + disable-all: true + +issues: + exclude-rules: + - path: _test\.go + linters: + - dupl + exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go index e474cd07581..15fcad0306e 100644 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -44,7 +44,7 @@ func New2Q(size int) (*TwoQueueCache, error) { // New2QParams creates a new TwoQueueCache using the provided // parameter values. -func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { +func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } @@ -138,7 +138,6 @@ func (c *TwoQueueCache) Add(key, value interface{}) { // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) - return } // ensureSpace is used to ensure we have space in the cache diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE index be2cc4dfb60..0e5d580e0e9 100644 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2014 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md index 33e58cfaf97..063bb16056e 100644 --- a/vendor/github.com/hashicorp/golang-lru/README.md +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -7,7 +7,7 @@ thread safe LRU cache. It is based on the cache in Groupcache. Documentation ============= -Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) +Full docs are available on [Godoc](https://pkg.go.dev/github.com/hashicorp/golang-lru) Example ======= diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go index 555225a218c..e396f8428aa 100644 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -173,7 +173,6 @@ func (c *ARCCache) Add(key, value interface{}) { // Add to the recently seen list c.t1.Add(key, value) - return } // replace is used to adaptively evict from either T1 or T2 diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index 4e5e9d8fd08..895d8e3ea0c 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -6,10 +6,17 @@ import ( "github.com/hashicorp/golang-lru/simplelru" ) +const ( + // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val + DefaultEvictedBufferSize = 16 +) + // Cache is a thread-safe fixed size LRU cache. 
type Cache struct { - lru simplelru.LRUCache - lock sync.RWMutex + lru *simplelru.LRU + evictedKeys, evictedVals []interface{} + onEvictedCB func(k, v interface{}) + lock sync.RWMutex } // New creates an LRU of the given size. @@ -19,30 +26,63 @@ func New(size int) (*Cache, error) { // NewWithEvict constructs a fixed size cache with the given eviction // callback. -func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { - lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) - if err != nil { - return nil, err +func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) { + // create a cache with default settings + c = &Cache{ + onEvictedCB: onEvicted, } - c := &Cache{ - lru: lru, + if onEvicted != nil { + c.initEvictBuffers() + onEvicted = c.onEvicted } - return c, nil + c.lru, err = simplelru.NewLRU(size, onEvicted) + return +} + +func (c *Cache) initEvictBuffers() { + c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize) +} + +// onEvicted save evicted key/val and sent in externally registered callback +// outside of critical section +func (c *Cache) onEvicted(k, v interface{}) { + c.evictedKeys = append(c.evictedKeys, k) + c.evictedVals = append(c.evictedVals, v) } // Purge is used to completely clear the cache. func (c *Cache) Purge() { + var ks, vs []interface{} c.lock.Lock() c.lru.Purge() + if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } c.lock.Unlock() + // invoke callback outside of critical section + if c.onEvictedCB != nil { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { + var k, v interface{} c.lock.Lock() evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } c.lock.Unlock() - return evicted + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return } // Get looks up a key's value from the cache. @@ -75,13 +115,21 @@ func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + var k, v interface{} c.lock.Lock() - defer c.lock.Unlock() - if c.lru.Contains(key) { + c.lock.Unlock() return true, false } evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } return false, evicted } @@ -89,47 +137,80 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. 
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { + var k, v interface{} c.lock.Lock() - defer c.lock.Unlock() - previous, ok = c.lru.Peek(key) if ok { + c.lock.Unlock() return previous, true, false } - evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } return nil, false, evicted } // Remove removes the provided key from the cache. func (c *Cache) Remove(key interface{}) (present bool) { + var k, v interface{} c.lock.Lock() present = c.lru.Remove(key) + if c.onEvictedCB != nil && present { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } c.lock.Unlock() + if c.onEvictedCB != nil && present { + c.onEvictedCB(k, v) + } return } // Resize changes the cache size. func (c *Cache) Resize(size int) (evicted int) { + var ks, vs []interface{} c.lock.Lock() evicted = c.lru.Resize(size) + if c.onEvictedCB != nil && evicted > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } c.lock.Unlock() + if c.onEvictedCB != nil && evicted > 0 { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } return evicted } // RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) { +func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { + var k, v interface{} c.lock.Lock() key, value, ok = c.lru.RemoveOldest() + if c.onEvictedCB != nil && ok { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } c.lock.Unlock() + if c.onEvictedCB != nil && ok { + c.onEvictedCB(k, v) + } return } // GetOldest returns the oldest entry -func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) { - c.lock.Lock() +func (c *Cache) GetOldest() (key, value interface{}, ok bool) { + c.lock.RLock() key, value, ok = c.lru.GetOldest() - c.lock.Unlock() + c.lock.RUnlock() return } diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index a86c8539e06..9233583c91c 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -25,7 +25,7 @@ type entry struct { // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { - return nil, errors.New("Must provide a positive size") + return nil, errors.New("must provide a positive size") } c := &LRU{ size: size, @@ -109,7 +109,7 @@ func (c *LRU) Remove(key interface{}) (present bool) { } // RemoveOldest removes the oldest item from the cache. 
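All of the Cache methods above now stash evicted key/value pairs in the buffers while the lock is held and invoke the user callback only after unlocking. One practical consequence is that an eviction callback can touch the same cache again (for example to inspect or repopulate it) without self-deadlocking on the cache's mutex. A hedged usage sketch:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	var cache *lru.Cache

	// The callback runs outside the cache's critical section, so calling
	// back into the cache from here no longer risks re-entrant locking.
	cache, _ = lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v, %d entries left\n", key, value, cache.Len())
	})

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a"; the callback fires after the lock is released
}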
-func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { +func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) @@ -120,7 +120,7 @@ func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { +func (c *LRU) GetOldest() (key, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index 92d70934d63..cb7f8caf03d 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -1,3 +1,4 @@ +// Package simplelru provides simple LRU implementation based on build-in container/list. package simplelru // LRUCache is the interface for simple LRU cache. @@ -34,6 +35,6 @@ type LRUCache interface { // Clears all cache entries. Purge() - // Resizes cache, returning number evicted - Resize(int) int + // Resizes cache, returning number evicted + Resize(int) int } diff --git a/vendor/github.com/hashicorp/golang-lru/testing.go b/vendor/github.com/hashicorp/golang-lru/testing.go new file mode 100644 index 00000000000..492760782c5 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/testing.go @@ -0,0 +1,16 @@ +package lru + +import ( + "crypto/rand" + "math" + "math/big" + "testing" +) + +func getRand(tb testing.TB) int64 { + out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + tb.Fatal(err) + } + return out.Int64() +} diff --git a/vendor/github.com/prometheus/alertmanager/api/api.go b/vendor/github.com/prometheus/alertmanager/api/api.go index f50ebee1c57..59bfb76da66 100644 --- a/vendor/github.com/prometheus/alertmanager/api/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/api.go @@ -20,6 +20,11 @@ import ( "runtime" "time" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/route" + apiv1 "github.com/prometheus/alertmanager/api/v1" apiv2 "github.com/prometheus/alertmanager/api/v2" "github.com/prometheus/alertmanager/cluster" @@ -28,11 +33,6 @@ import ( "github.com/prometheus/alertmanager/provider" "github.com/prometheus/alertmanager/silence" "github.com/prometheus/alertmanager/types" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/common/route" - - "github.com/go-kit/log" ) // API represents all APIs of Alertmanager. 
@@ -128,7 +128,6 @@ func New(opts Options) (*API, error) { log.With(l, "version", "v2"), opts.Registry, ) - if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v1/api.go b/vendor/github.com/prometheus/alertmanager/api/v1/api.go index 29789867fdd..39018a7589c 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v1/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v1/api.go @@ -171,7 +171,7 @@ func (api *API) receivers(w http.ResponseWriter, req *http.Request) { func (api *API) status(w http.ResponseWriter, req *http.Request) { api.mtx.RLock() - var status = struct { + status := struct { ConfigYAML string `json:"configYAML"` ConfigJSON *config.Config `json:"configJSON"` VersionInfo map[string]string `json:"versionInfo"` @@ -618,13 +618,13 @@ func (api *API) listSilences(w http.ResponseWriter, r *http.Request) { } } - sort.Slice(active, func(i int, j int) bool { + sort.Slice(active, func(i, j int) bool { return active[i].EndsAt.Before(active[j].EndsAt) }) - sort.Slice(pending, func(i int, j int) bool { + sort.Slice(pending, func(i, j int) bool { return pending[i].StartsAt.Before(pending[j].EndsAt) }) - sort.Slice(expired, func(i int, j int) bool { + sort.Slice(expired, func(i, j int) bool { return expired[i].EndsAt.After(expired[j].EndsAt) }) diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/api.go b/vendor/github.com/prometheus/alertmanager/api/v2/api.go index da4797dc08e..b272322c74d 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/api.go @@ -23,6 +23,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/go-openapi/analysis" "github.com/go-openapi/loads" "github.com/go-openapi/runtime/middleware" "github.com/go-openapi/strfmt" @@ -73,9 +74,11 @@ type API struct { Handler http.Handler } -type groupsFn func(func(*dispatch.Route) bool, func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[prometheus_model.Fingerprint][]string) -type getAlertStatusFn func(prometheus_model.Fingerprint) types.AlertStatus -type setAlertStatusFn func(prometheus_model.LabelSet) +type ( + groupsFn func(func(*dispatch.Route) bool, func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[prometheus_model.Fingerprint][]string) + getAlertStatusFn func(prometheus_model.Fingerprint) types.AlertStatus + setAlertStatusFn func(prometheus_model.LabelSet) +) // NewAPI returns a new Alertmanager API v2 func NewAPI( @@ -99,9 +102,9 @@ func NewAPI( } // Load embedded swagger file. - swaggerSpec, err := loads.Analyzed(restapi.SwaggerJSON, "") + swaggerSpec, swaggerSpecAnalysis, err := getSwaggerSpec() if err != nil { - return nil, fmt.Errorf("failed to load embedded swagger file: %v", err.Error()) + return nil, err } // Create new service API. @@ -111,7 +114,9 @@ func NewAPI( // the API itself via RoutesHandler. See: // https://github.com/go-swagger/go-swagger/issues/1779 openAPI.Middleware = func(b middleware.Builder) http.Handler { - return middleware.Spec("", swaggerSpec.Raw(), openAPI.Context().RoutesHandler(b)) + // Manually create the context so that we can use the singleton swaggerSpecAnalysis. 
+ swaggerContext := middleware.NewRoutableContextWithAnalyzedSpec(swaggerSpec, swaggerSpecAnalysis, openAPI, nil) + return middleware.Spec("", swaggerSpec.Raw(), swaggerContext.RoutesHandler(b)) } openAPI.AlertGetAlertsHandler = alert_ops.GetAlertsHandlerFunc(api.getAlertsHandler) @@ -225,14 +230,14 @@ func (api *API) getAlertsHandler(params alert_ops.GetAlertsParams) middleware.Re matchers, err := parseFilter(params.Filter) if err != nil { - level.Error(logger).Log("msg", "Failed to parse matchers", "err", err) + level.Debug(logger).Log("msg", "Failed to parse matchers", "err", err) return alertgroup_ops.NewGetAlertGroupsBadRequest().WithPayload(err.Error()) } if params.Receiver != nil { receiverFilter, err = regexp.Compile("^(?:" + *params.Receiver + ")$") if err != nil { - level.Error(logger).Log("msg", "Failed to compile receiver regex", "err", err) + level.Debug(logger).Log("msg", "Failed to compile receiver regex", "err", err) return alert_ops. NewGetAlertsBadRequest(). WithPayload( @@ -354,7 +359,7 @@ func (api *API) getAlertGroupsHandler(params alertgroup_ops.GetAlertGroupsParams matchers, err := parseFilter(params.Filter) if err != nil { - level.Error(logger).Log("msg", "Failed to parse matchers", "err", err) + level.Debug(logger).Log("msg", "Failed to parse matchers", "err", err) return alertgroup_ops.NewGetAlertGroupsBadRequest().WithPayload(err.Error()) } @@ -492,7 +497,7 @@ func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middlew for _, matcherString := range params.Filter { matcher, err := labels.ParseMatcher(matcherString) if err != nil { - level.Error(logger).Log("msg", "Failed to parse matchers", "err", err) + level.Debug(logger).Log("msg", "Failed to parse matchers", "err", err) return alert_ops.NewGetAlertsBadRequest().WithPayload(err.Error()) } @@ -524,13 +529,11 @@ func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middlew return silence_ops.NewGetSilencesOK().WithPayload(sils) } -var ( - silenceStateOrder = map[types.SilenceState]int{ - types.SilenceStateActive: 1, - types.SilenceStatePending: 2, - types.SilenceStateExpired: 3, - } -) +var silenceStateOrder = map[types.SilenceState]int{ + types.SilenceStateActive: 1, + types.SilenceStatePending: 2, + types.SilenceStateExpired: 3, +} // SortSilences sorts first according to the state "active, pending, expired" // then by end time or start time depending on the state. @@ -672,3 +675,33 @@ func parseFilter(filter []string) ([]*labels.Matcher, error) { } return matchers, nil } + +var ( + swaggerSpecCacheMx sync.Mutex + swaggerSpecCache *loads.Document + swaggerSpecAnalysisCache *analysis.Spec +) + +// getSwaggerSpec loads and caches the swagger spec. If a cached version already exists, +// it returns the cached one. The reason why we cache it is because some downstream projects +// (e.g. Grafana Mimir) creates many Alertmanager instances in the same process, so they would +// incur in a significant memory penalty if we would reload the swagger spec each time. +func getSwaggerSpec() (*loads.Document, *analysis.Spec, error) { + swaggerSpecCacheMx.Lock() + defer swaggerSpecCacheMx.Unlock() + + // Check if a cached version exists. + if swaggerSpecCache != nil { + return swaggerSpecCache, swaggerSpecAnalysisCache, nil + } + + // Load embedded swagger file. 
+ swaggerSpec, err := loads.Analyzed(restapi.SwaggerJSON, "") + if err != nil { + return nil, nil, fmt.Errorf("failed to load embedded swagger file: %w", err) + } + + swaggerSpecCache = swaggerSpec + swaggerSpecAnalysisCache = analysis.New(swaggerSpec.Spec()) + return swaggerSpec, swaggerSpecAnalysisCache, nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/compat.go b/vendor/github.com/prometheus/alertmanager/api/v2/compat.go index 6b948c2f065..112bfc4f2f9 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/compat.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/compat.go @@ -18,10 +18,11 @@ import ( "time" "github.com/go-openapi/strfmt" + prometheus_model "github.com/prometheus/common/model" + open_api_models "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/alertmanager/silence/silencepb" "github.com/prometheus/alertmanager/types" - prometheus_model "github.com/prometheus/common/model" ) // GettableSilenceFromProto converts *silencepb.Silence to open_api_models.GettableSilence. diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go index 17030754466..a3f30c53c6c 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -59,7 +61,6 @@ func (m *Alert) Validate(formats strfmt.Registry) error { } func (m *Alert) validateGeneratorURL(formats strfmt.Registry) error { - if swag.IsZero(m.GeneratorURL) { // not required return nil } @@ -73,9 +74,45 @@ func (m *Alert) validateGeneratorURL(formats strfmt.Registry) error { func (m *Alert) validateLabels(formats strfmt.Registry) error { - if err := m.Labels.Validate(formats); err != nil { + if err := validate.Required("labels", "body", m.Labels); err != nil { + return err + } + + if m.Labels != nil { + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + } + + return nil +} + +// ContextValidate validate this alert based on the context it is used +func (m *Alert) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Alert) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go index 3db729359dd..c943e683303 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -83,6 +84,8 @@ func (m *AlertGroup) validateAlerts(formats strfmt.Registry) error { if err := m.Alerts[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("alerts" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("alerts" + "." + strconv.Itoa(i)) } return err } @@ -95,13 +98,21 @@ func (m *AlertGroup) validateAlerts(formats strfmt.Registry) error { func (m *AlertGroup) validateLabels(formats strfmt.Registry) error { - if err := m.Labels.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("labels") - } + if err := validate.Required("labels", "body", m.Labels); err != nil { return err } + if m.Labels != nil { + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + } + return nil } @@ -115,6 +126,80 @@ func (m *AlertGroup) validateReceiver(formats strfmt.Registry) error { if err := m.Receiver.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("receiver") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("receiver") + } + return err + } + } + + return nil +} + +// ContextValidate validate this alert group based on the context it is used +func (m *AlertGroup) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAlerts(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateReceiver(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlertGroup) contextValidateAlerts(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Alerts); i++ { + + if m.Alerts[i] != nil { + if err := m.Alerts[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("alerts" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("alerts" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AlertGroup) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + + return nil +} + +func (m *AlertGroup) contextValidateReceiver(ctx context.Context, formats strfmt.Registry) error { + + if m.Receiver != nil { + if err := m.Receiver.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("receiver") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("receiver") } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go index cb48c08e5f5..31ccb2172b9 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -45,6 +46,33 @@ func (m AlertGroups) Validate(formats strfmt.Registry) error { if err := m[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validate this alert groups based on the context it is used +func (m AlertGroups) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go index 9ee99f7851b..ae1f248c996 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "encoding/json" "github.com/go-openapi/errors" @@ -133,6 +134,11 @@ func (m *AlertStatus) validateState(formats strfmt.Registry) error { return nil } +// ContextValidate validates this alert status based on context it is used +func (m *AlertStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *AlertStatus) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go index 958114bbf8c..bf7721eb55e 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -59,6 +61,11 @@ func (m *AlertmanagerConfig) validateOriginal(formats strfmt.Registry) error { return nil } +// ContextValidate validates this alertmanager config based on context it is used +func (m *AlertmanagerConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *AlertmanagerConfig) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go index 483beb23e11..0d5370edfb3 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -85,6 +87,8 @@ func (m *AlertmanagerStatus) validateCluster(formats strfmt.Registry) error { if err := m.Cluster.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cluster") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("cluster") } return err } @@ -103,6 +107,8 @@ func (m *AlertmanagerStatus) validateConfig(formats strfmt.Registry) 
error { if err := m.Config.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("config") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("config") } return err } @@ -134,6 +140,78 @@ func (m *AlertmanagerStatus) validateVersionInfo(formats strfmt.Registry) error if err := m.VersionInfo.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("versionInfo") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("versionInfo") + } + return err + } + } + + return nil +} + +// ContextValidate validate this alertmanager status based on the context it is used +func (m *AlertmanagerStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateCluster(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateVersionInfo(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlertmanagerStatus) contextValidateCluster(ctx context.Context, formats strfmt.Registry) error { + + if m.Cluster != nil { + if err := m.Cluster.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("cluster") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("cluster") + } + return err + } + } + + return nil +} + +func (m *AlertmanagerStatus) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.Config != nil { + if err := m.Config.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("config") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("config") + } + return err + } + } + + return nil +} + +func (m *AlertmanagerStatus) contextValidateVersionInfo(ctx context.Context, formats strfmt.Registry) error { + + if m.VersionInfo != nil { + if err := m.VersionInfo.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("versionInfo") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("versionInfo") } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go index a3373a729d4..0078320f15c 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "encoding/json" "strconv" @@ -65,7 +66,6 @@ func (m *ClusterStatus) Validate(formats strfmt.Registry) error { } func (m *ClusterStatus) validatePeers(formats strfmt.Registry) error { - if swag.IsZero(m.Peers) { // not required return nil } @@ -79,6 +79,8 @@ func (m *ClusterStatus) validatePeers(formats strfmt.Registry) error { if err := m.Peers[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("peers" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("peers" + "." 
+ strconv.Itoa(i)) } return err } @@ -135,6 +137,40 @@ func (m *ClusterStatus) validateStatus(formats strfmt.Registry) error { return nil } +// ContextValidate validate this cluster status based on the context it is used +func (m *ClusterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePeers(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ClusterStatus) contextValidatePeers(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Peers); i++ { + + if m.Peers[i] != nil { + if err := m.Peers[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("peers" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("peers" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + // MarshalBinary interface implementation func (m *ClusterStatus) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go index 2f74818c27e..f7db3321c1f 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -206,13 +207,21 @@ func (m *GettableAlert) Validate(formats strfmt.Registry) error { func (m *GettableAlert) validateAnnotations(formats strfmt.Registry) error { - if err := m.Annotations.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("annotations") - } + if err := validate.Required("annotations", "body", m.Annotations); err != nil { return err } + if m.Annotations != nil { + if err := m.Annotations.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("annotations") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("annotations") + } + return err + } + } + return nil } @@ -253,6 +262,8 @@ func (m *GettableAlert) validateReceivers(formats strfmt.Registry) error { if err := m.Receivers[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("receivers" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("receivers" + "." 
+ strconv.Itoa(i)) } return err } @@ -286,6 +297,8 @@ func (m *GettableAlert) validateStatus(formats strfmt.Registry) error { if err := m.Status.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("status") } return err } @@ -307,6 +320,83 @@ func (m *GettableAlert) validateUpdatedAt(formats strfmt.Registry) error { return nil } +// ContextValidate validate this gettable alert based on the context it is used +func (m *GettableAlert) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAnnotations(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateReceivers(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateStatus(ctx, formats); err != nil { + res = append(res, err) + } + + // validation for a type composition with Alert + if err := m.Alert.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *GettableAlert) contextValidateAnnotations(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Annotations.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("annotations") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("annotations") + } + return err + } + + return nil +} + +func (m *GettableAlert) contextValidateReceivers(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Receivers); i++ { + + if m.Receivers[i] != nil { + if err := m.Receivers[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("receivers" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("receivers" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *GettableAlert) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { + + if m.Status != nil { + if err := m.Status.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("status") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("status") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (m *GettableAlert) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go index f5a5c04215e..4efe8cd5ec8 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -45,6 +46,33 @@ func (m GettableAlerts) Validate(formats strfmt.Registry) error { if err := m[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this gettable alerts based on the context it is used +func (m GettableAlerts) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go index 8fb7a5129ce..fe9d178d7f7 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -155,6 +157,8 @@ func (m *GettableSilence) validateStatus(formats strfmt.Registry) error { if err := m.Status.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("status") } return err } @@ -176,6 +180,41 @@ func (m *GettableSilence) validateUpdatedAt(formats strfmt.Registry) error { return nil } +// ContextValidate validate this gettable silence based on the context it is used +func (m *GettableSilence) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateStatus(ctx, formats); err != nil { + res = append(res, err) + } + + // validation for a type composition with Silence + if err := m.Silence.ContextValidate(ctx, formats); err != nil { + res = 
append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *GettableSilence) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { + + if m.Status != nil { + if err := m.Status.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("status") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("status") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (m *GettableSilence) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go index 32d109ef7e2..cda5ef6497a 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -45,6 +46,33 @@ func (m GettableSilences) Validate(formats strfmt.Registry) error { if err := m[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this gettable silences based on the context it is used +func (m GettableSilences) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go index d7d2985238d..9dcae13b8bb 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/strfmt" ) @@ -32,3 +34,8 @@ type LabelSet map[string]string func (m LabelSet) Validate(formats strfmt.Registry) error { return nil } + +// ContextValidate validates this label set based on context it is used +func (m LabelSet) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go index f2e2d6de8c8..0d30e7ea94d 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -96,6 +98,11 @@ 
func (m *Matcher) validateValue(formats strfmt.Registry) error { return nil } +// ContextValidate validates this matcher based on context it is used +func (m *Matcher) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *Matcher) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go index 3fb73c43429..4e2061872e4 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -52,6 +53,33 @@ func (m Matchers) Validate(formats strfmt.Registry) error { if err := m[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this matchers based on the context it is used +func (m Matchers) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go index 204c3d7857f..9d9b393aea5 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -76,6 +78,11 @@ func (m *PeerStatus) validateName(formats strfmt.Registry) error { return nil } +// ContextValidate validates this peer status based on context it is used +func (m *PeerStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *PeerStatus) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go index 88c06e83522..dcec7f0a19e 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -140,11 +142,15 @@ func (m *PostableAlert) validateAnnotations(formats strfmt.Registry) error { return nil } - if err := m.Annotations.Validate(formats); err != nil { - 
if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("annotations") + if m.Annotations != nil { + if err := m.Annotations.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("annotations") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("annotations") + } + return err } - return err } return nil @@ -176,6 +182,39 @@ func (m *PostableAlert) validateStartsAt(formats strfmt.Registry) error { return nil } +// ContextValidate validate this postable alert based on the context it is used +func (m *PostableAlert) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAnnotations(ctx, formats); err != nil { + res = append(res, err) + } + + // validation for a type composition with Alert + if err := m.Alert.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PostableAlert) contextValidateAnnotations(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Annotations.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("annotations") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("annotations") + } + return err + } + + return nil +} + // MarshalBinary interface implementation func (m *PostableAlert) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go index 9a135636821..ed4d7fb9bab 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "strconv" "github.com/go-openapi/errors" @@ -45,6 +46,33 @@ func (m PostableAlerts) Validate(formats strfmt.Registry) error { if err := m[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validate this postable alerts based on the context it is used +func (m PostableAlerts) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) } return err } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go index c77a9534a80..88aaa54fd85 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -97,6 +99,21 @@ func (m *PostableSilence) Validate(formats strfmt.Registry) error { return nil } +// ContextValidate validate this postable silence based on the context it is used +func (m *PostableSilence) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with Silence + if err := m.Silence.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + // MarshalBinary interface implementation func (m *PostableSilence) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go index 9f85db60a11..8e1bf9ee457 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -59,6 +61,11 @@ func (m *Receiver) validateName(formats strfmt.Registry) error { return nil } +// ContextValidate validates this receiver based on context it is used +func (m *Receiver) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *Receiver) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go index 27fb9f3d153..b58913b5c06 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -124,6 +126,8 @@ func (m *Silence) validateMatchers(formats strfmt.Registry) error { if err := m.Matchers.Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("matchers") + } else if ce, ok := 
err.(*errors.CompositeError); ok { + return ce.ValidateName("matchers") } return err } @@ -144,6 +148,34 @@ func (m *Silence) validateStartsAt(formats strfmt.Registry) error { return nil } +// ContextValidate validate this silence based on the context it is used +func (m *Silence) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMatchers(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Silence) contextValidateMatchers(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Matchers.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("matchers") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("matchers") + } + return err + } + + return nil +} + // MarshalBinary interface implementation func (m *Silence) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go index 0c63df85331..25b073795ca 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go @@ -20,6 +20,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" "encoding/json" "github.com/go-openapi/errors" @@ -99,6 +100,11 @@ func (m *SilenceStatus) validateState(formats strfmt.Registry) error { return nil } +// ContextValidate validates this silence status based on context it is used +func (m *SilenceStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *SilenceStatus) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go index f7124eca8e7..18b77584d7c 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go @@ -20,6 +20,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -144,6 +146,11 @@ func (m *VersionInfo) validateVersion(formats strfmt.Registry) error { return nil } +// ContextValidate validates this version info based on context it is used +func (m *VersionInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + // MarshalBinary interface implementation func (m *VersionInfo) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/configure_alertmanager.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/configure_alertmanager.go index 2765053b6a0..69dfe66bf98 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/configure_alertmanager.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/configure_alertmanager.go @@ -32,7 +32,7 @@ import ( "github.com/prometheus/alertmanager/api/v2/restapi/operations/silence" ) -//go:generate swagger generate server --target ../../v2 
--name Alertmanager --spec ../openapi.yaml --exclude-main +//go:generate swagger generate server --target ../../v2 --name Alertmanager --spec ../openapi.yaml --principal interface{} --exclude-main func configureFlags(api *operations.AlertmanagerAPI) { // api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... } @@ -48,6 +48,10 @@ func configureAPI(api *operations.AlertmanagerAPI) http.Handler { // Example: // api.Logger = log.Printf + api.UseSwaggerUI() + // To continue using redoc as your UI, uncomment the following line + // api.UseRedoc() + api.JSONConsumer = runtime.JSONConsumer() api.JSONProducer = runtime.JSONProducer() @@ -113,18 +117,18 @@ func configureTLS(tlsConfig *tls.Config) { // As soon as server is initialized but not run yet, this function will be called. // If you need to modify a config, store server instance to stop it individually later, this is the place. // This function can be called multiple times, depending on the number of serving schemes. -// scheme value will be set accordingly: "http", "https" or "unix" +// scheme value will be set accordingly: "http", "https" or "unix". func configureServer(s *http.Server, scheme, addr string) { } // The middleware configuration is for the handler executors. These do not apply to the swagger.json document. -// The middleware executes after routing but before authentication, binding and validation +// The middleware executes after routing but before authentication, binding and validation. func setupMiddlewares(handler http.Handler) http.Handler { return handler } // The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document. -// So this is a good place to plug in a panic handling middleware, logging and metrics +// So this is a good place to plug in a panic handling middleware, logging and metrics. 
func setupGlobalMiddleware(handler http.Handler) http.Handler { return handler } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/doc.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/doc.go index d8ee687c48b..ea249db26c3 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/doc.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/doc.go @@ -15,19 +15,19 @@ // Package restapi Alertmanager API // -// API of the Prometheus Alertmanager (https://github.com/prometheus/alertmanager) -// Schemes: -// http -// Host: localhost -// BasePath: /api/v2/ -// Version: 0.0.1 -// License: Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0.html +// API of the Prometheus Alertmanager (https://github.com/prometheus/alertmanager) +// Schemes: +// http +// Host: localhost +// BasePath: /api/v2/ +// Version: 0.0.1 +// License: Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0.html // -// Consumes: -// - application/json +// Consumes: +// - application/json // -// Produces: -// - application/json +// Produces: +// - application/json // // swagger:meta package restapi diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts.go index 27caf27b111..c1daf2f9d9d 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts.go @@ -43,10 +43,10 @@ func NewGetAlerts(ctx *middleware.Context, handler GetAlertsHandler) *GetAlerts return &GetAlerts{Context: ctx, Handler: handler} } -/*GetAlerts swagger:route GET /alerts alert getAlerts +/* + GetAlerts swagger:route GET /alerts alert getAlerts Get a list of alerts - */ type GetAlerts struct { Context *middleware.Context @@ -56,17 +56,15 @@ type GetAlerts struct { func (o *GetAlerts) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewGetAlertsParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_parameters.go index 66a63b1c124..9f5a915c6fa 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_parameters.go @@ -135,7 +135,6 @@ func (o *GetAlertsParams) BindRequest(r *http.Request, route *middleware.Matched if err := o.bindUnprocessed(qUnprocessed, qhkUnprocessed, route.Formats); err != nil { res = append(res, err) } - if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -151,6 +150,7 @@ func (o *GetAlertsParams) bindActive(rawData []string, hasKey bool, formats strf // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertsParams() return nil @@ -169,10 +169,8 @@ func (o *GetAlertsParams) bindActive(rawData []string, hasKey bool, formats strf // // Arrays are parsed according to CollectionFormat: "multi" (defaults to "csv" when empty). func (o *GetAlertsParams) bindFilter(rawData []string, hasKey bool, formats strfmt.Registry) error { - // CollectionFormat: multi filterIC := rawData - if len(filterIC) == 0 { return nil } @@ -198,6 +196,7 @@ func (o *GetAlertsParams) bindInhibited(rawData []string, hasKey bool, formats s // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertsParams() return nil @@ -221,10 +220,10 @@ func (o *GetAlertsParams) bindReceiver(rawData []string, hasKey bool, formats st // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations return nil } - o.Receiver = &raw return nil @@ -239,6 +238,7 @@ func (o *GetAlertsParams) bindSilenced(rawData []string, hasKey bool, formats st // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertsParams() return nil @@ -262,6 +262,7 @@ func (o *GetAlertsParams) bindUnprocessed(rawData []string, hasKey bool, formats // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertsParams() return nil diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_responses.go index 8c4f4b8f55d..1a4d19415c1 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_responses.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_responses.go @@ -30,7 +30,8 @@ import ( // GetAlertsOKCode is the HTTP code returned for type GetAlertsOK const GetAlertsOKCode int = 200 -/*GetAlertsOK Get alerts response +/* +GetAlertsOK Get alerts response swagger:response getAlertsOK */ @@ -77,7 +78,8 @@ func (o *GetAlertsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Pro // GetAlertsBadRequestCode is the HTTP code returned for type GetAlertsBadRequest const GetAlertsBadRequestCode int = 400 -/*GetAlertsBadRequest Bad request +/* +GetAlertsBadRequest Bad request swagger:response getAlertsBadRequest */ @@ -119,7 +121,8 @@ func (o *GetAlertsBadRequest) WriteResponse(rw http.ResponseWriter, producer run // GetAlertsInternalServerErrorCode is the HTTP code returned for type GetAlertsInternalServerError const GetAlertsInternalServerErrorCode int = 500 -/*GetAlertsInternalServerError Internal server error +/* +GetAlertsInternalServerError Internal server error swagger:response getAlertsInternalServerError */ diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts.go index 858db94bbca..d4a16643764 100644 --- 
a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts.go @@ -43,10 +43,10 @@ func NewPostAlerts(ctx *middleware.Context, handler PostAlertsHandler) *PostAler return &PostAlerts{Context: ctx, Handler: handler} } -/*PostAlerts swagger:route POST /alerts alert postAlerts +/* + PostAlerts swagger:route POST /alerts alert postAlerts Create new Alerts - */ type PostAlerts struct { Context *middleware.Context @@ -56,17 +56,15 @@ type PostAlerts struct { func (o *PostAlerts) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewPostAlertsParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_parameters.go index 1602b5830f8..ed202f19cb5 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_parameters.go @@ -26,12 +26,14 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/runtime" "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" "github.com/prometheus/alertmanager/api/v2/models" ) // NewPostAlertsParams creates a new PostAlertsParams object -// no default values defined in spec. +// +// There are no default values defined in the spec. 
func NewPostAlertsParams() PostAlertsParams { return PostAlertsParams{} @@ -77,6 +79,11 @@ func (o *PostAlertsParams) BindRequest(r *http.Request, route *middleware.Matche res = append(res, err) } + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + if len(res) == 0 { o.Alerts = body } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_responses.go index 7e273c31067..87302a98009 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_responses.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_responses.go @@ -28,7 +28,8 @@ import ( // PostAlertsOKCode is the HTTP code returned for type PostAlertsOK const PostAlertsOKCode int = 200 -/*PostAlertsOK Create alerts response +/* +PostAlertsOK Create alerts response swagger:response postAlertsOK */ @@ -52,7 +53,8 @@ func (o *PostAlertsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Pr // PostAlertsBadRequestCode is the HTTP code returned for type PostAlertsBadRequest const PostAlertsBadRequestCode int = 400 -/*PostAlertsBadRequest Bad request +/* +PostAlertsBadRequest Bad request swagger:response postAlertsBadRequest */ @@ -94,7 +96,8 @@ func (o *PostAlertsBadRequest) WriteResponse(rw http.ResponseWriter, producer ru // PostAlertsInternalServerErrorCode is the HTTP code returned for type PostAlertsInternalServerError const PostAlertsInternalServerErrorCode int = 500 -/*PostAlertsInternalServerError Internal server error +/* +PostAlertsInternalServerError Internal server error swagger:response postAlertsInternalServerError */ diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups.go index 3ea3975d53c..29579e8eef1 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups.go @@ -43,10 +43,10 @@ func NewGetAlertGroups(ctx *middleware.Context, handler GetAlertGroupsHandler) * return &GetAlertGroups{Context: ctx, Handler: handler} } -/*GetAlertGroups swagger:route GET /alerts/groups alertgroup getAlertGroups +/* + GetAlertGroups swagger:route GET /alerts/groups alertgroup getAlertGroups Get a list of alert groups - */ type GetAlertGroups struct { Context *middleware.Context @@ -56,17 +56,15 @@ type GetAlertGroups struct { func (o *GetAlertGroups) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewGetAlertGroupsParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_parameters.go index b4e656bdb05..0f5712dc426 100644 --- 
a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_parameters.go @@ -122,7 +122,6 @@ func (o *GetAlertGroupsParams) BindRequest(r *http.Request, route *middleware.Ma if err := o.bindSilenced(qSilenced, qhkSilenced, route.Formats); err != nil { res = append(res, err) } - if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -138,6 +137,7 @@ func (o *GetAlertGroupsParams) bindActive(rawData []string, hasKey bool, formats // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertGroupsParams() return nil @@ -156,10 +156,8 @@ func (o *GetAlertGroupsParams) bindActive(rawData []string, hasKey bool, formats // // Arrays are parsed according to CollectionFormat: "multi" (defaults to "csv" when empty). func (o *GetAlertGroupsParams) bindFilter(rawData []string, hasKey bool, formats strfmt.Registry) error { - // CollectionFormat: multi filterIC := rawData - if len(filterIC) == 0 { return nil } @@ -185,6 +183,7 @@ func (o *GetAlertGroupsParams) bindInhibited(rawData []string, hasKey bool, form // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertGroupsParams() return nil @@ -208,10 +207,10 @@ func (o *GetAlertGroupsParams) bindReceiver(rawData []string, hasKey bool, forma // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations return nil } - o.Receiver = &raw return nil @@ -226,6 +225,7 @@ func (o *GetAlertGroupsParams) bindSilenced(rawData []string, hasKey bool, forma // Required: false // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations // Default values have been previously initialized by NewGetAlertGroupsParams() return nil diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_responses.go index bb0d90faeea..3b9227aaaac 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_responses.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_responses.go @@ -30,7 +30,8 @@ import ( // GetAlertGroupsOKCode is the HTTP code returned for type GetAlertGroupsOK const GetAlertGroupsOKCode int = 200 -/*GetAlertGroupsOK Get alert groups response +/* +GetAlertGroupsOK Get alert groups response swagger:response getAlertGroupsOK */ @@ -77,7 +78,8 @@ func (o *GetAlertGroupsOK) WriteResponse(rw http.ResponseWriter, producer runtim // GetAlertGroupsBadRequestCode is the HTTP code returned for type GetAlertGroupsBadRequest const GetAlertGroupsBadRequestCode int = 400 -/*GetAlertGroupsBadRequest Bad request +/* +GetAlertGroupsBadRequest Bad request swagger:response getAlertGroupsBadRequest */ @@ -119,7 +121,8 @@ func (o *GetAlertGroupsBadRequest) WriteResponse(rw http.ResponseWriter, produce // GetAlertGroupsInternalServerErrorCode is the HTTP code returned for type GetAlertGroupsInternalServerError const GetAlertGroupsInternalServerErrorCode int = 500 -/*GetAlertGroupsInternalServerError Internal server error +/* +GetAlertGroupsInternalServerError Internal server error 
swagger:response getAlertGroupsInternalServerError */ diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertmanager_api.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertmanager_api.go index fcc6431c882..8cbd9a6efb0 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertmanager_api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertmanager_api.go @@ -52,6 +52,7 @@ func NewAlertmanagerAPI(spec *loads.Document) *AlertmanagerAPI { PreServerShutdown: func() {}, ServerShutdown: func() {}, spec: spec, + useSwaggerUI: false, ServeError: errors.ServeError, BasicAuthenticator: security.BasicAuth, APIKeyAuthenticator: security.APIKeyAuth, @@ -102,13 +103,16 @@ type AlertmanagerAPI struct { defaultConsumes string defaultProduces string Middleware func(middleware.Builder) http.Handler + useSwaggerUI bool // BasicAuthenticator generates a runtime.Authenticator from the supplied basic auth function. // It has a default implementation in the security package, however you can replace it for your particular usage. BasicAuthenticator func(security.UserPassAuthentication) runtime.Authenticator + // APIKeyAuthenticator generates a runtime.Authenticator from the supplied token auth function. // It has a default implementation in the security package, however you can replace it for your particular usage. APIKeyAuthenticator func(string, string, security.TokenAuthentication) runtime.Authenticator + // BearerAuthenticator generates a runtime.Authenticator from the supplied bearer token auth function. // It has a default implementation in the security package, however you can replace it for your particular usage. BearerAuthenticator func(string, security.ScopedTokenAuthentication) runtime.Authenticator @@ -139,6 +143,7 @@ type AlertmanagerAPI struct { AlertPostAlertsHandler alert.PostAlertsHandler // SilencePostSilencesHandler sets the operation handler for the post silences operation SilencePostSilencesHandler silence.PostSilencesHandler + // ServeError is called when an error is received, there is a default handler // but you can set your own with this ServeError func(http.ResponseWriter, *http.Request, error) @@ -158,6 +163,16 @@ type AlertmanagerAPI struct { Logger func(string, ...interface{}) } +// UseRedoc for documentation at /docs +func (o *AlertmanagerAPI) UseRedoc() { + o.useSwaggerUI = false +} + +// UseSwaggerUI for documentation at /docs +func (o *AlertmanagerAPI) UseSwaggerUI() { + o.useSwaggerUI = true +} + // SetDefaultProduces sets the default produces media type func (o *AlertmanagerAPI) SetDefaultProduces(mediaType string) { o.defaultProduces = mediaType @@ -366,6 +381,9 @@ func (o *AlertmanagerAPI) Serve(builder middleware.Builder) http.Handler { if o.Middleware != nil { return o.Middleware(builder) } + if o.useSwaggerUI { + return o.context.APIHandlerSwaggerUI(builder) + } return o.context.APIHandler(builder) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status.go index 8b6a6f5c7cb..01a89753147 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status.go @@ -43,10 +43,10 @@ func NewGetStatus(ctx *middleware.Context, handler GetStatusHandler) *GetStatus return &GetStatus{Context: ctx, Handler: 
handler} } -/*GetStatus swagger:route GET /status general getStatus +/* + GetStatus swagger:route GET /status general getStatus Get current status of an Alertmanager instance and its cluster - */ type GetStatus struct { Context *middleware.Context @@ -56,17 +56,15 @@ type GetStatus struct { func (o *GetStatus) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewGetStatusParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_parameters.go index 3a7d16c7680..2108b285953 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_parameters.go @@ -27,7 +27,8 @@ import ( ) // NewGetStatusParams creates a new GetStatusParams object -// no default values defined in spec. +// +// There are no default values defined in the spec. func NewGetStatusParams() GetStatusParams { return GetStatusParams{} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_responses.go index 60d9e620757..ddfc323df6d 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_responses.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_responses.go @@ -30,7 +30,8 @@ import ( // GetStatusOKCode is the HTTP code returned for type GetStatusOK const GetStatusOKCode int = 200 -/*GetStatusOK Get status response +/* +GetStatusOK Get status response swagger:response getStatusOK */ diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers.go index 40197940a24..d1ef5699653 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers.go @@ -43,10 +43,10 @@ func NewGetReceivers(ctx *middleware.Context, handler GetReceiversHandler) *GetR return &GetReceivers{Context: ctx, Handler: handler} } -/*GetReceivers swagger:route GET /receivers receiver getReceivers +/* + GetReceivers swagger:route GET /receivers receiver getReceivers Get list of all receivers (name of notification integrations) - */ type GetReceivers struct { Context *middleware.Context @@ -56,17 +56,15 @@ type GetReceivers struct { func (o *GetReceivers) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewGetReceiversParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git 
a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_parameters.go index ef1ca653ffc..059a10c5b2b 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_parameters.go @@ -27,7 +27,8 @@ import ( ) // NewGetReceiversParams creates a new GetReceiversParams object -// no default values defined in spec. +// +// There are no default values defined in the spec. func NewGetReceiversParams() GetReceiversParams { return GetReceiversParams{} diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_responses.go index ee0e6f2c311..0727afc9fbe 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_responses.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_responses.go @@ -30,7 +30,8 @@ import ( // GetReceiversOKCode is the HTTP code returned for type GetReceiversOK const GetReceiversOKCode int = 200 -/*GetReceiversOK Get receivers response +/* +GetReceiversOK Get receivers response swagger:response getReceiversOK */ diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence.go index d0a227484a1..704e5292c51 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence.go @@ -43,10 +43,10 @@ func NewDeleteSilence(ctx *middleware.Context, handler DeleteSilenceHandler) *De return &DeleteSilence{Context: ctx, Handler: handler} } -/*DeleteSilence swagger:route DELETE /silence/{silenceID} silence deleteSilence +/* + DeleteSilence swagger:route DELETE /silence/{silenceID} silence deleteSilence Delete a silence by its ID - */ type DeleteSilence struct { Context *middleware.Context @@ -56,17 +56,15 @@ type DeleteSilence struct { func (o *DeleteSilence) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewDeleteSilenceParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_parameters.go index 7f8c87250b0..da9d4f4f7bd 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_parameters.go @@ -29,7 +29,8 @@ import ( ) // NewDeleteSilenceParams creates a new DeleteSilenceParams object -// no default values defined in spec. +// +// There are no default values defined in the spec. 
func NewDeleteSilenceParams() DeleteSilenceParams { return DeleteSilenceParams{} @@ -64,7 +65,6 @@ func (o *DeleteSilenceParams) BindRequest(r *http.Request, route *middleware.Mat if err := o.bindSilenceID(rSilenceID, rhkSilenceID, route.Formats); err != nil { res = append(res, err) } - if len(res) > 0 { return errors.CompositeValidationError(res...) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_responses.go index de87e29a3b8..0878e35dd39 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_responses.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_responses.go @@ -28,7 +28,8 @@ import ( // DeleteSilenceOKCode is the HTTP code returned for type DeleteSilenceOK const DeleteSilenceOKCode int = 200 -/*DeleteSilenceOK Delete silence response +/* +DeleteSilenceOK Delete silence response swagger:response deleteSilenceOK */ @@ -52,7 +53,8 @@ func (o *DeleteSilenceOK) WriteResponse(rw http.ResponseWriter, producer runtime // DeleteSilenceInternalServerErrorCode is the HTTP code returned for type DeleteSilenceInternalServerError const DeleteSilenceInternalServerErrorCode int = 500 -/*DeleteSilenceInternalServerError Internal server error +/* +DeleteSilenceInternalServerError Internal server error swagger:response deleteSilenceInternalServerError */ diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence.go index afaf6dd5f40..bbbed0579e4 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence.go @@ -43,10 +43,10 @@ func NewGetSilence(ctx *middleware.Context, handler GetSilenceHandler) *GetSilen return &GetSilence{Context: ctx, Handler: handler} } -/*GetSilence swagger:route GET /silence/{silenceID} silence getSilence +/* + GetSilence swagger:route GET /silence/{silenceID} silence getSilence Get a silence by its ID - */ type GetSilence struct { Context *middleware.Context @@ -56,17 +56,15 @@ type GetSilence struct { func (o *GetSilence) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { - r = rCtx + *r = *rCtx } var Params = NewGetSilenceParams() - if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request - o.Context.Respond(rw, r, route.Produces, route, res) } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_parameters.go index 931dc4fad4d..b3b0b737586 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_parameters.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_parameters.go @@ -29,7 +29,8 @@ import ( ) // NewGetSilenceParams creates a new GetSilenceParams object -// no default values defined in spec. +// +// There are no default values defined in the spec. 
 func NewGetSilenceParams() GetSilenceParams {
 
 	return GetSilenceParams{}
@@ -64,7 +65,6 @@ func (o *GetSilenceParams) BindRequest(r *http.Request, route *middleware.Matche
 	if err := o.bindSilenceID(rSilenceID, rhkSilenceID, route.Formats); err != nil {
 		res = append(res, err)
 	}
-
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_responses.go
index 5ece7ad73c1..51255fe83cd 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_responses.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_responses.go
@@ -30,7 +30,8 @@ import (
 )
 
 // GetSilenceOKCode is the HTTP code returned for type GetSilenceOK
 const GetSilenceOKCode int = 200
 
-/*GetSilenceOK Get silence response
+/*
+GetSilenceOK Get silence response
 
 swagger:response getSilenceOK
 */
@@ -74,7 +75,8 @@ func (o *GetSilenceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Pr
 // GetSilenceNotFoundCode is the HTTP code returned for type GetSilenceNotFound
 const GetSilenceNotFoundCode int = 404
 
-/*GetSilenceNotFound A silence with the specified ID was not found
+/*
+GetSilenceNotFound A silence with the specified ID was not found
 
 swagger:response getSilenceNotFound
 */
@@ -98,7 +100,8 @@ func (o *GetSilenceNotFound) WriteResponse(rw http.ResponseWriter, producer runt
 // GetSilenceInternalServerErrorCode is the HTTP code returned for type GetSilenceInternalServerError
 const GetSilenceInternalServerErrorCode int = 500
 
-/*GetSilenceInternalServerError Internal server error
+/*
+GetSilenceInternalServerError Internal server error
 
 swagger:response getSilenceInternalServerError
 */
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences.go
index 0346a421b86..cee92e28c85 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences.go
@@ -43,10 +43,10 @@ func NewGetSilences(ctx *middleware.Context, handler GetSilencesHandler) *GetSil
 	return &GetSilences{Context: ctx, Handler: handler}
 }
 
-/*GetSilences swagger:route GET /silences silence getSilences
+/*
+	GetSilences swagger:route GET /silences silence getSilences
 
 Get a list of silences
-
 */
 type GetSilences struct {
 	Context *middleware.Context
@@ -56,17 +56,15 @@ type GetSilences struct {
 func (o *GetSilences) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 	route, rCtx, _ := o.Context.RouteInfo(r)
 	if rCtx != nil {
-		r = rCtx
+		*r = *rCtx
 	}
 	var Params = NewGetSilencesParams()
-
 	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
 		o.Context.Respond(rw, r, route.Produces, route, err)
 		return
 	}
 
 	res := o.Handler.Handle(Params) // actually handle the request
-
 	o.Context.Respond(rw, r, route.Produces, route, res)
 }
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_parameters.go
index a2ee5cf25d7..0724540b846 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_parameters.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_parameters.go
@@ -29,7 +29,8 @@ import (
 )
 
 // NewGetSilencesParams creates a new GetSilencesParams object
-// no default values defined in spec.
+//
+// There are no default values defined in the spec.
 func NewGetSilencesParams() GetSilencesParams {
 
 	return GetSilencesParams{}
@@ -66,7 +67,6 @@ func (o *GetSilencesParams) BindRequest(r *http.Request, route *middleware.Match
 	if err := o.bindFilter(qFilter, qhkFilter, route.Formats); err != nil {
 		res = append(res, err)
 	}
-
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
@@ -77,10 +77,8 @@ func (o *GetSilencesParams) BindRequest(r *http.Request, route *middleware.Match
 //
 // Arrays are parsed according to CollectionFormat: "multi" (defaults to "csv" when empty).
 func (o *GetSilencesParams) bindFilter(rawData []string, hasKey bool, formats strfmt.Registry) error {
-
 	// CollectionFormat: multi
 	filterIC := rawData
-
 	if len(filterIC) == 0 {
 		return nil
 	}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_responses.go
index 314edf7b216..f2f7b88f746 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_responses.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_responses.go
@@ -30,7 +30,8 @@ import (
 )
 
 // GetSilencesOKCode is the HTTP code returned for type GetSilencesOK
 const GetSilencesOKCode int = 200
 
-/*GetSilencesOK Get silences response
+/*
+GetSilencesOK Get silences response
 
 swagger:response getSilencesOK
 */
@@ -77,7 +78,8 @@ func (o *GetSilencesOK) WriteResponse(rw http.ResponseWriter, producer runtime.P
 // GetSilencesInternalServerErrorCode is the HTTP code returned for type GetSilencesInternalServerError
 const GetSilencesInternalServerErrorCode int = 500
 
-/*GetSilencesInternalServerError Internal server error
+/*
+GetSilencesInternalServerError Internal server error
 
 swagger:response getSilencesInternalServerError
 */
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences.go
index 98cc0576d45..476aebdc4b6 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences.go
@@ -20,6 +20,7 @@ package silence
 // Editing this file might prove futile when you re-run the generate command
 
 import (
+	"context"
 	"net/http"
 
 	"github.com/go-openapi/runtime/middleware"
@@ -45,10 +46,10 @@ func NewPostSilences(ctx *middleware.Context, handler PostSilencesHandler) *Post
 	return &PostSilences{Context: ctx, Handler: handler}
 }
 
-/*PostSilences swagger:route POST /silences silence postSilences
+/*
+	PostSilences swagger:route POST /silences silence postSilences
 
 Post a new silence or update an existing one
-
 */
 type PostSilences struct {
 	Context *middleware.Context
@@ -58,17 +59,15 @@ type PostSilences struct {
 func (o *PostSilences) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 	route, rCtx, _ := o.Context.RouteInfo(r)
 	if rCtx != nil {
-		r = rCtx
+		*r = *rCtx
 	}
 	var Params = NewPostSilencesParams()
-
 	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
 		o.Context.Respond(rw, r, route.Produces, route, err)
 		return
 	}
 
 	res := o.Handler.Handle(Params) // actually handle the request
-
 	o.Context.Respond(rw, r, route.Produces, route, res)
 }
@@ -87,6 +86,11 @@ func (o *PostSilencesOKBody) Validate(formats strfmt.Registry) error {
 	return nil
 }
 
+// ContextValidate validates this post silences o k body based on context it is used
+func (o *PostSilencesOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
 // MarshalBinary interface implementation
 func (o *PostSilencesOKBody) MarshalBinary() ([]byte, error) {
 	if o == nil {
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_parameters.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_parameters.go
index 360f27c14b7..62e9c9fa2d2 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_parameters.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_parameters.go
@@ -26,12 +26,14 @@ import (
 	"github.com/go-openapi/errors"
 	"github.com/go-openapi/runtime"
 	"github.com/go-openapi/runtime/middleware"
+	"github.com/go-openapi/validate"
 
 	"github.com/prometheus/alertmanager/api/v2/models"
 )
 
 // NewPostSilencesParams creates a new PostSilencesParams object
-// no default values defined in spec.
+//
+// There are no default values defined in the spec.
 func NewPostSilencesParams() PostSilencesParams {
 
 	return PostSilencesParams{}
@@ -77,6 +79,11 @@ func (o *PostSilencesParams) BindRequest(r *http.Request, route *middleware.Matc
 			res = append(res, err)
 		}
 
+		ctx := validate.WithOperationRequest(r.Context())
+		if err := body.ContextValidate(ctx, route.Formats); err != nil {
+			res = append(res, err)
+		}
+
 		if len(res) == 0 {
 			o.Silence = &body
 		}
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_responses.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_responses.go
index 7570855d271..a20e89a6ee9 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_responses.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_responses.go
@@ -28,7 +28,8 @@ import (
 )
 
 // PostSilencesOKCode is the HTTP code returned for type PostSilencesOK
 const PostSilencesOKCode int = 200
 
-/*PostSilencesOK Create / update silence response
+/*
+PostSilencesOK Create / update silence response
 
 swagger:response postSilencesOK
 */
@@ -72,7 +73,8 @@ func (o *PostSilencesOK) WriteResponse(rw http.ResponseWriter, producer runtime.
 // PostSilencesBadRequestCode is the HTTP code returned for type PostSilencesBadRequest
 const PostSilencesBadRequestCode int = 400
 
-/*PostSilencesBadRequest Bad request
+/*
+PostSilencesBadRequest Bad request
 
 swagger:response postSilencesBadRequest
 */
@@ -114,7 +116,8 @@ func (o *PostSilencesBadRequest) WriteResponse(rw http.ResponseWriter, producer
 // PostSilencesNotFoundCode is the HTTP code returned for type PostSilencesNotFound
 const PostSilencesNotFoundCode int = 404
 
-/*PostSilencesNotFound A silence with the specified ID was not found
+/*
+PostSilencesNotFound A silence with the specified ID was not found
 
 swagger:response postSilencesNotFound
 */
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/server.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/server.go
index 3bf056ef70a..0f83b6c459e 100644
--- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/server.go
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/server.go
@@ -22,7 +22,6 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"net"
 	"net/http"
@@ -288,7 +287,7 @@ func (s *Server) Serve() (err error) {
 		if s.TLSCACertificate != "" {
 			// include specified CA certificate
-			caCert, caCertErr := ioutil.ReadFile(string(s.TLSCACertificate))
+			caCert, caCertErr := os.ReadFile(string(s.TLSCACertificate))
 			if caCertErr != nil {
 				return caCertErr
 			}
@@ -319,9 +318,6 @@ func (s *Server) Serve() (err error) {
 			s.Fatalf("no certificate was configured for TLS")
 		}
 
-		// must have at least one certificate or panics
-		httpsServer.TLSConfig.BuildNameToCertificate()
-
 		configureServer(httpsServer, "https", s.httpsServerL.Addr().String())
 
 		servers = append(servers, httpsServer)
diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/testing.go b/vendor/github.com/prometheus/alertmanager/api/v2/testing.go
new file mode 100644
index 00000000000..c813d350d38
--- /dev/null
+++ b/vendor/github.com/prometheus/alertmanager/api/v2/testing.go
@@ -0,0 +1,70 @@
+// Copyright 2022 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2
+
+import (
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/go-openapi/strfmt"
+	"github.com/stretchr/testify/require"
+
+	open_api_models "github.com/prometheus/alertmanager/api/v2/models"
+	"github.com/prometheus/alertmanager/pkg/labels"
+	"github.com/prometheus/alertmanager/silence/silencepb"
+)
+
+func createSilence(t *testing.T, ID, creator string, start, ends time.Time) (open_api_models.PostableSilence, []byte) {
+	t.Helper()
+
+	comment := "test"
+	matcherName := "a"
+	matcherValue := "b"
+	isRegex := false
+	startsAt := strfmt.DateTime(start)
+	endsAt := strfmt.DateTime(ends)
+
+	sil := open_api_models.PostableSilence{
+		ID: ID,
+		Silence: open_api_models.Silence{
+			Matchers:  open_api_models.Matchers{&open_api_models.Matcher{Name: &matcherName, Value: &matcherValue, IsRegex: &isRegex}},
+			StartsAt:  &startsAt,
+			EndsAt:    &endsAt,
+			CreatedBy: &creator,
+			Comment:   &comment,
+		},
+	}
+	b, err := json.Marshal(&sil)
+	require.NoError(t, err)
+
+	return sil, b
+}
+
+func createSilenceMatcher(t *testing.T, name, pattern string, matcherType silencepb.Matcher_Type) *silencepb.Matcher {
+	t.Helper()
+
+	return &silencepb.Matcher{
+		Name:    name,
+		Pattern: pattern,
+		Type:    matcherType,
+	}
+}
+
+func createLabelMatcher(t *testing.T, name, value string, matchType labels.MatchType) *labels.Matcher {
+	t.Helper()
+
+	matcher, _ := labels.NewMatcher(matchType, name, value)
+	return matcher
+}
diff --git a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go
index ff1271793aa..9a6df140328 100644
--- a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go
+++ b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go
@@ -38,9 +38,9 @@ var Assets = func() http.FileSystem {
 		"/static/index.html": &vfsgen۰CompressedFileInfo{
 			name:             "index.html",
 			modTime:          time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 1381,
+			uncompressedSize: 1654,
-			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff..."),
+			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff..."),
 		},
 		"/static/lib": &vfsgen۰DirInfo{
 			name: "lib",
@@ -152,9 +152,9 @@ var Assets = func() http.FileSystem {
 		"/static/script.js": &vfsgen۰CompressedFileInfo{
 			name:             "script.js",
 			modTime:          time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 108434,
+			uncompressedSize: 110123,
 
-			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff...
\x5b\x9e\x5f\x57\x59\x4c\xf7\x92\xc8\x92\x90\x3b\x74\x6c\x13\x22\x5b\x1d\x9d\x67\x4d\x93\x21\xed\x9d\x8e\x0c\xf0\x38\xbb\xee\xee\x88\x11\xc9\xe1\x8c\xad\x56\xa7\x8f\x18\xe8\x0b\xbc\xfc\x07\x0f\x1c\xe0\xb8\x37\x43\xd7\x6a\x92\xe9\x2a\x1e\x02\xc7\xd8\x2f\x61\x92\x0a\x50\xc7\x00\x3d\xd2\x40\x4a\xdb\xbf\x5a\x9e\x91\xdd\x2e\xf6\x16\xf0\x94\x91\x63\x0e\x7f\x92\xa7\x4c\xcd\xc5\x57\x6b\x79\x85\x95\xa9\x67\x98\x66\x62\x66\x0e\x9f\xd0\xfc\xbd\x61\xee\x98\x50\xf3\x3e\x36\x83\x15\x6b\xd8\xd8\x82\x35\x10\x56\x3a\x96\x14\x66\x3a\xe0\x6c\xa6\xe3\xb9\xf4\x2d\x0d\x2e\x85\x7d\xcf\x3b\x94\x3d\x6d\x24\x29\x32\x3e\x4a\xa5\xa0\xfa\xb7\xe8\x15\x33\x3e\x93\x56\x47\xab\xa0\x65\xcf\x2e\xa3\x12\x6b\xbe\x8a\xfa\xeb\xea\x8d\x0c\xa7\x6c\xfd\x46\x7b\x51\xbf\x67\x98\xe8\x7b\x85\x89\xec\x3b\x64\xee\xd0\x29\x83\xb5\x86\x7c\x28\xc1\xdc\x2e\x56\xb8\x56\x30\x7c\x60\xe4\x8c\xc3\x17\xb6\x2e\x64\x64\x86\x3e\xb0\x55\xfb\x6b\x4f\x19\x12\x3a\xe6\xc6\xa0\x39\xaf\x90\x2f\x75\xe8\xd9\x6a\x14\xe2\x77\x50\xcf\xb2\x4b\x7f\xf1\x42\xcd\x60\x53\xc5\x09\x23\xcf\x38\x7c\x5c\xbb\x26\x9c\x72\xa4\x31\x9e\x30\xd3\x46\x6c\xee\x2f\xd8\xe0\x67\xfa\xb2\xb2\x7a\x6f\x01\x48\x90\x19\xfa\xce\xe0\x63\x86\x0a\x74\xbf\x9e\x33\x14\x4a\x0c\x4f\x19\x3a\xc1\x90\xd9\xa4\x72\x43\x05\x9a\xb9\x17\x18\x9e\xf1\xf5\xbe\x95\x2f\x2c\x5b\xbd\xba\x6f\xb8\x7b\x4e\x93\x6b\x72\x1f\xf8\x7f\x42\xe8\xa7\x1c\xfa\xbe\xe4\xc0\xfc\x67\x1c\x06\xc5\xa1\xe4\x32\xa2\x88\x59\xca\x03\xfd\xd3\xdf\xea\x40\x10\xf9\x8e\x03\xc1\x55\x25\x8d\xea\xb3\x0a\x5c\xdf\xbf\x89\x21\xf8\x52\x81\xf8\x6c\x41\xa0\x19\x1a\xc6\x5a\x78\xea\x39\x3d\x8a\xa1\x03\x1c\xfb\x1c\xb7\x9c\x07\x74\x1a\x3d\xb8\xd9\xb1\xee\xfd\xfb\xc9\x36\xe6\x77\x7d\x5d\xfd\xbc\x5f\xfb\xfc\x8d\xad\xf2\x71\x95\x39\xf7\xaa\x9f\x77\x6b\x9f\xdf\xb3\x8d\xc9\x63\x7f\x54\x3f\xef\xd5\x3e\xbf\xdd\x5c\xf7\x93\xcd\x75\x3f\xdf\x8c\xfc\xdd\xe6\x76\xbf\xd8\x4c\xf9\xa7\xcd\x9d\xfa\x75\x33\xe5\x7f\x6e\x2e\xcd\xd3\x8d\x94\x8b\x74\x23\xe5\x32\xdd\xd8\x2d\xac\xfa\xb9\xf3\xb8\xf6\x3d\xdd\x5c\x9c\xa6\xf6\x46\xb5\xd6\xf0\x4d\xeb\x60\xb5\xd3\x27\x56\x16\xc2\x0c\x3d\x89\x33\x9f\xa3\xf1\xc8\x58\x57\x75\xa7\x9b\x6f\x15\xbe\x43\xaf\x35\x0a\xb3\x79\x5b\xde\x94\x9c\x56\x32\x89\xbe\x8c\xeb\x9b\x82\x43\xb4\xad\xa5\x82\xc4\xb9\xc3\x6c\xc6\xac\x0d\x61\xe1\x6e\xf7\x68\x8a\xfe\xf1\x8a\x9b\x1b\x23\x22\x2e\xd9\x90\x09\xbf\xe1\xfc\xa3\xc5\x5b\xff\x70\xfe\x81\x7d\x11\xe9\xcd\xfe\x62\xef\x2f\x45\x59\x1a\x6f\xd6\x37\x7e\x11\x89\x5b\xc8\x69\xf4\xa3\x61\x24\x13\xd0\xf9\xf1\x87\xb1\x34\x9f\x74\xdd\x4a\xf4\xc0\x9f\x31\xfa\x14\x23\xae\xb7\x03\xcb\xf4\x47\xa9\x6d\x79\x58\x17\x05\xa7\x88\x97\xa7\x5c\xa9\x3e\xf6\x10\x40\x48\xb6\xb4\xaa\xd9\x5d\xd1\x4b\xd9\x16\x16\xea\x47\x90\x02\xd7\xbb\xf9\xc1\x31\x70\x97\x7e\xd0\x7b\x16\x7a\x23\x91\xea\x9b\x9c\xcc\x6d\x8d\x61\x71\xfa\x0c\x49\x68\x77\x94\xf4\xa8\x0c\x11\xc5\xd8\xd7\x5d\x1e\x82\x07\xf7\xf4\x83\xf6\x42\x94\x79\x09\x02\x65\x5e\x1c\xfb\x4c\x8d\xb8\x4b\x17\xf6\x88\x04\x36\x63\xdc\x44\x68\x86\xc6\x91\x49\xef\xfa\x48\xf3\x87\xdd\xfe\xc4\x86\x55\x1d\x9b\x0f\x43\x9f\xde\xf9\xa6\x07\x2b\xdb\x63\x63\xbb\xbf\xd0\x0c\x6d\x47\xb0\x07\x1c\xcf\xe7\x5b\xfa\xb9\xe3\x79\xc0\xb5\x0b\x4f\x7d\xd0\x3f\xec\x4b\xee\xad\xc2\x08\xf1\x36\xe9\xe0\x07\x7b\x73\x0f\xb7\x11\x7f\xd0\xf1\xbc\xb9\x87\x5b\x88\x3f\xd8\xd3\x4f\x56\xda\xa2\x5f\xf2\xe5\x50\x35\xef\x6d\xac\x43\x4f\xcb\x74\x38\x69\x05\xe6\x36\xaa\x0c\x48\x60\xdf\x6a\x3d\x49\xab\xfa\x5c\x04\x77\x51\x65\x5f\x6f\x94\xfe\xe6\xa6\xe0\x1d\x3a\x8d\x74\xa6\x7d\x73\x17\xc0\x02\x43\x10\x57\x30\x0d\x6d\x4c\xbc\xe7\x48\x91\x32\xc7\x77\x06\x74\x9c\x30\x6b\xa5\x98\x56\xc0\xdc\xed\xf9\xdc\x71\xb6\xb2\x2b\xa5\x95\xa2\x58\x9e\x8
e\xb7\x01\x7f\xaa\xb1\x3e\x8b\xe1\x75\x04\x47\x12\xf1\x96\x43\x9c\x0a\xb3\xdf\xa5\x6b\xb7\x07\x9d\xef\x4e\x76\x25\x6b\x75\x9f\xf0\xb6\x42\x08\x29\x59\xc9\x08\x2b\x5c\x9d\x18\x84\x57\x62\x47\xfe\x05\xb3\x62\x3e\xf7\x0e\xc9\x1d\x7a\x17\xaf\xcc\xf9\x2f\x6d\xda\x17\xa0\x2f\x39\xf8\x2f\x4f\xa4\x6e\x11\x72\x55\x76\xc3\x71\x95\x43\x7e\xa8\x4e\x70\xec\xfb\x77\xaf\xd3\xcd\xb9\xd9\xd3\x8d\xcb\xcc\x99\x3d\x31\xb8\xbb\xdd\xbb\x4e\xfd\xa3\x14\x9b\xab\x19\xca\xb3\x7e\x9b\xeb\x78\xba\xb9\x8e\x57\xd5\x26\xbc\x50\x4d\x78\x17\x59\xe8\x4f\xab\x00\xf9\x40\x1b\x3c\x76\xc6\x75\x1b\x6e\x90\xa2\xd3\xb4\x2a\x22\x3e\xd8\xf3\x75\x1a\xa1\x67\xe8\x62\x86\xa2\x08\x44\x84\xae\x52\xe4\x61\x0c\xdf\x53\x74\x9c\xea\x4d\x0a\xd0\x5f\xf2\xcf\x4f\xf5\x4b\x78\x95\xa2\xe5\xc1\x76\xfe\xfa\xcb\x8c\xf2\x3a\x80\x2d\x6f\x91\xe3\xfb\x0d\x54\xe6\x90\x60\x71\xbd\x2f\xbe\xb4\x5b\xf0\xa5\x36\xfb\x32\x77\x44\x75\x34\xce\xab\x40\x3a\x1e\x87\x67\x81\xec\x65\xa2\xf8\x1a\x50\xf9\xe5\x63\x6a\x6f\x20\x05\x5f\xca\xb8\xad\xf0\xb8\x85\x66\xa8\x3f\x00\xc7\x81\x19\xfa\x19\x01\x93\x70\x9e\x2a\x29\x3e\x00\x3d\xbd\x4f\x52\x78\xa7\xea\x3a\xc5\x18\xde\x47\x6a\x71\xd3\xcb\x64\x4b\xa8\x7f\xca\x1a\xde\x54\x6b\xb8\xd5\x3e\x9d\x73\xbd\x75\x73\xa6\xd3\x74\x7d\xd0\x93\xf7\xa5\x4e\x19\xfe\x1d\xf8\xf2\x66\x4e\x32\x28\x08\xa1\x38\x8b\xe6\xdb\x2e\x6f\x2e\xe3\x76\x62\x70\xad\xfe\x6b\xf2\x3e\xa6\xe6\xdf\x5b\x09\xdf\x22\xe4\x0c\xa2\xb1\x64\x42\x09\xa2\x6c\x43\x27\x24\x33\xf4\x39\x5a\xad\xa4\xcc\xd2\x5c\x7a\xe2\x05\x9c\x23\x0e\xcf\xd0\xc5\x29\x72\x92\x68\xcc\x78\xc8\xfa\x8e\x32\xdd\x87\xa9\xa1\x6a\x4b\x7b\x11\x30\x86\x53\xe4\x44\x7c\x14\x05\x91\x5c\x86\x60\x39\x04\x0d\xa5\x4e\x97\x64\x7f\xf6\x20\xcd\x3f\x0b\x16\xb2\xe8\x86\x09\x07\xa6\x86\x73\x4e\x91\x33\x14\x71\x3a\x75\x20\x54\xcc\x51\x2c\xf9\xde\xe1\x27\x81\x42\xdc\x73\x7a\xf9\xde\x44\x53\x81\xf8\x8e\x25\xbd\x9f\xa5\x99\xea\xd3\x36\xc7\xd2\xb6\xe7\xf3\xec\xa9\x6f\x3d\xb3\xb2\x2f\x79\xb7\x04\x66\x06\xc8\x3c\xd0\x72\x00\xfb\x7a\xfc\x18\xe4\xdb\xbe\x52\xa7\x7c\x97\x3a\xe5\xbb\x74\x59\xe9\xf5\x39\x8b\x94\x0d\xa2\x45\x6d\x08\xfa\xa7\x39\x36\xac\x63\x2d\x74\x12\x16\x86\xf3\xf7\x79\x62\xd0\x22\x0d\xb5\xae\x28\xca\x2a\x8a\x74\x45\x91\x1b\x82\x24\x88\x92\xc8\xed\x63\x55\x9e\x50\x97\x41\x48\xa2\x5a\xa5\x1e\x50\x37\x00\x9a\x57\xba\x44\x43\xec\x06\x10\xbb\x21\xc4\x6e\x1f\x62\x45\x83\xc4\x39\xa4\x22\x43\x68\x32\x4a\x23\xeb\xbf\xd2\x87\xfd\xbc\x0f\xfb\xcb\x7d\xa8\xba\x80\xe9\x06\xa6\xfa\x96\x5b\x4a\x98\x1b\x42\x48\x98\x22\x8b\xb0\xa5\x9e\x8c\xf4\x02\x37\x20\x2b\x7a\x53\xea\x85\xab\xec\xcd\xfc\xe0\xbe\xe9\x4d\xbb\x20\x23\x48\x5f\x61\x85\x75\xb5\x48\xcd\x1c\x86\x75\xe5\x5c\x57\xce\x75\xe5\x7c\xa9\x47\xf3\x20\x94\x8c\x16\x24\xd4\x80\xe3\x95\x83\xd9\x81\x08\x06\xc0\x96\x68\xb1\xfa\xf4\xe7\x92\x1a\xf3\x6d\x00\x49\x55\x41\x79\x5d\x85\x49\x82\x62\xf3\xbb\x34\x44\x6b\xea\x52\x00\xf7\x61\xa8\xec\xe9\xf0\x8d\xbf\xd5\x59\x54\x60\x5f\x56\x61\x8f\xb5\x02\x30\x51\x0d\xd1\xff\x2a\x8d\xa2\x3f\x80\xb3\x00\xbe\xa5\xc8\xb9\x68\x5f\xfc\xf5\xd7\xe5\xfd\x02\xe1\x3f\x5a\x3d\x17\xfe\xfa\xeb\xaf\xbf\xfe\xc7\xf6\xfc\xdf\xfe\xfa\x2b\xb9\x74\x30\x86\x3b\x74\x15\x68\x87\xe6\x59\xbc\xfa\x90\x4b\xf8\x72\xa1\x14\x1f\x25\xdf\xb1\x71\x38\xd4\x6d\x9c\xf7\x75\xa1\xed\xdc\xe7\xd3\x18\x74\x4c\xd8\x77\xa6\xe4\x17\xc7\xb8\xe5\x2c\x9c\xca\xa4\xfe\xb1\xd9\x1a\x7b\x5b\x5b\x9c\xeb\xab\xf3\x93\xda\xf7\x47\x75\x1b\xb9\xfa\xbd\x6e\x0b\xbe\xab\x7e\xae\x97\x7e\xf1\xfb\x0a\xea\x1d\xaf\x2a\xa8\xf7\xd5\x01\xfb\x64\x63\x7a\x91\x22\x23\x80\x39\x9d\x30\x07\x4e\x02\xa4\x46\xcd\x48\x47\x93\x2e\x29\x7b\xf7\x25\x13\xc3\xc9\x07\x36\x64\xb7\x0e\xbc\x55\x4b\x45\x30\xcc\xdf\x1e\xff\x48\xe9\x58\x
75\x6f\x7f\x00\xe7\x81\x59\xd5\xde\x0e\x14\x0d\x03\x5c\x5b\x7b\xbf\xa6\xab\xc3\xcc\xfe\xb4\x56\xb3\x19\xfa\xa8\x18\xa1\xb3\xb7\xe7\xe1\xd6\xc3\xce\xe3\xbd\x83\x47\x4a\x42\x89\x43\xaf\x27\xda\x9d\xbd\x03\xef\xf1\x81\x2f\xf0\x03\xfd\xf4\x70\xee\xa9\x59\x6a\x5e\x3f\xfc\x43\xaa\xe9\xc7\xda\x88\xe9\xaf\xda\xe2\x60\x0f\x76\x0f\xf6\x77\x8c\x1d\x62\x5e\x3f\x3e\x98\x7b\x18\xab\xd7\xf3\x3c\xea\xf5\x9e\x7e\xf2\x11\x27\xac\x8d\x76\x0f\xf6\xff\x48\x5b\x28\xcd\x2c\x97\x34\xb3\x5c\x30\x6e\x23\xd4\xd9\xdf\xfd\x03\x09\x82\xf6\xff\xe0\xad\x1d\xfc\xa0\xb3\xbf\xab\x6a\xd8\xc1\x0f\xf6\xd5\xbf\x1d\xa0\xb1\xcf\x88\x68\x21\x71\xd8\xf1\x7a\xbb\x7e\xfb\x31\x86\xa0\xe3\xa7\xad\x3d\xcf\xfb\x43\xb6\xd0\xce\x21\xeb\x79\x7e\xc7\xb6\x62\x38\xad\xcc\x9f\x27\x01\xec\xc1\x0c\xbd\x0b\xe0\x45\xa0\xb9\x14\x39\x6d\xc5\xc0\x4f\x02\xd8\x81\x55\x9b\x60\xf5\x2d\xb0\x4e\x6d\x0b\x6c\xa7\xb6\x05\xb6\x5b\xcb\x78\xb0\x57\xcb\x8f\xb0\x5f\x3b\xe6\x7d\x50\xbd\xb1\xb0\xf1\xb0\x76\x5b\xe0\xa3\xda\x01\xfb\xc7\xb5\xeb\xad\x3a\x5e\xfd\xba\xaa\x4e\xa7\xbe\x0b\xd7\xd9\x59\x2c\xd0\x0c\xbd\xcc\x1b\x5d\x6d\xf5\x0c\x7d\xb6\x7a\xe3\xdc\x7a\xff\xd3\x7a\xef\x5b\xef\xbf\xad\x79\xff\xdc\x7a\xef\x66\xef\x77\x61\x86\x5e\x17\x15\xeb\xff\xb5\x9c\x6f\x96\x58\x10\xb4\xa2\x65\x0d\x4d\x6e\x50\x23\xee\x83\x01\x56\xcb\x50\xb9\x01\x71\x1f\x1e\xfb\x5a\xee\x9d\xfa\x5b\xa2\xd9\xd4\xf7\x91\xcf\xe7\xb2\xa7\x9f\x77\xfc\x5d\xbf\xa3\x7d\x8d\x6e\xf0\xc5\xf6\x22\xd9\x2c\xc0\x0f\xbd\x4c\x1b\xa5\x09\x5a\x39\xd1\xeb\x41\x3a\x5c\x5b\x58\xc2\x8a\xb9\x69\x9c\x22\xf1\x80\xcd\xbd\x1e\x6f\xa1\x2b\x69\x9e\x71\x0b\xc9\x96\xd3\x70\x30\xf6\x95\x84\xdd\x8e\x8c\x63\x7e\xa1\x27\xb0\x03\x3c\x52\x3a\x3e\x84\x09\xd6\x27\x28\x4a\x17\x96\x45\xdb\x56\xe1\x0d\x68\x36\xb7\x6c\x77\x40\xee\x1c\x30\xfe\x00\xcb\xbf\x65\x37\x4c\x4d\x2b\xe3\x12\x50\xda\xef\x20\x01\x0e\x7b\xb8\x9d\x3d\x75\x3c\x0f\xb7\xf2\xb7\x9e\x67\x53\x40\xe9\xff\x1f\xd9\xbd\x74\xca\xd9\xcd\xd3\xf1\x04\x1d\xc5\x89\xf7\xd5\x66\x79\xb5\x66\x75\x6a\xcd\xda\xa9\x35\x6b\xb7\xd6\xac\xbd\x5a\xb3\xf6\x6b\xcd\x3a\xa8\x35\xeb\x61\xad\x59\x8f\xea\xad\x7a\x5c\xbf\x5e\xae\xe3\x2d\x35\xb3\x63\x3b\x11\xad\x59\x84\x98\x52\x0a\xf4\xb0\x6b\x99\xad\x6f\x58\xe2\x6a\x46\xe9\xd7\x4a\x67\xd3\x02\x5b\xbf\x97\x05\xb8\x52\x95\x3a\x7b\x07\x1d\xfd\x9a\x55\x60\x0b\xe5\x5d\xc9\x59\xd1\xea\x78\xde\x1f\xbc\xb5\xf7\x87\x6c\x31\x97\xb6\x10\x73\x83\x5e\xc7\xb7\x3d\x4e\x91\x4d\x8d\x20\x8a\x38\x22\x08\xc7\x70\x4f\x13\x9f\xb7\x53\x8a\x84\x16\xdc\xc2\x0e\x44\x1e\x27\x4a\x2f\xec\x68\xf5\x90\x26\x16\xb6\x81\xcd\xe6\x8f\x0e\xf6\x98\x66\xf4\x87\x9d\xc7\x9d\x03\xfb\x6e\x97\x80\x56\x96\xf0\xa3\x04\x3a\xbb\x46\xee\x58\xde\x3a\x5a\x8f\xb5\xee\x6d\x4b\xe4\x9c\x8d\xe2\x74\xdc\xd7\xb7\xee\x06\xac\xc1\x26\x53\x79\xe7\x60\x7f\x86\x4e\x13\xb8\x91\xc8\xf9\x2c\x62\x3e\x6c\xbc\x3a\x3b\x7d\x74\xe0\x75\x1a\x83\x58\x4c\xa8\x74\x30\x4c\x6a\x76\xfb\xd8\x26\xe0\x03\x9a\xc6\x70\xff\x41\x09\x9a\x2d\x0f\xc3\x59\xfe\x70\x9e\x3f\xbc\x54\x0f\xef\x95\x25\xf5\x9d\x81\xa0\x5a\x3c\xda\x5e\x82\xbe\x8d\x8d\x6a\x71\xc5\x31\xde\x40\xee\xb4\xea\x78\xba\xa9\xc8\xd5\xf0\x58\x5b\x3f\xa7\x46\xa6\x7d\xa9\x46\x1e\x66\xb3\x43\xd6\x27\xff\x29\xda\xea\xc0\x96\x57\xcf\xb6\x69\x5e\x77\xea\x59\x79\x4e\xd1\x96\xa7\xa1\x6b\x7c\x6a\xde\xab\x45\x19\x61\x9d\xe6\xb0\x10\xe6\xc1\x40\xf5\x41\xea\x06\x18\x82\xa1\xcf\x21\x98\xf8\x42\xc9\x6f\x66\x7b\x21\xab\xcb\xc3\x55\x37\x1f\x3c\x6d\xa7\x04\x91\xee\x90\x77\x63\xa6\x48\x09\xe3\xc9\x74\xcc\x24\x6b\xd0\x7e\x3f\xe2\x43\x1d\xd1\xa6\x0f\x23\x29\xf3\xd9\x17\x6e\xd0\x9b\x4a\xd3\xdf\x37\xfa\x2a\x4f\x5f\x95\x7d\x6b\x00\x92\x06\x15\x4c\x27\x9e\x88\x04\xeb\xdb\x9e\xa9\x49\x95\xaf\xde\x27\xb0\xa3\
xd8\xea\x16\xbd\x4e\xa0\xd3\x81\x8e\xfa\xa1\xd9\x6d\x27\x5f\xe5\x2c\xbf\xe7\x32\x4f\x76\xea\x3c\x39\xa4\x1b\x37\x58\xa6\xd5\x0e\x08\x4d\x02\x96\x8f\x66\x20\x4f\xb5\x1f\x82\x7e\xd6\x76\x50\xa2\xed\xa0\x9b\x42\xe1\x3a\xf3\x67\x68\x30\x86\xbe\xd2\x55\xe9\x19\x06\xfa\xb1\x78\x21\x31\xdc\xf8\x21\x24\xe6\x45\x38\x06\x8a\x81\x7e\x36\xbf\xde\x24\x6a\x46\x7f\xc1\x90\x62\x08\x42\x35\x22\xa7\xe6\x43\xa2\x03\x9b\x5e\x2b\x13\xc5\xf2\x9f\xda\x2d\x54\xc6\x23\xe2\x44\x8f\x19\x76\xb7\x7b\xd1\xd8\x57\xad\xab\xb8\x7d\xee\xe8\xc6\x1d\xa1\xdb\xcd\xbd\x71\x6c\x57\x37\x1e\xa0\xc9\xb8\x3a\x07\xaf\x37\x17\x3f\xa2\x9b\xdd\x88\x9b\x3f\x5f\x55\x14\x89\x6a\xbe\x1a\xcb\x8d\x58\x15\x7e\x5a\x67\x3f\x31\x2a\xfb\x0c\xbd\x19\xeb\x98\x85\xd2\x64\xfb\xeb\xaf\x9e\x71\xed\xb8\x74\x1f\x63\xec\x06\xbd\x53\x9d\x59\x5d\xb8\x81\x71\x89\xe5\xf1\x9b\xc2\x0d\x74\x8c\x03\xf6\x4f\x91\xf3\xc0\x81\xe3\xe2\x38\x8f\xd9\x3b\x1e\xc3\xc7\x31\x7c\x40\x1c\xee\xe9\xbe\x7f\x2c\x81\x4a\x5f\xe1\x09\xdf\x2b\xc6\x5f\x60\x3d\x1e\xd7\xb1\x7a\x67\xb9\x34\x2d\x52\x35\xd9\xdd\xa5\x44\x64\xf9\x92\x67\xee\xc0\xa7\xba\x7c\x75\xf1\xbb\xdf\xf6\x1f\x56\x3e\xec\xf8\x15\x33\xab\xfc\xb0\xeb\x57\xcc\xb3\xf2\x83\xe7\x57\xb6\x00\xcb\x0f\xc5\x82\xfa\x79\x5c\x5b\x51\x7f\x8e\xeb\x72\xe6\xd9\xd8\x1a\xaa\xd3\xea\x48\x76\xea\x43\xf9\xbd\xfa\xfd\x61\xed\xf3\x87\xea\xe7\x83\xda\xe7\x2f\x56\xbf\xbd\x91\xd6\x5e\x5c\x27\xbb\xd5\x83\xfb\xca\x8a\xd1\x9a\x23\x76\x95\xbd\xf6\x23\x82\xaf\x63\x3d\x13\xaa\x57\xe6\x9e\xd7\xcc\x93\x31\x3c\x43\x17\x3f\xc6\xc8\xa1\x63\x26\x64\x43\xff\x6d\xcf\xa8\xe0\x11\x1f\x3a\xf8\x12\xab\xcf\xef\xc6\x48\x13\x50\xb5\xfb\x4e\x6c\x4c\x7f\x32\xc4\xd5\xd2\x22\xfa\x3a\xaa\x51\x6b\xcd\x14\x4e\x7d\x2f\x53\x8f\x17\x15\x29\xf5\xb1\x4a\x44\xda\x87\xfc\xda\x85\x80\x55\xa7\xd7\x9b\x2a\xe4\xfb\x31\x38\x23\xc1\x06\x0e\x3c\xf8\x1f\x57\xf4\x86\x9a\xeb\x4f\xfc\x07\x91\x2b\x59\x22\x11\xe2\x84\xe3\xe2\xb0\xe3\x83\xbf\x92\x07\x43\x70\x1c\x8c\x75\xce\x5d\x0b\xeb\xb3\x2a\xd6\xef\xac\xbe\x03\x6a\x79\x4d\xdf\x99\x5e\xac\xfb\x4e\x07\x7d\xd5\x33\x9a\x3c\xe8\xf7\x91\xf3\x3d\x18\x53\x7e\x6d\xf5\x18\xd7\xbd\x05\x1f\xd5\x84\x4a\xfa\x60\x72\x99\x9d\x54\xba\xe1\x73\x45\xd2\x2a\x95\x89\xdb\x11\x3c\x93\x3e\x9c\x80\xf6\xc5\x6f\xf7\xf3\x51\x92\xec\x56\xb6\x79\x3c\x13\x74\x6a\xd5\x25\x5a\x8e\xaf\x7e\x62\x98\xa1\x9b\x02\x76\xd6\xee\x78\x9e\x81\x5a\x1e\xbd\x9f\xd5\x2e\x18\x14\xa5\x02\xc9\x1b\x81\xe4\xed\x38\x95\xe3\x88\xb3\x76\xc4\x07\x71\x23\x88\x45\x9f\x89\xb6\xe7\x60\xd0\x4d\x36\x35\xcf\x90\x2c\x8a\x0d\x68\x63\x40\xdb\xba\x44\x38\xa2\x42\x36\x26\xa2\xbd\xa3\x2b\x3f\xc1\xf0\x6e\x8c\x9c\xb3\x38\x15\x21\x73\xaa\x54\xbc\xde\xdc\x03\x25\x77\x2a\x8a\xb4\x77\x57\xe3\x6d\x4c\x82\x0c\xb9\xa6\xe2\xf9\xb8\x4e\x7c\x32\xc9\x49\x16\xd1\x70\x24\xdb\x5e\x43\x77\xdc\x24\x95\x6a\xa9\x85\x19\xa2\x7d\x70\xd2\x84\x89\x76\xc2\xc6\x2c\x94\x0e\x38\x11\x8f\x64\x44\xc7\xc5\xd7\xf6\x24\xfe\xd9\xfe\x05\xc8\x8c\x05\xd7\x91\xfc\x05\x54\x46\x48\x18\x8f\x63\xe1\x80\xf3\x6f\x61\x18\x56\x86\xee\x1f\xa4\xd8\x37\x37\x23\xc8\xfb\xab\x9a\x33\x6c\x0f\x68\x9f\xf5\x2b\x63\x93\xb0\x30\xe6\x7d\x2a\xee\x1c\x0c\x1f\x29\x3a\xa1\xe8\x54\x47\x36\x61\x0c\x71\x1f\x39\xcf\xb5\x23\xbf\x11\xdc\x35\xe4\x28\x4a\x1a\x63\x1a\xb0\xb1\x55\xb5\xd3\xd2\xa3\x51\x19\x90\x6f\xb6\xbe\xfa\x6f\x0f\x32\x37\x7e\xf2\x80\xb3\x59\xcf\xec\x0b\x10\xa7\xf5\x3a\x42\xef\x6b\xaa\xe8\x4b\x6b\x20\xcf\x53\xc4\xdd\xe0\xbd\x1b\xbc\xad\x84\x0c\xfc\x4d\x26\xfb\x46\xd1\xd2\xcc\x2c\xf4\x9a\x97\x66\xcf\xd6\xcb\xf6\x1d\x06\x52\xd5\x38\xc2\xda\x35\xb5\x9a\x33\x03\x36\x1e\xb7\x93\x31\x4d\x46\xed\x78\x99\x37\x4d\x33\x75\x77\xf8\xe6\x74\x0b\x6c\x22\xb7\x4f\xf9\x50\x75\x6c\x85\x60\xbb
\xbb\x9c\x96\xf8\x0d\x4a\xd6\xd1\xd1\xd7\x84\x58\xbd\xfb\xbe\x3a\x59\x67\xfd\xda\x29\xd6\x1f\xd5\xef\xcf\x2d\xa1\x1e\x0d\xb9\x62\xcd\x41\x3b\x64\x5c\x31\x43\x51\xa5\xe1\x81\xdb\xbe\xea\xb9\xd3\x1a\x17\xbc\x5d\x21\x9f\x47\xaa\xcd\x0e\xe8\xfb\x9f\x38\x9c\xf6\x6d\xf8\x27\x9b\x95\xa0\xe7\x74\xa3\x23\xf6\xdd\x6a\xc3\x81\xdb\xbb\x6c\x99\xea\xbe\x25\xe7\x73\x7d\x16\x54\x2b\xf0\x3b\x26\x15\xbb\xbe\x33\xa8\xd0\xe5\x2b\x4e\x56\xbb\x1d\xab\x18\xaa\xe2\x05\xe2\x6e\x30\xc8\x9c\x41\xb2\xea\x05\xca\x39\x2e\x98\xc0\x3a\x2f\x90\x76\xa9\x2e\x6a\x27\x93\xed\xea\x4f\x11\xd7\x09\x2b\x4a\xbf\x69\xb5\x93\x5f\xf7\x95\xe8\x98\xa6\x32\xeb\xe3\x4f\x34\xef\xeb\x6f\xfd\x0a\x33\xfc\xb9\x3c\x38\xd7\xec\xae\x1f\xcf\x78\x31\x3a\x2f\x2b\xa3\xc3\xc3\x55\x05\xd2\xe9\x1a\x70\x11\xae\xd1\x10\xc2\x78\xdc\x08\xe3\x71\x9b\xa6\x32\x2e\x85\xef\x6f\xca\x68\xbe\x71\xee\x1b\xf9\x35\x43\xe7\x7d\xd8\xd2\x21\x28\x05\x83\xea\x5d\xd7\xb5\x82\xb1\x3a\x21\x6d\x2c\x9d\x0a\x16\xe7\xff\xf9\xbf\x73\x59\x57\x61\x74\x19\x6e\xd4\xca\x58\xb8\x31\x5a\x2c\x5d\xee\xd8\x49\x9c\x26\x4c\xcf\xb4\x65\x5d\x86\xae\x01\x1f\x33\x7a\xc3\x96\xc1\xc3\x70\xa3\x4a\x18\x87\x1b\xcd\x9a\x28\xdc\x38\xe7\x06\xe1\xc6\x19\x1b\x2c\x93\x1a\x8c\xd3\x15\x6d\x4a\xfe\x37\x32\xcb\xa7\x25\x66\xe1\x7f\x93\x51\x3e\xfd\x36\xa3\x8c\x37\x77\x67\x7f\x33\xa3\xdc\x6c\x1e\xab\xed\x70\x63\x98\xc8\x64\x33\x23\x8c\x36\x23\x1f\x86\x1b\x4d\xcc\x69\xb8\xd9\x6e\x99\x85\xff\x6b\xbd\xb5\x35\x1b\xeb\xa1\x45\xca\x5d\x58\xf7\x6d\xec\xd4\x5d\x1b\xb7\xeb\xd8\xb1\x4f\x25\xab\xe8\x80\x35\x3d\xbd\xb4\xf7\xc3\x62\x5d\xd0\x87\xe1\xfa\x03\x48\x23\xe4\x69\x7b\x39\xc6\xab\x54\xd3\x90\x8e\x99\x52\xc2\xbe\x37\x26\x31\x97\xa3\x1c\x35\x12\x44\x82\x0d\x27\xe2\x99\x81\x68\x8f\x74\x3e\x9f\x55\xb3\x62\x2a\xd8\x4d\x5b\x03\x35\xfa\xed\xc1\x98\xdd\x66\xcb\xb6\x61\xd8\xa7\x37\x45\x91\xd3\x9b\x62\x89\x17\x22\x9e\x39\x6b\xb5\x0d\xca\x87\x63\xd6\x1e\xb3\x81\x54\xbf\x76\x6f\x1b\x61\x2a\x92\x58\xb4\xa7\x71\x64\x10\x6b\xed\xe3\x32\x17\xb1\x25\x29\x86\x54\xd5\x65\x75\x52\x8a\xae\xbb\x52\xb6\x8d\xd9\xd9\xca\x42\x83\x5e\xdd\xc0\x49\xa6\xcc\xfc\xc6\xde\x96\xf3\x9a\xf2\x54\x69\xaf\x55\x2e\x72\x9e\xb3\x40\x58\xef\x73\x6e\x72\xde\x52\x11\x8e\x9c\x2a\x4b\x39\x47\x53\x11\x8d\x9d\x2a\x5f\x39\x6f\x69\x5e\x78\xbf\xa8\x2b\xe5\xcc\xa9\x1a\xf9\xce\xeb\x74\x9c\xc3\x3d\x2c\xf0\xa5\xc3\x34\x91\x4e\xd5\x7f\xee\x9c\xb1\xa9\x64\x93\x80\x09\xa7\xea\x48\x77\x4e\x43\x19\x97\xaf\x0b\x7f\xba\x73\x12\xdf\x64\xf0\x55\x8e\x76\x9e\xb1\xd0\x7c\xb0\xb6\xc8\x04\x5e\xee\x7f\xae\x99\x75\x1d\x2b\x5c\xfd\xa7\x59\x41\x1b\x44\xbf\xe6\x85\x4b\x33\xa0\x67\x37\x3a\x4f\x45\x65\x8a\x5c\x57\x27\x59\x2c\xc0\x99\xd0\x5b\x73\xa4\xcc\x81\x7a\xfc\xe9\x51\x58\xf1\x61\xbd\xee\xe9\x46\x9e\xd4\x39\x7f\x12\xf7\xe9\xb8\xa1\x4c\x9c\x46\x32\x52\xad\xc8\xac\xa7\x7e\x94\x4c\xc7\xf4\xce\x51\x0b\x50\x1c\x5e\xaf\x9a\x34\xba\x68\xbb\x1f\xd1\x71\x3c\x6c\xd8\x3f\xb2\x1e\x2b\xa7\xfb\x72\xa9\xd0\xa4\xd4\x5a\x0f\x50\x9f\xab\xe5\xd2\x12\x8e\xe3\x84\x35\x26\xf9\x12\xa7\x46\x65\x18\xa2\xed\x1b\x7b\x35\xb9\x75\x56\x4f\x2d\x85\xd9\x64\x37\xce\xf1\x7e\x1a\xc3\x34\x04\x1d\x67\x72\x53\x01\xd6\x56\x7c\x43\xd1\x49\x23\x5e\x52\xa2\xe5\x54\x05\x70\x2a\xdb\x7b\x0d\x25\x64\xae\xd2\x44\x46\x83\xbb\xbc\x6d\xb5\x79\x3b\x43\x27\x6a\x4c\x3d\x55\x58\x3f\x75\xf2\xe1\x5e\x43\xe7\x20\x8e\xe5\xea\x1e\x98\x8c\xdb\x3b\x8d\xfa\x0a\x9b\xa4\x61\xc8\x92\x44\x2d\xeb\x1b\x3a\xe6\x29\xe5\xa1\x31\x46\xab\xeb\x75\x05\xe5\x54\x44\x93\xc2\xb8\x1d\x86\x68\x52\x41\x71\xc6\x64\xe3\x19\x95\xec\xc1\x79\x34\x61\xd6\xa2\xbd\xbe\xc3\x69\x78\xdd\x17\xf1\xd4\xe6\xb2\x9c\xe3\xfd\x1c\xdc\x70\x5d\x38\x8e\xa6\x0e\x38\x82\x85\x12\x79\xfa\x1a\x07\x0
f\x17\x2c\x39\x8d\x93\x48\x5f\xf1\x08\xce\x20\xba\xdd\xc0\x5d\xba\xa2\xdc\xbe\xfb\x05\x3d\x25\x29\x96\x77\x78\xf3\xca\x7e\xb5\x59\x87\x7b\xba\x59\xab\x78\xb5\x59\x2f\x38\x5d\x9e\xe4\x22\x9e\x25\xcb\xf3\xfb\xfb\x66\x3c\x1f\x42\x3b\x0a\xe4\xa9\xd4\x27\x6f\x9e\xa1\x0b\xcb\x5a\x76\x40\x6f\x33\x38\xac\x1f\x49\x47\x0d\x9d\x82\xfc\x5b\xbe\x02\x61\xb1\xc5\xb1\xc2\xa2\xfa\xb1\x8c\xcf\x0c\xde\xbb\xc1\xbb\x9e\xf0\xff\x26\xd6\x19\x8a\xb6\x15\x69\x57\x66\x3b\xc3\xaa\xe3\x03\x33\x77\x07\xd4\xbc\x59\x5f\xac\xb6\xea\x90\x02\x51\x86\x14\x08\xdb\x79\xf8\x54\xc2\x96\x32\x3a\x9b\x4d\xd1\x73\x88\xe3\xab\xe7\xf9\x5c\xf4\xca\x77\xff\xe1\xf8\xce\x96\xfe\x43\x74\x77\x99\xd8\x2b\x37\xf8\x52\xa9\xf0\x7c\x73\x85\x20\xc8\x4b\x86\xee\x68\xcd\x2f\x6b\xa2\x19\xf2\xba\xbc\xa5\xda\xab\x61\x0d\x56\x24\xe5\x0c\x7d\xbc\x81\x5b\x9d\x1a\x58\x37\xd5\x76\x06\x87\x99\xe7\xb4\x9a\x45\xa4\xf1\x7f\xce\xb4\x2a\xd3\x99\xb8\x41\x6c\xf6\xb4\xdc\xe0\x91\xde\xd5\xa2\x2e\xdd\xd5\xee\x06\xea\x86\x3b\x15\x15\xef\xff\xb3\xcb\xd4\x60\xbb\xfa\x5d\xe7\xc2\x77\x36\x59\x4a\x7a\x39\x33\xa2\x56\xac\xb4\x87\x7e\xb1\x84\xc9\xdf\x59\x3d\xd8\x7f\x41\x44\x5b\xdb\x07\x9b\xad\xad\x37\xab\x4c\x96\x42\xb9\xcc\xc2\x8a\x6b\xaa\xe5\x94\xf1\x7e\xc4\x87\x4b\xda\x1a\xbb\x9d\xea\xed\x59\xfb\xb8\x61\x55\x32\x0e\xfb\xcb\x5c\x52\x8e\xc3\x68\xbb\xfe\x4e\x75\xea\x09\x9d\x30\xbf\x61\x56\x3f\xbd\xbf\x10\x4c\x96\x3a\xe4\x57\x38\x8e\xfa\x7d\xc1\x92\xa4\x82\x86\xbe\x58\x32\x5c\x3f\x87\x15\x3f\xd7\xc4\xf8\xb9\xde\x9b\xdd\xdc\x1f\x5d\xdb\x67\x98\x35\x63\xb6\x6d\x9e\xf4\x32\x3d\x4e\x13\xc9\x44\xe3\x4c\xa7\x7f\x35\x35\x59\xfb\xf8\x3a\x15\x87\xbd\x53\xe2\x38\xd9\x1d\xfd\xa4\x22\xef\xca\x56\x0d\x62\x31\xc9\xec\xfe\x8a\xae\x5a\xb6\x31\x8c\xc7\xed\x64\x52\x71\x52\x9a\xfe\x72\x96\xba\x28\x03\xed\x78\x75\xc6\x56\x3d\x80\xaa\xb0\xff\xb9\x8a\x4d\xbb\x7f\xa7\xea\xdc\xeb\x8a\x38\x31\x01\x98\x01\xed\x0f\x99\x03\x5b\x5e\xa5\xc7\xd6\x47\x3e\x98\x02\xb9\xfa\xe4\xac\x0c\x83\xc8\x60\xf2\xed\xb9\xd5\xc1\x0f\x19\x50\xe6\xe9\xd0\x3e\xc7\x05\x52\x54\xfe\x18\x67\x7b\xbb\x79\xdc\x2c\x53\x06\xea\x78\x00\x27\x69\x16\x2b\x57\x34\xfc\x77\x8c\x37\xc1\x68\x7f\xc9\x74\x4b\x98\x54\xeb\xe9\xf2\x44\xea\x47\x09\x0d\xc6\x7a\x26\x21\x69\x0b\x81\x2a\x3b\xb1\x35\xec\xc4\xfe\x95\xec\xf4\x8e\x31\x51\x0e\x6a\xd4\x5f\x3d\xa8\xba\x8b\x9e\x85\xc0\x73\x7e\xaa\xee\x9f\x2d\x79\x1a\xb2\xe9\x93\xdc\x94\xd3\xc7\x9e\x36\xff\x75\xb2\x3f\x4e\x65\xf4\xdb\xf3\xe0\x2a\x51\x62\xe5\x73\xd1\xcd\x4a\x0e\xb8\xf4\x3d\xce\x02\xb9\xbf\xc1\xba\x29\xff\x89\x89\x44\xeb\x87\xdc\xc4\x1d\xa9\x15\xf9\x5f\xd4\x80\x27\x82\xf2\x70\xf4\x9b\x0d\x10\x2e\x7d\xbb\x6a\xa1\xf8\x4f\x56\x9d\x46\xe3\xbe\xb2\x16\x7e\xbf\xf6\x93\x7f\x71\xed\x1f\x13\x26\x7e\xbf\xf6\xd3\x7f\x5d\xed\x2f\xe2\x6c\x4c\x7f\xbf\xf6\x47\xff\xba\xda\x3f\xb0\x9b\xe8\x6f\x55\x1e\x3c\xfb\xd7\x55\xfe\x77\x1b\x1e\x7c\xb5\xdd\x1f\xda\x1a\xa7\xe7\x6e\x30\x05\x7b\x86\x57\x16\xc8\x98\x0f\xa2\x61\x8e\xfe\xb6\x20\x66\xda\xde\x2b\x37\x80\x69\x78\xad\x28\xe7\x7d\x07\x9c\x7f\x1b\x3c\x1c\x3c\x1c\x3c\x2e\x3e\x0e\x62\x2e\xdb\x03\x3a\x89\xc6\x4a\x79\x9c\xc4\x3c\x4e\xa6\x34\x64\x65\x03\xef\xca\xda\xb8\x45\xdc\xe5\xf2\x51\x89\xd7\xb6\xeb\xd2\x1c\x14\x22\xdc\x0d\x3f\xe6\x31\x36\xb4\x88\xb1\x29\xa2\x5c\x66\xe8\xcf\x31\xfc\x0c\xc1\xd8\x48\x1f\xea\x57\x82\xad\xdc\x2a\xfb\xd3\x28\xcf\xaf\x8c\xea\xfc\x54\x07\x84\x05\xaf\x96\x03\x79\x76\x7d\x93\xc7\x47\xd8\x69\x65\xa6\xdb\xc0\x40\x02\x85\xad\xad\xb4\xb8\x81\xbb\xba\xcc\xbd\x58\x0a\xb4\x39\xa7\xe6\x34\xf9\x02\x71\x37\xfc\x56\xbf\xb8\x7b\x86\x82\x1b\xd5\x80\x5d\xa0\x2e\xad\xdf\x0e\x28\x8d\x4d\x14\x7e\xc5\x2e\xfd\x6e\x4e\xad\x9e\x83\xbe\xb9\xea\xd6\x44\x58\xbf\x82\x35\x
8a\x7e\xa0\x46\x30\x1f\x84\x57\x7d\xeb\xfd\x8e\x83\xe1\xdb\x4d\x7e\x4e\xae\x1d\x50\xd1\x2e\x82\xfe\x4a\xd6\x7b\x9e\x9d\xa2\x33\x9c\xf1\x69\x6c\x4e\xc1\xdc\x51\x78\xc9\x34\x27\xf6\xe1\x3e\xfc\xac\x4f\xdf\x98\xe5\x10\xee\x90\xdc\x86\x9b\x6d\x9d\x22\x09\x6e\x11\xdb\x86\x64\x1b\xcc\xbd\xa5\x97\x18\xd7\x1c\xe9\x1a\xe3\x37\x06\xb7\x48\x6c\xc3\xb1\xd4\x99\x5d\xb8\x1b\x46\xea\xcf\x17\x5c\x8f\x94\xac\x40\xdf\x4a\xa4\x3a\x0a\xde\x8e\xc1\x71\x2a\xf0\x0f\xd7\x0f\xc1\xf8\xc6\xce\x6b\x51\x5d\xfb\x8e\x0b\xae\xe7\xf4\x26\x50\xcb\xb5\xfe\xa7\x2d\xe3\xe1\x70\xcc\xd4\xaa\xdf\x9e\xf4\xf3\x97\x63\xed\x7e\x2c\xa2\x19\x26\x41\x7b\xbf\x31\x95\xed\xdd\xc6\x34\x68\xef\xd6\x63\x26\x82\x58\xca\x78\xe2\x80\xd3\x99\xde\x36\x92\x78\x1c\xf5\x1b\x62\x18\x50\xe4\x41\xc3\xfc\xe7\x76\x76\xf6\x71\x39\x4c\xd7\x96\x30\xa8\xf9\xcb\x6c\xdb\x3f\x23\x25\x10\x94\xf7\xf3\xbd\xfb\x8a\x6a\x3d\x66\x42\x4e\x28\xa7\xc3\x72\x00\xa3\x7a\x69\x4e\x6f\x4a\x35\xe1\x74\x1b\x71\x0c\xdf\xb7\xf1\x2a\x65\xaf\xcc\x6b\xb6\xe3\x57\x47\x31\x53\x71\x6a\x7d\xbd\x24\xf6\x22\x3e\x8e\xb8\xe5\x6a\x5c\x6e\xd1\x9a\x6d\xb2\x5a\x54\x02\x67\xb3\x8a\x56\xcd\x66\x0d\x3b\xf2\x41\xe9\x37\x46\xcd\xa9\x68\x3a\x2f\x6b\x7e\xa4\xba\xb9\xf5\xbe\xf6\xbd\xbe\x89\xfa\xa3\xf6\xbd\xbe\x7f\xf5\xd6\xe6\xa6\xaf\x1c\x45\xa2\xea\x5a\x78\x12\x56\x12\xea\x54\x26\x6b\xf6\x74\xbe\x0d\x3a\xf6\x2d\x7c\x8e\x5b\xce\x38\x0a\x1e\x04\x71\x2c\x13\x29\xe8\xb4\xbd\xe7\x7a\xae\xd7\xa6\xe3\xe9\x88\xba\x07\xed\x7e\x94\xc8\x07\x61\x92\x94\x00\xee\x24\xe2\x6e\xa8\x14\xee\x97\xa1\x1a\xcc\xf3\x6d\xe0\x06\x87\x96\xcc\x74\xc6\x92\x78\xc2\xda\x7b\xee\x43\xd7\xd3\x25\xed\xd7\x65\xe1\x1f\xb5\xc2\x6c\x3c\x69\xf7\xa9\x64\xd3\x28\xbc\x66\x42\x17\xac\xbe\x32\xc5\xde\x87\x75\x1d\xd8\xa8\xbb\xdf\xd5\x5a\xf4\x18\x84\x1b\xde\xa8\x3f\x63\xdc\x2d\xb2\xbc\xde\x8b\xe2\x49\x2e\x65\x7e\xa5\x96\xf4\x2d\x5e\x06\xab\x5e\x86\xd6\xcb\xa5\xbe\xfd\x16\x22\xe1\x86\x1f\xeb\x2b\x66\x36\xa5\xd4\x54\x2d\x64\xe3\xeb\x50\xbb\x0b\x4a\x11\xb5\xe3\xeb\xb4\xfa\x0d\x51\xb0\xb5\xf9\x2d\x17\x95\xcf\xbc\xf8\x1c\x0d\xd0\x8e\x76\x1a\x29\x92\xf2\xb2\xb5\x32\x65\x3a\xa5\x6d\x93\x51\x5c\x49\xfa\x25\x04\x41\x81\x40\x1f\x2d\x5d\x8f\xb5\x32\xfd\x16\x75\xe4\xa1\x95\xb9\xa4\x78\x19\xe8\xab\x3b\x8a\x65\xd7\xa4\x4c\x7f\x1e\xc2\xbb\x10\x5e\x84\xf0\x29\x84\xaf\x21\xfc\x19\x02\x8f\x41\xc4\x20\x63\xf2\x99\x23\xe7\x9c\x26\xd7\x0e\x06\x16\xaf\x4b\x8e\x24\x63\x54\xe4\x47\xca\xd2\x28\xd5\xef\x02\xd2\x6c\x7d\xa2\x97\xac\xf0\x14\x2a\x37\x94\xe9\x6b\x3a\xd0\x17\xa1\x6f\x1b\x2b\x4e\x0a\x09\x74\x4f\xad\x3b\x6d\x98\x75\x97\x15\x25\x0c\x3e\x8a\x55\x17\x07\xc5\xd3\x44\x52\xc9\x1c\x90\x18\xfe\xe3\xa3\x70\x39\xbd\x89\x86\x54\xc6\xc2\x4d\x13\x26\x8e\x86\x8c\xcb\xf2\x12\x9b\x73\x11\xf5\xb5\x33\xaa\xd9\x5c\x89\x6d\x44\x93\x51\x1e\x2e\x24\xf1\xea\xe3\x4e\x5d\xe1\x86\x52\x8c\xff\x64\x77\xf3\xb9\x70\x27\x4c\xd2\xec\x31\x19\x45\x03\xa9\x9f\x3b\x87\x6a\x7d\x4e\xa5\x8c\xf9\x7c\xce\x5d\x49\xc5\x90\x49\x7d\x44\x38\x9e\xf1\x71\x4c\xfb\xf3\x39\x12\xee\x54\xe8\x6b\x76\x9f\x19\x5e\x40\x58\x2b\x27\x23\xc1\x06\x20\x88\xea\x1a\xe0\xe4\x8c\x21\xa9\x8f\x98\xa0\x14\xf1\x66\x53\xb8\xc1\xcc\xb0\x8b\xbe\xbc\x32\x08\xcc\x8f\x40\xff\x48\x5c\x6a\x7e\x26\x2e\xed\x15\xee\x6c\x3f\xf3\x9f\xcb\x85\x39\x33\x01\xe1\x91\xbf\x32\x2e\x4d\x5f\x9a\xc9\x41\x57\x2c\x15\xdc\x43\xdf\x5c\x8a\xb9\xef\xeb\xab\x34\x43\x4f\xfd\xeb\x29\x14\x69\x9c\x25\x31\xa7\xfa\x61\x77\x01\x61\x4c\xc6\x1c\xe2\x98\x50\x0e\x91\x7e\xe9\x2d\x60\xa0\x1f\xda\x3b\x0b\x08\x62\x32\x88\x21\x89\x49\x10\xc3\x78\x1d\x4b\xdd\xd3\x67\x7e\x12\x03\x3d\xd6\x11\xd9\x2f\xfd\x28\x06\xfa\x4a\xfd\x0d\x63\x5f\x00\xfd\xee\xbf\xd1\xa9\xbd\xe8\x63\x3f\xcb\xa8\x45\
xbf\xf9\x8e\x03\xa1\xf4\x4f\x80\xee\xe8\x23\xc1\xc7\x3e\x87\xf0\x85\xc2\x12\x5c\xfb\x27\x10\x8c\x15\xaa\xe0\xa9\x56\x55\xf4\xc7\xe0\xce\xbf\xcf\x8a\xe9\x9f\x1a\xea\xa7\xfa\xf3\x54\xe7\xe8\x7a\xa9\x0b\xe8\xb0\x7d\x08\xce\xfd\xec\xea\xf9\xac\x3d\xdb\x31\xb9\xa7\x67\xfe\x33\xa6\xa6\x1e\xd0\x8f\xf9\xd3\x8d\x7f\x4f\x23\x4d\xf4\x95\xfe\x1b\xeb\xbf\x3f\xf4\xdf\x3b\xfd\xf7\xa7\xce\xb7\x97\x14\x45\x3f\xe7\x4f\x41\xa8\x2b\x3c\xcd\x7f\x9b\x9a\x27\x31\xb9\x0f\x3e\xf8\x51\xbc\x80\x51\xfc\x1b\xc9\x39\x3d\x68\x67\x49\x36\x87\x31\x61\x1c\xa6\x8a\xd0\xa1\x6a\xd0\x77\x8d\xfe\x56\xfd\xfd\xa0\xfe\x9c\xa9\x3f\xe7\xea\xcf\x4b\x4d\xd2\x4c\xb7\xed\x60\x01\x77\xfa\x61\x67\x01\xb7\xf9\x90\x1e\xc7\xeb\xf3\xe6\x1f\xd8\x79\xf3\xaf\x73\x6e\x38\xca\x91\x9d\xc5\x1b\xee\xc5\x43\x1c\x49\x2d\x31\xae\x62\xb2\x32\xed\xdc\xf2\x6d\x98\x1c\xdf\x4b\x71\x77\xff\x51\x14\x97\x7f\x12\x51\xde\xb9\x63\xdf\x09\x2a\x98\x9a\x5d\x48\x9f\xa0\xc1\x78\x01\x4f\x63\xf2\x9a\xc3\xab\x98\xfc\xe4\x70\x1a\x93\x57\xb1\xe2\xa0\xef\x31\x39\x17\xf0\x61\x3d\x91\xf7\xf4\x95\x2f\x21\x18\xa9\x36\xde\x9a\x46\x7e\x59\x3b\x0e\xc2\x0d\x7a\x3a\x01\x5d\x68\x92\xe2\xe9\x44\x35\xe7\xeb\xc1\x4d\xee\x3a\x5e\xe4\xad\xfb\x12\xc3\x89\x19\xbc\xd7\x9c\xc4\x2b\xd3\xbe\x42\x08\x31\x44\x25\x75\xaf\x7d\xa9\x78\x28\x06\xea\xf9\x29\xd0\x03\x5f\x64\xc4\xfe\xf4\x99\x62\x26\x0a\xc1\x7b\x3f\x82\xe0\x93\xaf\x33\x4f\x9f\xaf\xbf\x25\xf0\x3e\xe8\xab\x96\xbe\x55\x18\xde\x29\xd5\x06\xc3\x49\xbc\x3a\x01\xee\x43\x08\x74\x02\xdc\x8f\x31\x19\x30\x14\x61\x88\x36\x25\xef\xfb\x18\xc3\x0c\xc5\x26\xfb\x97\x49\x8b\xf8\x26\x26\x31\x87\x67\xf1\xe6\xfb\x1f\x62\x4e\x66\xe8\x4d\xbc\xe1\x78\xb1\x93\xf2\xa9\x88\x43\x96\x24\xac\xef\xe4\xab\x6b\xc0\x50\xe6\x73\xcc\x9d\xe4\xd6\x97\xec\xf4\x95\x93\xa4\xd3\xa9\x58\x2a\xb7\xb3\xa4\xc4\x3e\x8b\x91\xf3\x91\x5f\xf3\x78\xc6\x1b\xf2\x6e\xca\xfc\x86\xd3\xe2\x78\xa1\x66\x8b\xee\xcc\x3b\x14\x41\x99\x04\xe4\xc9\x9d\x03\x27\x31\x52\xdf\xf4\x87\x3c\x7d\xc8\xd2\x7b\xb3\x4a\xc5\x1c\x02\x86\xce\x85\xce\x05\xf2\x39\x36\xc8\xcc\x89\xf7\x59\xac\x3e\xad\xe8\x9e\x60\xa2\x87\x06\xc3\xcf\xf8\x37\xef\xb2\x78\xbd\x81\xb9\x6b\xa9\xc6\x75\x9a\xf1\xb8\x7a\xc5\x1f\x2f\x52\xeb\xae\x4f\x3e\x46\xb3\x3b\xd7\xb2\x63\x1c\xd8\xce\xfd\x96\x1d\xca\xc9\x2e\x11\xd5\x31\xc9\x5a\x19\x00\x4e\x52\xa5\x1e\x62\xab\xac\x36\x31\xf2\xdc\xf9\x96\xa7\xf4\x67\x0c\x72\x3e\x67\x26\x4c\xba\xf2\x4d\xe7\x57\xca\xbf\x29\xa5\x47\x4d\x76\x78\x19\x93\x6f\x31\xbc\xff\xdd\x1e\xfa\x11\x6f\x4a\xc7\x6e\x12\x37\x0d\xd5\x74\x92\x26\x65\x93\x9e\x1b\x6f\xf3\xa5\xe0\x49\xbc\x3e\x69\xe4\xfb\x18\xde\xc6\x70\x8b\x7e\xc4\x56\xfe\x29\xad\x9c\x69\xb1\xf7\x3c\x26\xe8\x79\xa8\x45\xa6\xb7\x32\x7b\x9e\x49\x61\xa7\xcc\x5c\xd5\xf4\x5e\x9e\x09\xcf\x53\xb2\xa5\x96\x6d\xea\xb9\x41\xf9\x2e\x26\x3f\x05\xbc\x88\x7f\x91\xc6\xdb\x0c\xa9\xce\x6f\xa5\xbe\xa6\x4a\x11\xd5\x29\xf3\x28\xb4\x3b\x45\x82\x5b\x5d\xdf\x11\x4a\x75\xca\x86\x43\x2f\x4b\x63\xc5\x20\xf4\x53\x9d\xc6\x2a\xd5\x69\xac\x84\xea\x13\x09\xd4\x4f\x5d\xba\xc0\xdd\x94\x20\x46\x34\xa2\x1d\xdc\x43\x59\xaa\xef\x56\x07\x24\x91\xad\x0e\x74\xb0\x9f\xbd\xa3\x26\xf7\x77\xab\x83\x21\xd5\xa3\xf6\x53\xac\x5a\x04\x56\xf4\x4a\x82\x5e\xc4\x46\x9b\xb5\xb3\x7a\xa9\x59\xf9\x49\xb7\xfe\xeb\x7f\x37\x0b\x07\xda\x05\x13\x56\x98\xd0\x04\xdb\xa7\x60\x52\x64\x2b\x10\x4d\x1e\xd5\xe9\x66\x98\x6e\xe0\x9f\xf1\xea\x3c\xbe\x5f\x63\xb8\xd1\xb7\x4e\x00\x8f\xc8\x13\x10\x11\x09\x53\x90\xd1\x2a\x60\x4f\x1f\xdf\x34\x41\x4a\xcd\xe6\xd6\x83\x8b\xbf\x92\xdb\x20\xbe\x7c\x60\x0e\x0e\x71\x7d\x55\x1f\x69\x71\x4c\x08\xd7\x79\xa1\x4c\x76\x5f\x16\x91\x55\x09\x12\x1f\x1f\x9a\x24\x85\xab\xd2\x1b\x52\xd9\x98\xc4\x89\x6c\x3c\xde\x98\xdd\x30
\xdb\x8d\x96\x11\x72\x3c\x57\x49\xc6\x75\xc9\x15\x07\xe3\x98\xca\x5a\x6a\x45\x1e\xa1\x0e\xdb\xfd\x43\xe8\x93\xfd\x76\x96\x44\x48\x23\x92\x4a\xa0\xd1\x2f\x52\x9f\x37\xd2\x08\xf1\xd6\x81\xf7\x87\xf8\xe3\xc0\xfb\xa3\xc3\x76\xd5\x33\x92\x6d\x8a\xf5\x0f\x85\x9c\xb5\xf4\x25\x66\x61\x65\x81\xa2\xe5\x4d\x50\x21\xe1\x10\x13\xb1\x9e\x2f\xc2\xbf\x27\xda\x4c\x42\x39\xb5\x78\x0b\x37\x5c\x96\x6b\xe9\x2a\xb9\x96\x1a\xb9\x66\xf2\x3d\x6a\x37\x5f\x5d\xba\xa5\x30\x43\x14\x8c\xfc\x33\xf7\x5c\x43\x1c\xad\xbf\xd5\x26\x8c\x34\x3f\x19\x4d\x22\x8a\x48\x1c\xc1\x60\x33\xf8\xf3\x02\x3c\x88\x96\x32\x16\x67\x02\x79\x7d\x1f\xb1\xac\x8f\xd6\xf6\x0f\x5f\x39\x5b\xa4\x16\xff\x7a\xe2\x62\x48\x22\x12\x44\x30\x8e\x7e\xf3\x26\xa7\x7e\x44\x3e\x0b\xb8\x51\x73\x04\xb6\x23\xf2\x1e\x26\x11\xb9\x86\xcf\x62\x99\xcf\x8b\xe5\xc4\x78\x78\x8d\x33\xd9\xe4\x85\x2f\xae\x31\x48\x52\x44\xad\xcb\xdb\x44\xd9\xb4\xdd\xce\xfe\xee\x01\x3b\xf8\x03\xb1\x76\xe7\xf1\x43\x4f\xd9\x65\xd9\x49\x7a\x94\x1e\xee\xce\xe7\x5b\xe3\x14\x31\xdc\xa3\xed\x8e\x4f\x71\x0b\xf5\xd5\xaf\x76\x3f\x45\x1a\xb8\x0c\x39\x51\xdc\xde\x12\x2d\x89\x17\x99\x02\x93\xd6\x33\x29\xec\x76\x0e\x69\x4f\xd3\xe1\x8b\x5c\x7f\xb1\x2e\x08\x78\x7c\x48\xe7\xf3\x9d\xc7\x84\x10\xda\x6c\x66\x95\xe6\xd0\x3b\x07\x0f\x1f\xed\xb1\xfd\xba\x6f\xb5\x82\x71\xdf\x7b\xfc\xf0\xa0\x80\x29\xf3\x31\x78\x16\xcc\xc3\x87\x0f\x0f\xd8\x41\xdd\x79\x5e\x41\xd3\xf1\x76\x0f\x1e\x15\x30\x07\x2b\xd1\x74\x76\xbd\xbd\x83\x92\x9e\x87\xab\x11\xed\x1f\xec\x5a\x44\x3f\x5a\x0d\xf4\x68\xb7\x73\xf0\xa8\x00\x7a\xbc\xb2\xba\x1d\xef\xf1\xe3\xfd\x9d\x02\xa8\x4c\x05\x51\x41\xb5\xb3\xbb\xff\xe8\xa1\x05\xd5\x59\x8d\xeb\x60\xe7\x60\xbf\xec\xa6\xce\xce\x6a\x5c\x8f\x1e\xed\x9b\xce\xac\x29\x8b\xb6\xc0\xd3\x21\xaf\x5a\xe0\x5d\x49\x94\x9a\x44\x79\x4a\x6d\x44\x83\xc8\xfa\x23\x22\xd4\x47\xdf\xf3\xf4\x81\x71\x8a\xf6\x30\x04\x29\x72\xda\x0e\xb6\x5e\xee\xd8\x2f\xf5\x6f\x8c\x61\xb4\x61\xaa\xec\x54\x2e\x3d\x8b\x7e\xff\x82\xaf\x5c\xa7\x10\xd9\xcd\xae\x3a\xdd\x9b\x9a\x33\xc8\x03\xe9\xd2\x5c\x57\xdb\xb2\xae\x22\x60\x44\x5f\x4f\x84\x18\xa1\xd8\x2d\xae\x58\x60\xd9\x1d\x1f\x33\x34\x8a\x40\xe8\xfb\x5d\x41\x9a\xab\x3d\xa6\x11\xb9\x49\x61\xb6\xc1\x64\xe0\x7f\x20\x25\xd4\x5b\xda\x56\xb8\x5b\xb1\x76\xd5\x72\x3c\x56\xba\xd4\xfe\x63\x52\x32\x52\x93\x43\x46\x75\xe1\xb9\xb3\xdc\xad\xfe\xaa\xbe\xf6\xad\xbe\x86\x4a\x3d\x06\x67\x2a\x0d\x9c\xab\x10\xb2\x08\xab\x77\x9e\xd9\x6e\xcb\xa1\x93\x68\x4d\xce\x72\x55\xee\x9b\x21\xa4\xce\x0c\xb3\xe8\x57\xe5\x3b\xa6\x7c\xcb\x94\x5f\x09\xd3\xce\x60\x14\xbf\x5c\xfe\xa2\x6d\xe5\x77\x11\xa1\x01\xa2\x11\x70\xf0\xf2\xff\x63\xa5\xa1\x9a\xf3\xbf\x9f\x05\x86\xdb\x68\x83\x95\xac\x94\xe4\x8a\x7e\x7c\x1c\xfd\xad\x2b\xbe\x8a\x1b\xe6\xac\x1b\xb4\x74\x8c\x8f\x39\xb1\xac\x59\x29\x94\xfa\xb2\x04\xfb\xa6\xac\x7c\x06\xd6\x41\x8f\xa3\x3a\xa8\x76\x4f\xac\x98\x33\x84\x23\x45\x7d\x07\x42\xff\x04\xfa\x7e\x07\x02\xdf\x53\x8d\x50\x16\x82\x95\x67\x45\x29\x34\xdb\x12\x69\xcc\x6a\x89\x3f\xc1\x3a\x73\x88\x5a\xc1\x16\x18\x8e\x56\x21\x9e\xa1\xeb\x48\x2f\xab\x4b\x38\xbe\x33\xd8\x4e\x41\x2b\x3e\x06\x09\xd5\x57\xd7\x2f\x9b\xbb\x66\x11\xd2\x09\x53\x6c\x24\xca\x2a\x2d\xf3\x25\xcb\xcc\x2a\xf5\x03\x66\x50\x69\xe3\xf4\x2c\x5a\x63\x02\x14\x52\xa2\xdd\xa9\x5f\x33\x05\xcc\xdc\x18\x73\x15\xad\xbf\xf2\x44\x67\x3a\x4c\xdd\xed\xf9\x5c\x19\x0d\xc5\x1b\xa6\xde\x30\x97\x66\x19\x12\xb3\x64\x92\x26\x95\xa4\x9d\x5a\xb0\x44\x96\xdd\x8f\xc2\x74\x22\x49\xb6\x94\x81\x90\x69\x15\xba\xc8\x40\x98\xa7\x78\xa4\x6e\x1f\xa8\x95\x7b\x50\x61\x0b\x21\xcd\x52\x1f\xc6\x44\xd9\x2a\x11\x49\x75\x8e\xca\x54\xdf\xab\x99\xba\xac\x5b\xa7\xb2\x46\x52\x0c\x51\x91\x62\x51\x53\x47\x71\x4e\x60\x58\x23\x4b\x7d\xc
f\x93\x47\x66\x14\x16\x2d\x28\x88\x52\xf8\xa8\x26\x0a\xc3\xd3\x88\x3c\x85\x57\xd1\xea\x7b\x4d\xea\x37\x82\x66\x95\x28\x80\x41\x0c\x83\x18\x17\x17\x2e\xd2\x42\x14\x4b\x9d\xb8\x51\xea\xc4\x8d\xd2\x65\xdd\x22\x47\xd3\x53\xd5\x94\x14\xd7\xa7\x54\x82\xae\xa2\xcc\x77\x74\x87\x5e\x99\x11\x08\x31\xc4\xf5\xb8\x2c\x5d\x77\x96\x10\x53\x7d\xad\x2d\x6e\x16\x9a\xb0\x44\x14\x63\xad\xbd\x9d\xae\x6a\x9f\x24\x05\x98\xb9\xa0\x45\x0f\x41\x7e\x26\xb5\xbc\xf1\xb1\x9a\xf6\x32\xcf\x3c\xa9\x1a\x28\x5d\x7d\x2b\xcc\xf7\x68\x8d\xa3\x68\x27\xf3\x43\x5d\xaf\xb6\xab\x22\x06\xa3\x14\xbe\x47\xe6\xf6\x8b\x0f\x6b\xb0\x3c\x86\x50\x23\xf9\xb2\xae\x96\x7d\x18\x6a\x80\xcf\x62\x55\x35\x5f\xb4\xb8\xfe\x10\xa1\x63\x89\xcd\x49\xd4\x5b\xa5\x35\x6b\xa9\x79\x1e\x29\xdb\x7e\x7f\x01\x27\x95\xfe\xa9\xf8\x17\x1a\x4b\x93\x9e\x2b\x51\xa7\x0d\xdc\x7a\xea\xdb\x80\x21\x96\xa5\x70\x80\x0c\xca\x54\x2f\xe1\x43\x84\x98\xde\x84\xe3\x56\xc4\xb8\x96\x14\x9f\x4d\x7a\x60\x2d\x1e\x74\x4a\x19\x38\x8f\x34\x63\x6e\x70\x7a\x18\xaf\xdd\x1d\x3a\x89\x72\xd7\xdd\x79\xa4\xef\xf1\xc9\xae\xb9\xfd\x18\x91\xc2\x91\x95\x26\x0e\x9c\x0b\xe3\xd8\x62\xbc\x9f\x1c\x49\x07\xde\x9b\x9f\xe9\x54\x49\xa7\xbe\xf5\x26\x91\x54\x48\x1b\x64\x10\xf1\x21\x13\x53\x11\x71\xa9\xbd\x5e\xfa\x65\x9e\x14\x37\xd1\x6e\xb3\xcf\xb9\xdb\x8c\x72\x1e\x4b\xed\xdc\x4d\x1c\xb8\xd6\xee\xb4\x5b\xf4\x14\x9c\x21\xe3\x4c\x50\x19\x8b\x8f\x1f\xde\x38\xf0\x59\xe8\x2f\xc7\xd2\x14\xd2\xe9\x02\x0a\xf8\x80\xa1\xd7\x45\x42\x3d\x8c\xe1\x4d\xd6\x10\x9d\x30\xc4\x54\xf7\x31\xc2\x55\x2a\x1c\xf8\x1c\xaf\xc1\xf5\x41\xdf\x31\x03\xcf\x56\xae\x8c\x84\xa3\xaa\xec\x97\xbe\xb9\xcc\x54\xd9\x3e\xfa\x9a\xcc\x0d\x46\xd9\x29\x83\x67\x91\xce\xb5\x61\xec\xb2\x9f\xeb\x6f\xe0\x2b\xf3\x44\x23\x63\x4b\x63\x78\xbd\xd2\x79\xc0\x78\x18\xf7\xd9\xc7\x0f\xaf\x9e\xc6\x93\x69\xcc\x99\x49\xb9\x0e\xdf\x14\xea\x53\x0c\x2f\x37\xac\xef\x7a\x8b\xe4\xd4\x1c\x4d\xd7\x6e\xc2\xf7\x51\x16\x20\x4a\xfe\xc3\x81\x1d\x9d\xad\x6f\xeb\x3f\x1c\xd8\xd5\x4f\xc4\x01\xcf\xbc\x22\x8e\x3e\x98\x03\x3f\x22\xf2\x0e\xde\x46\x2b\x59\xce\xf6\x49\xa5\x44\x22\x81\x51\x7e\xb5\xd4\x36\xbe\xa7\xf6\xd5\x52\x5a\x3f\xe5\xf3\x39\x85\x54\x2d\xbc\x66\x05\x49\x5d\x0a\xa1\x16\xfc\x4a\xd8\x6b\xdb\x2e\x2c\xe7\x8c\x36\x37\x75\x91\x50\x09\x51\x3d\x7d\xd4\x2f\x41\x42\x37\x73\x41\x19\xbd\xf4\x49\x54\xbd\x59\x47\x6e\x72\x41\xdd\xa2\xb7\x91\xb9\x93\x42\x66\x06\xec\xf3\x68\xbd\x1b\xf0\x89\x92\x82\x2b\x45\x54\x12\xc1\x59\x0a\x26\x54\x40\xbb\xef\xb4\xc4\xe8\x74\x16\xf0\xa2\x66\x88\xb3\x95\xe4\x18\x05\x41\xfb\xf1\x64\xe6\x10\xa3\xb8\x4c\x65\x2f\x56\x5d\xf0\xc0\x30\xf6\xf5\xa7\x9d\xdc\xa3\x98\x7b\xf6\x3a\xb5\x04\xf5\xdc\x0d\x5a\x1d\xad\xd1\xb9\xc1\x71\xab\x93\x67\xaa\xf7\xab\xa5\xb8\x4b\x3f\xb4\xea\x45\x45\x5e\xac\xbc\x27\x02\xc3\x27\xdd\xba\x87\x0b\xf8\xba\x41\x39\x2c\xd6\x45\x51\xb9\xef\x2c\xf7\xb1\x84\xda\xc7\xd2\xd7\x5b\xc9\xf5\xc5\x4f\xe7\x7a\xca\x97\x3f\x63\x80\xb0\x15\xb7\xb3\x96\x37\xa2\xc9\x72\x99\x33\xe0\x69\x45\x63\x5c\x60\xf8\x73\xdd\xb0\x6e\xcd\xd0\x57\xa3\xde\xb9\xdb\x0b\x0c\x7c\xb0\x7e\xf8\xff\x8c\x72\xef\x8a\x18\x90\x74\xcd\xb6\x4e\x75\x1e\xc4\x96\x6f\x36\xf3\xcc\xc6\x96\x67\x36\x77\xc3\xaa\xfe\xa6\x15\x17\xec\xa2\x1b\x12\x44\x09\xca\xbc\xb0\xf1\xaf\xbc\xb0\xb1\xed\x85\x55\x7a\x8f\xf1\xa0\xcb\x01\x59\xb6\x4e\x5e\x46\x18\xd0\xa7\x90\xdc\xd3\xaf\xfe\xa7\x08\xc2\x27\x3e\x7a\x11\x92\xfb\xf0\x89\x7f\x97\x42\x78\xa6\xf7\x5a\x4f\xfc\xbb\x74\x81\xdd\xf0\x89\x7a\xf1\x22\x74\xc3\x33\xf5\xee\x45\xe8\x06\x27\x0b\x58\xc3\xb9\x9f\xd4\xd7\x92\x7b\x75\x6b\x85\xed\x87\xae\x31\xf0\xa7\xd0\xa5\x5f\x71\xee\x76\xcb\x59\x39\x40\x62\xa0\x3e\x85\x4f\xc0\xf0\x6d\xc6\xb4\x9d\x2c\xed\x70\x5f\xbb\xdd\x7d\x0b\x4c\x
94\xee\xe3\x56\x15\x0a\x14\x65\x99\x1f\x57\xd6\x66\xd5\x0c\xf1\x01\x08\x8d\xe2\x6c\x69\x6e\x65\xa4\x15\x33\xc4\x5c\x26\xa7\x8d\xba\xb5\x59\x3b\x2c\xe9\x66\xec\x30\xed\xe6\xbb\xd5\xe9\xe0\x17\x26\xd1\x3e\xa0\x77\x21\xf9\x87\xf3\x0f\x63\xf2\xe9\x10\xbc\x11\x43\x1d\x0c\xa3\x18\x75\x30\x86\x3f\x63\x54\xb9\xd0\xc0\xcb\xef\x3a\x78\x17\x1a\x03\xef\x79\x04\xef\x42\xf8\x90\x66\xcb\x1e\x5b\xcb\xad\x62\x81\x21\x1d\x6c\x74\x16\xb2\x41\xce\xce\x74\xb0\xd1\xc6\x68\xcc\x50\xaa\xba\xea\x26\x35\x16\x6d\x3a\xd0\x17\xb8\xe9\x77\x33\x14\xac\xb4\x6f\x8f\x52\x94\x5d\x2b\xaf\xd4\x67\x9d\x68\x7f\x2d\xec\x75\x8a\x3e\x4a\x94\x9a\xab\xcd\x2e\xcd\x35\x5f\xe1\x60\x93\x5a\x93\x53\xf2\xab\xba\x99\xc2\x39\x43\x71\xa4\x29\x8d\x23\x9d\xfd\x3b\x8e\xb4\xb0\xff\x35\x45\xba\x74\xa8\xaf\x9c\xb8\xd4\x44\xc5\xff\x2f\x77\x6f\xa2\xdd\xb6\x8e\x2d\x0a\xfe\x8a\xc2\xa7\x4e\x01\xc7\x5b\x8c\x64\x3b\x13\x73\xf8\xb4\x12\xdb\x99\x63\xc7\x53\x1c\xe7\x54\x6e\x16\x48\x82\x36\x1d\x8a\x74\x40\x50\xb2\x92\xe8\x5f\xfa\x5b\xfa\xcb\x7a\x61\x03\x1c\x44\x91\xb2\xeb\x54\xdd\xdb\x6f\x75\xad\x3a\x31\x45\x80\x20\x08\x6c\xec\x79\xe8\x5a\x27\xc5\xc4\xdd\xe1\xdb\x9e\xdd\xbe\x9e\x77\xfd\x32\xb3\xaa\x90\x60\xa8\xac\xfe\xb3\xa0\x10\x2e\xcd\x70\x55\xd3\x5e\x0c\xff\xa5\xd5\x1c\xcb\x9a\x32\x07\x92\xbc\x5d\xf2\x57\xf2\x55\xb1\x68\x2c\x2c\xbf\xb8\x29\x74\x2c\x77\x4c\xeb\x1d\x57\xdc\x09\xd3\x48\x7f\xac\xd4\x7f\xf2\xea\x57\x35\xc6\x9c\xf8\x61\x31\x02\xa5\x5a\x59\x0e\xbc\x7b\xc3\xf6\x17\x20\xcc\x26\x79\xa1\xfb\x53\xb4\x55\xed\xe8\x99\x0a\x35\xd6\x3f\x93\xf2\x4a\x98\x9a\x20\xf0\x53\xb8\x05\x7a\xcc\xa5\x51\xa5\x90\x73\x85\x15\x27\xce\x41\x4e\x8a\x4b\x6b\x61\x81\xbf\xeb\xc8\x10\xfc\x4f\x8e\x05\x16\xf8\xdf\x1c\x2f\x54\xa8\xd1\xfa\x65\x81\xbf\xe5\x0c\x15\xd2\x9c\x50\xd5\xe9\xdc\xb7\xfd\x5d\xd5\xef\x20\x27\xea\xfa\x13\x55\xbd\xd5\xd5\x37\xf5\x80\xbe\xeb\xed\x53\xf5\xd8\x5d\x52\x6f\xaf\x56\x9f\x6d\x2c\xed\xe6\x62\x81\x2f\xda\x42\xa0\xcf\x43\x08\xcb\x57\xe0\x4f\xfd\xea\x8c\x84\x45\x8b\x9a\xa8\xbe\xa9\xe7\x1a\x96\x13\xd5\xc3\x50\xad\x39\xa2\x90\x29\x32\x72\x9c\xc2\xf3\x88\xfc\x14\x14\x3e\xe7\x14\xe2\xf0\xef\x94\x44\x54\x9c\xea\xd8\x80\xb0\xa0\x58\x1f\xda\x94\x45\x0c\xc2\x75\x8c\x71\x82\x49\x11\x29\x4c\xc3\xc2\x71\x27\x74\xb3\x04\x26\x61\xb7\xf2\x74\xbb\xae\x3c\xbd\x0c\xdb\x85\x42\x93\x39\x05\x2e\xc2\xf5\x25\x82\xaf\x43\xe3\x4b\x33\x0b\x8d\x67\xcc\x3c\xec\xac\x3d\x7c\xd3\xd6\xd4\x33\x06\xa4\x05\xec\xad\xfd\xd2\xbe\xd6\x23\x61\x3a\xdb\x52\x0e\xf8\xbe\x8e\xdf\x28\x24\x72\xa6\x24\x72\x66\xb3\x92\x07\x1b\xa8\x57\xfa\x76\xff\xfe\x7d\x7d\xc1\x0c\x53\xa6\x7f\x05\x8a\x2b\x8f\x42\xa2\x1e\xf5\x51\xbf\x53\x38\x2b\x16\x39\x27\x15\xe5\x5a\xb4\xfd\x10\x8b\xba\xac\x2f\x97\x94\x3a\xf5\xc2\x20\x36\x07\x6d\xcd\x7e\xbe\xbc\x22\x1d\x55\x39\x0a\x36\x21\x4c\x6b\xc5\xfe\x51\x23\x88\xce\xfd\x3e\x5a\x7e\x02\xa3\x10\xf2\xb1\xda\x85\x99\x7f\xde\x50\x05\xe9\xf5\x79\x1e\x92\x5c\x31\x42\xcf\x56\x57\x48\xf3\x66\x58\x67\xa5\x6c\x4d\x97\x66\x90\xb8\x69\x5d\x97\x75\x15\x41\x6a\xb3\xb2\x06\xc9\xf3\x90\xa4\x76\x80\x46\xaf\xee\xf7\x2e\xe0\x38\x6c\x2b\x3e\xdb\xe4\x8a\xcd\xf7\xd6\x2d\x8f\xe8\x60\x09\x0c\x39\x64\x1f\x39\xe4\x28\x24\xcf\x89\xa2\x68\x7f\x0e\x1b\x5b\xae\x2f\x96\x17\x40\x83\xc7\x8c\x1c\x87\x58\xc0\x4a\x2d\x82\xfe\x64\x66\x07\xf5\x0f\xfe\xfd\x3b\x2d\x96\x23\x52\xcb\x21\xaa\xe5\x88\x56\x26\x18\x2e\x97\x6c\xb9\x8a\x20\xb2\x19\x44\xb6\x07\x91\xed\x17\x2f\x8b\xd4\xb2\x84\x74\xb1\x7e\x36\x95\x43\xeb\x95\xba\xe7\x91\xef\x21\x2c\x83\x35\xba\x93\xb5\x2f\x9f\x9a\xdd\xdf\x58\xbe\x0b\x5c\x3e\x25\xd0\xb7\x94\xc8\xbd\x0d\x26\x13\x54\xd7\x04\x8b\x05\xf1\x0b\x32\x5e\xcc\x63\x1c\xa6\x0e\xae\x86\x2c\x0b\xbb\x30\x05\x03\x7e\
xbd\x46\xf2\x55\x54\x7e\x9b\x59\x07\xfd\x89\x3b\x61\xab\xc6\xf9\x58\xf3\x66\xcf\x6a\x9f\x8b\x31\xf4\xb4\xd4\xaf\x2f\x6b\xdb\x9a\xf5\x79\x16\x14\xde\x84\xad\xca\x0c\x41\x8c\xc0\x23\x69\x23\x1f\xe2\x4e\x88\x77\x1d\x5d\x83\xae\x52\x6f\x1c\x74\xe2\x46\xcf\xc1\x0c\xa0\x96\x41\x92\xdf\x42\x37\x97\x70\x74\x0b\x33\xf9\x2d\x24\xbf\xbc\x27\x4a\xec\xb9\x76\x08\x73\x73\x38\x08\x49\x53\x6d\xd6\x0f\x01\x2b\x8c\x3c\xa9\x32\xcb\xea\xad\x19\x5f\x9b\x22\x59\x7d\x59\x2a\xc4\x14\x5f\xee\xcf\x1c\x01\xfe\x5b\x27\x01\x7f\xe4\xec\x49\xf0\x1f\x39\x12\xfc\xa7\xce\xbd\x51\x51\xc1\x7e\x41\xe1\x73\x27\xe2\xcd\xc8\x51\x08\xd6\xab\xbd\x13\x4b\x91\x23\x98\x86\x9a\x2f\x3e\x31\x64\xaf\x1f\x12\x54\x9f\x59\x5c\x88\x54\x58\x30\x53\x34\x51\x11\xc1\xfd\x70\x8d\xf5\x8b\x08\xe3\x15\x79\x1a\xba\x57\x09\xbc\x0f\xd7\xfa\x5d\x9f\x86\x30\x23\xfb\xe6\x9f\x1d\x0e\x09\x85\x3e\x32\x22\x47\x7c\xa5\xe1\x5a\x96\xe5\x70\x77\xd7\xc8\xab\xef\x43\xe4\x77\x7f\x08\x10\xb0\x87\x53\x21\x6d\x74\xc9\xac\x6c\x9e\xeb\x95\x3d\xe7\x64\x6d\x35\xfc\x32\xb8\xf6\x24\x9a\xf0\x34\x97\x3d\x7e\xe3\x73\x1e\xf0\xa0\x19\xc4\xbe\xcf\xe5\x2c\x15\xdf\x7b\x7a\xd1\x9e\xd5\xa2\xb0\x1a\xd2\x52\x10\xc2\x15\x9a\x60\x0e\x6d\x3f\xa4\x1b\x56\xcf\xda\xd0\x3f\xde\xc0\x49\xa8\xee\x3f\xa1\x0d\x83\xb7\x75\x9a\xf0\xc2\xc3\x45\xf0\xec\x3a\x4d\x32\xde\x0b\x45\x3a\xe9\xb1\xeb\x08\xad\x28\x36\x6b\x86\xfc\x7e\x60\x71\x98\x8a\x09\x0f\x7a\xb9\x88\x4d\x1f\x8c\xe1\xd2\x24\xf6\xac\xfd\x20\x56\x29\x42\x92\x4a\xbb\xa8\x2b\x7b\x65\xd6\xc6\xfb\x5c\x87\x1b\x14\xdf\xb2\xab\x00\xe5\xb3\xe2\x7e\xf7\x53\xf2\x3e\xd2\x5b\xf4\xf3\xee\x23\xaf\x1f\xf0\xd4\x0c\xf8\xd6\x80\x65\xcb\x66\x16\xfd\x93\x0d\xeb\xc1\x8a\xf6\x55\x7b\xcf\x92\x75\x8c\x0e\xa5\xf0\x65\x8d\xd4\x78\xa0\xed\x8a\x78\x38\x5e\xff\x2d\x06\x70\x4e\xde\x84\xba\x26\x36\x7e\xc2\xcf\x88\xf8\x98\xc1\x45\xff\x0c\x42\xa2\x05\x00\x0a\x37\x92\xd2\x7a\x05\x4c\x0a\x87\xab\x13\xab\x9b\xbf\x45\x69\xfe\xbe\x37\xaa\x3c\xbc\x50\xf3\xae\xf9\xa8\xa2\xcc\x9f\x31\x69\xa3\xfa\xf3\x47\xd8\x5d\x86\xfb\x30\x6c\x77\xe5\x43\xa1\x59\x4f\xe9\x43\xe8\xce\x12\x78\x11\xba\x3f\x12\x62\x5d\x73\x91\x45\x99\x7c\xa5\x60\x63\xef\xe6\x9a\x25\xc1\xf3\x38\xb6\xe0\x43\x48\xe1\xe5\x9a\x53\xba\x53\x8a\xe1\x1f\x3b\x7b\x1d\x11\x01\xbf\x7c\xe9\x34\x98\xa8\x46\x76\x93\x5d\xf2\x97\x06\x24\xf4\x3d\xfd\xba\x92\x24\xba\xa8\xff\xd7\xe7\x90\x73\x2c\xa4\x38\x23\x6f\x24\x28\x31\x26\xa1\x54\x1d\x85\x6f\x5a\x7b\xf9\x2a\xbc\x45\x49\x9d\x14\xb6\xce\x4f\xeb\xa7\xec\x5d\x39\x65\xfe\xf7\x57\x21\x64\x21\x24\xf6\x6b\xa3\x22\x3d\x5f\x7d\xb4\xdc\xb5\x1b\xa4\xe3\xec\x02\x09\xf9\x31\x12\xf2\x23\x24\xe4\xaf\x21\x75\x85\xfd\x0d\xeb\x73\xdc\xa9\x06\xe0\xfa\xfa\x7f\xc2\x3e\xb9\xb5\x02\x60\xbe\xbe\x02\x20\x2b\x9a\x4d\x9d\x42\xb8\xce\x15\xe5\x5f\xa9\x09\x28\x97\x6a\x02\xa6\xf8\xc3\xcf\x33\x99\x4e\x10\x60\x30\x81\x02\x1f\xdf\x48\xa2\x4b\xba\x62\xaa\xfb\xaf\x8d\xb2\x81\x9c\x8e\x4f\x48\x02\x4b\xa5\x03\x39\xd5\xee\xe9\xef\x50\x30\xd9\x5e\x40\xe2\xb9\xbb\x82\x58\x61\xea\xe7\x99\x45\x41\x78\x2b\xcb\xdc\x6d\xb6\xff\x46\xf4\xb6\x45\x4e\x62\xb3\xb1\x65\x39\xc2\xf6\x22\xc0\x5d\xfc\x11\x1a\x56\x5c\xd8\xde\x15\x1d\xab\x7f\x9d\x13\x85\x9c\xaf\x8a\x74\x2b\x0b\xaa\xd6\x03\xc9\xce\x54\x92\x77\x21\x85\xc4\x6b\x0f\x4b\xad\xd4\x07\x95\x7b\x40\xeb\x0c\x4e\xd5\xba\xd5\x26\x11\x87\x30\x89\x88\xa2\x7e\x6a\x12\xff\xfa\x0b\xb7\x5a\x3f\x54\x8d\x33\x82\x83\xb4\xe9\xad\x65\x3a\xb1\x77\xcb\x9d\x1a\x52\xf7\x37\x22\x4c\x0b\x2a\x5c\x3d\xb5\x0d\x4f\x16\xc0\xbd\x56\xf6\x0b\x47\xfc\xe6\x60\x21\x3e\x5f\x52\xdb\x1b\x5f\x10\x01\xcd\x93\x4b\x8d\x25\xc8\x54\x96\xb3\xb0\x5a\x87\x82\x0d\x8b\x56\xae\x0c\x07\x44\xc2\x9b\x54\xeb\x73\xbe\xa5\x20\x6d\x7f\x0f\x66\xe4
\x5c\xa3\x13\x5a\xae\x8b\xf4\xf4\xba\x20\xe4\x0d\xbc\xf9\x20\x8c\x78\x1c\x18\x9f\x92\x05\x85\xdc\x5b\x87\x56\x93\x3f\xdd\x61\xc5\x74\x2e\xe3\xd9\x9e\x50\x18\x66\x30\xd2\xa1\xcb\xea\xeb\x99\xb7\xde\x47\xa9\x3e\x9a\x6c\x8e\x26\x8d\x91\x5c\xd8\xac\x3e\x2e\xe6\xbd\xf4\x15\x0f\x29\x71\x85\x7d\xaf\x0b\xe5\x9c\x4a\x32\x27\xcc\x43\x41\x62\x5f\x2b\xec\xbc\x76\x7b\xfd\xf0\x4f\x51\x78\x14\x1e\xa0\x89\xa8\x35\xa0\x51\xcd\x8f\xdb\x5e\x7d\x86\xe5\x2d\xdb\x2b\x31\x30\xaf\x89\xce\x26\xb4\xaf\x70\x0f\x54\x4d\x05\xd7\x54\x74\x31\x30\xa6\xad\x64\xbc\x82\xfc\x5d\xf2\x57\x6e\xa3\x41\xcc\x66\x5f\x4b\x78\xc5\x24\x24\x5e\xf1\xc6\xc2\xd5\xc2\x03\xdf\x65\x4b\x8f\x32\x5b\x1b\xc6\x7c\xfd\xf8\xa2\x19\xf5\x58\x1f\xe7\xfe\xfd\xf2\xb2\x18\x34\xc5\x41\x23\x37\x35\x15\x3b\x3d\x94\xb2\xf5\x6b\x18\xb0\xfa\xcb\x70\x33\x52\xf4\x92\xf6\x25\x4a\x80\x78\x91\x17\x17\x3e\x8c\xf8\xd6\x9f\xc9\x78\x46\x7c\x0f\xc4\x60\x1b\x98\x12\x29\x52\x0f\x92\x8d\x91\xf9\x8d\xf5\xd4\x8b\xa5\x2a\x64\xa4\x32\x0e\x72\x97\xfc\xc5\xcd\x57\x94\xda\x3d\x0a\x91\xd7\xcd\x96\xa4\x9e\x76\xb6\x40\x0d\x68\xdb\x96\x97\xde\x17\xab\xc4\x50\x17\xc3\x95\xb6\xa7\x1d\xa8\x3c\xe0\x1b\x8a\x69\xce\x3d\xe0\x03\x84\x41\x7d\x4a\xbc\xce\xd7\xeb\x7a\x25\x09\xba\x10\xa9\x9e\x99\xe7\xbe\x82\xd8\xeb\x24\x6e\x46\x46\x2d\x66\xf2\x49\xe8\xca\xbc\xad\xc9\x0d\x4c\xa7\x7b\x17\x44\x97\xa6\xbe\x7f\xff\x82\x70\xc3\x7e\xa0\xd2\x6c\xcd\xa2\x7c\x93\x80\xe6\x60\xb3\x2e\x53\xaf\x4d\x96\xd6\xdc\x0c\x2d\x35\x8a\x7a\x92\x7d\x4e\x04\x45\x1a\xfb\x8d\x83\xe7\x91\x7d\x14\x44\x02\x0f\x66\xe4\xb3\x84\x11\x48\x0a\x6f\x4d\xfd\xe5\xdc\xd5\xc5\xf9\x85\xab\xab\x8b\xe5\x20\xe9\x83\xcd\xdf\x43\x8a\xc7\x77\x46\x4e\x52\x98\x91\xd0\x03\xa1\xb5\xb7\xed\xe3\xe5\x38\x9e\x56\xc9\x27\xee\x27\xa1\xde\x9e\xbb\xc9\x83\x5c\x91\x9c\x07\x12\x4c\x21\x99\xa2\x40\x70\xec\x11\x6e\x18\x46\x29\xe1\x9d\x24\xf6\x43\xad\x23\x06\x2c\x68\x37\x97\xc8\xa2\x56\xfe\x13\xe3\xd1\x83\xad\x3f\x48\xbe\x21\x37\x48\x32\x10\xf4\x41\x42\x9d\xe1\x82\x42\x7f\x1d\xb2\x2a\x90\x43\x21\x85\xdf\xe3\x36\xb3\xbd\xdf\xbf\x9b\x08\xa1\x38\xc6\x0c\x96\x0d\xd8\xe8\x54\xa4\xae\x8b\x70\x8c\x5c\xb1\xa0\x89\xeb\x83\x70\x39\x48\xf7\x84\x48\x85\xf5\x99\x22\xf1\x3c\xce\xb8\xc6\x4a\x9f\x50\x64\x2c\x07\x37\xa6\x4f\x90\x8a\x59\x5d\x50\x98\x78\x6d\x8a\xc5\x33\x92\x50\xfb\x2a\x8d\x12\x24\x0f\x70\xd9\x05\x13\x56\xa1\xf5\x56\x7f\x85\xa2\xee\x7a\xf7\xc7\x89\x33\xf1\xc8\x9c\xf4\x3d\xbd\x0b\x7a\x6f\x0d\x06\xbd\xe8\xc0\xa0\xcb\x83\xd1\x52\x45\xdd\x02\x53\xc5\xe7\x6c\xd8\x9b\x0f\xff\xe8\x73\x32\x23\x97\x9e\xa6\x51\x7f\x90\xd1\x00\x55\x0c\xd7\x77\x9c\xf3\xb0\x98\xf2\xc8\x99\x93\x0b\x8d\xe9\x67\x64\xea\x95\x21\xcb\x33\xcf\x7d\x0b\x73\xaf\xd5\x76\x54\x24\xbb\x2a\xd9\x9e\xc7\x55\x96\x2a\x5e\xa7\xa4\xbc\x46\x49\x35\x07\x70\xe1\x48\xc5\x01\xdc\x95\xaa\x36\x52\x82\xcc\x09\xd7\x53\x3d\x22\x1c\x7e\xb1\x2f\x6a\x30\xb5\xfc\xdc\x66\x5f\xc0\x97\xce\x09\xe1\xb6\x8f\xe0\x80\xbe\x61\x4a\xfa\xf1\xbe\x3b\xfb\x8b\x65\x36\x09\xdb\x9e\xad\x1b\x32\x2f\x47\x2c\x38\xa5\x9c\xaa\x8f\x91\x8d\xf1\x6a\x25\xcb\xf0\x71\x2f\xd6\x9f\x57\xb1\x40\xdb\x8d\x1e\x6c\x53\xf5\xd0\xc1\xb7\xf5\x7e\x0f\x9b\x23\xed\x34\x47\xda\x6a\x8e\xf4\xae\xd9\xa3\xf4\x3e\x67\x6e\xf1\x31\xea\x53\xb1\x0f\x39\x22\x0c\x7e\x79\xdf\x1d\x86\x59\x42\xbe\xd0\xf1\xbe\x33\x23\x53\x8e\x49\xc3\xf4\x0d\x66\x7b\xdf\x1d\x44\xdc\xa3\xa1\x2e\x6f\x33\xf3\xe0\xda\xd3\xdd\xa1\xc2\xf3\xad\xa2\xc2\x0f\x54\x48\xaa\x15\x5a\x40\x94\x53\xf0\xd4\x6b\xfc\x57\xda\xe6\x54\xff\xda\x95\xb4\x66\x1c\x19\xbe\x42\xb3\x74\xe3\x19\x87\x88\xbd\x2e\x40\xc6\x80\x3f\xf0\x8f\x4c\x7c\xad\xb7\x2e\x52\xcd\xff\xe9\x08\xf0\x5f\xab\xfe\x1f\x1c\x09\x7e\xdf\xe1\xea\xa1\xe7\x9e\xfb\x41\xc
0\xb1\xe7\x3e\xb0\xff\xeb\x01\x5c\x79\xee\x0b\x41\x46\x0f\x86\x14\x76\xee\x08\xef\xb5\xf4\x6e\x5b\xe8\xc1\xc7\xec\xfe\x98\x3b\x7a\xe7\x7e\x3a\x88\x60\x5f\xe7\x20\x31\x9f\x41\x7d\x8b\x9a\x00\xa3\xc3\xad\x71\xdd\x7d\x09\xea\xd3\x2c\x14\x75\xfd\x23\xc7\xb2\x16\x50\xec\x06\xb7\xbd\x9f\x6a\x1d\xcb\x08\x6c\x85\x75\x8b\x03\x74\x63\x0e\x50\x21\x24\x95\x07\xe8\xce\xa0\x55\x1d\x0b\xf6\x6c\x65\x7e\xf9\xed\xd3\x2b\x80\xa5\xbb\x1e\xb4\x37\x59\x28\x50\xca\x29\x35\xdf\x42\x69\x2b\xe4\x16\x53\xc4\x50\xef\x35\xc7\xc8\x7b\xbd\x7a\x3c\xda\xe6\x6f\x22\xd7\xcd\x70\xed\x38\xc9\xbb\x71\x10\x29\xe6\x46\x54\xc8\xa9\xc2\x4f\x9d\x90\xba\xa0\xf0\xc6\x5b\x2b\xab\x17\x05\x19\x0b\x17\xa6\x03\xaf\x43\x8f\x5b\xe6\x51\x7a\x8a\x22\x39\x7b\x83\x32\x39\xfb\x06\x91\xe2\x6b\xe6\x10\xaa\x5f\xaf\xc1\x73\xf3\x0d\xeb\x7f\x3d\x28\x74\x66\x99\x7b\x1e\x12\xaf\xac\xb0\xb6\xaa\x4f\xc4\x8f\x57\xc8\xf6\xb5\x11\xb7\xca\x45\x1a\xe2\x46\xc7\x68\x0e\x0a\x5a\x0a\x37\xc6\x8d\x24\x4f\x89\x1b\xa3\xfb\x38\x22\x0f\xff\x95\xf3\xd3\x54\x99\xc4\x04\x83\xc8\x90\x74\x6e\xf8\xe5\x02\x42\x49\x4d\x31\x4a\xf4\xcd\xec\x2a\xd1\x52\x61\xe2\xa3\x14\x2e\xb5\xee\x54\xc3\x5a\xae\xe3\x8c\xad\x05\x32\x6a\xa1\x82\xb2\xd7\xe1\x2d\x2f\xd5\xad\x12\xe2\x90\x74\x22\x2a\xc5\x63\x20\xaa\x42\x36\xa9\x16\x95\x74\x40\x72\xa4\xdb\xa2\x49\x78\x0e\x48\x94\x02\x5b\x4d\xdc\x98\xe3\xdd\x15\x48\x39\xe7\x24\x56\x5f\x81\xb9\xad\xe8\xb3\x95\x6d\x09\xd4\x0c\xde\x38\x31\xb0\xa7\x4e\x60\x7b\xcb\x18\x42\x2d\xcc\xd4\x9d\x91\x8f\x21\x48\x60\x14\xfa\xee\x8c\x7c\x52\xd7\xe9\xca\x48\xcb\x39\x21\xa4\xcd\x2e\xc6\xa1\x93\x63\x6a\x08\xfc\x91\xa7\x8e\x0f\xec\x9b\xd3\x57\x2f\x9a\x2a\xc8\x45\xda\x8c\x6d\x33\xb2\x63\x36\xf0\x13\x87\x77\xc8\x06\xfe\x0c\x51\x36\xa4\x4e\xd5\xf6\xaa\x68\x3b\x33\x6d\x50\xb5\xc9\x1c\xdb\xde\x86\x84\xd7\xa9\x75\xed\x30\x0b\x7d\xe4\x84\x3a\x72\x19\x39\x22\x12\x7e\x61\xd9\x52\xf4\xba\xa5\x74\x25\x3f\x55\xeb\x13\xc7\x2d\x4f\x8c\x86\x0d\x58\xf7\x4e\x9a\xb0\xbe\x8d\x0b\x39\x71\x67\x44\xa0\xbb\x12\xa4\x6a\x29\x27\x08\xf9\x13\xdb\x83\x4b\x77\x62\xfb\x70\xe1\x56\xdf\x93\xe8\xef\xb9\xa4\x30\x71\x33\x32\x23\x6f\x3c\xe8\xdb\xde\x15\x7e\xf5\x85\x1b\x8c\x6b\x8c\x0d\xce\x6f\x42\xe1\xe2\x2b\xa5\xce\x45\x73\x5b\xd4\x82\x63\x82\x0c\x0a\x17\x25\x12\xbe\x70\x6f\xc8\xdc\x03\x0f\xdd\x12\x99\xda\xd8\x4b\xf7\xa2\x5e\x09\x5a\x3f\xfb\xd4\x99\xba\x17\xf8\x25\xd5\xc4\x44\x31\xb1\x12\xd7\x4d\xdd\x1b\xb2\x53\x0d\x16\xa9\xc1\xa6\x2b\x83\x79\x73\x67\xda\x18\x4a\x36\x86\x7a\xd2\xc4\x19\x7b\xcd\x75\x7c\x8a\xeb\x78\x5d\x57\x80\x56\xe0\x87\xae\x5f\xd7\x20\x6c\xb6\x4b\xc7\x33\xf2\xb2\xfc\xe1\xcc\xc8\x97\xf2\x07\xf8\xa9\x46\xc0\x2f\x42\x72\x6f\x54\x43\xad\x97\x88\x89\xae\xdd\x2d\xd7\x75\xc9\xb5\x7b\x40\x42\x35\x33\x45\x4d\xef\xdf\xbf\xb6\xbd\x31\xe2\x9b\xcf\x4a\x0a\xf9\x24\xc8\x35\x52\x53\x4a\x9d\x2c\x5d\x9d\xc8\xb5\x7a\xc7\x65\x09\xe1\x2f\x42\x72\xa9\x3e\x9b\xa7\xc0\x73\xd8\xe1\xe4\xd2\x68\x69\x16\x14\xbe\xad\x65\x19\x4c\xb1\xa0\x7a\x10\x88\x7a\xe8\xc8\x33\x6e\x0b\x1f\xc4\x5a\x17\xe5\x54\xbb\xa9\x2f\x28\x7c\xf6\xdc\x19\xf9\xa0\x35\x52\x46\x6b\xfa\x66\xd7\xfa\x4a\x75\xa2\x04\xcf\xbd\x49\x60\xdf\x73\x67\x09\x9c\x76\x52\x93\x57\x92\x24\x0f\x50\xe6\x7c\xef\xdd\x3d\x64\x4d\x6c\x24\xcd\x20\x09\x6d\xf3\xe6\xb6\xb7\x8f\x61\xeb\x55\x4f\xae\x63\x84\x40\xb8\xa2\x88\x46\xdb\xed\x12\xb8\x97\x50\xf5\x7b\x7d\xa6\xb0\xfe\xff\x39\x4a\x39\x8f\xf8\xb6\x49\x66\x71\xd6\xf9\x41\xef\xd4\x7e\xee\x1a\x21\xc3\x66\x9f\x16\x14\x7e\x7a\xdd\x0a\xfd\x7e\x04\x9b\xdb\xfa\x1d\xe5\x63\xf0\x68\xa8\x4d\x37\xeb\x9f\x1b\xf1\x2d\x3d\x2f\xd5\xf7\xcb\xfa\xbe\x8f\x86\xd5\xf0\x68\x8f\xe9\x54\xf4\x36\xe6\x9f\xfe\xff\xa5\xfa\x37\x85\x43\xaf\xdb\xfe\x3a\xfc\x
33\x19\xcf\xc9\xa1\x07\xc9\xff\xfe\xdf\x23\x38\x21\x42\x6d\xc3\xe8\x7e\x32\x56\xa2\xb7\xa0\x8e\xa4\x8e\xe2\xee\x7f\xac\x51\xa1\x1c\x6a\x19\x0b\xb5\xae\xf0\x61\xcd\xbb\x4e\xc8\x8c\xfc\xf0\x20\x19\xf4\x39\x91\x14\xf6\x70\x0b\x35\x58\xbd\x58\x33\xfe\x07\x35\xbe\x35\xc4\x2a\x06\x7a\x13\x5f\xde\x61\xcf\x4b\xd8\x1d\xf1\x2d\x9d\x2b\xe2\x8e\x90\xeb\x21\x32\x13\x77\x0c\xcd\x7e\xa5\x30\xc1\x0b\x01\x43\xd8\xa7\xf0\xc9\x73\x13\x06\xe7\xea\xd6\x7e\x08\xfb\x1e\x7c\xf2\x28\xbc\x5b\xf3\x69\x07\x12\x88\x74\x13\xe8\xb4\x23\x5d\xe7\xd9\x25\xd1\xa6\x3f\xb1\xa0\x14\xfe\xfa\x5a\x44\xab\xaa\x5d\x49\xb2\x3b\xe6\xfc\x10\x59\xf7\xae\x68\xab\xfe\xc7\x83\x63\x63\xd6\xd7\x4e\xbc\x20\x57\xc7\x4e\x9a\xf6\xd8\xb2\x18\x84\x2e\x00\xf1\x0a\xdd\x26\x0f\x88\x65\x2c\x0a\x99\x05\x33\xf2\xce\x83\x4f\x39\x10\xe1\x0a\x6a\x7b\x57\xda\xbc\x53\xc5\xc5\x9c\x7b\x44\x60\x29\x43\x75\xbb\x88\xa7\xc1\x9b\xec\x4c\xdf\xd4\x45\x1b\x74\x52\x19\xbc\x7f\x6a\xee\xa7\x93\x09\x4f\x64\x71\xf7\xd8\xd8\xa7\x02\x4b\x17\x4e\x3e\xf1\x74\x31\xe5\x7d\x34\xc5\xf8\xa6\x50\x8a\x70\x67\x24\xc9\xc0\x62\xd7\xd7\x71\xa4\x33\x27\x3d\xb8\xca\x52\x2c\xc2\x77\xdc\x50\x88\xed\x86\x64\x4e\x44\x86\x2b\xf2\x59\x2b\x2d\x79\xe7\x7a\xb3\x63\x45\x5a\x8f\x81\x9d\x3a\x58\x77\x7b\xea\x08\xc8\x9c\xc4\xce\x80\x9d\x39\x58\x7b\xdb\xf3\x1d\xac\x98\xe1\x1d\x38\x58\x92\x1b\x05\xbd\x05\x85\xbc\x6b\xcc\x5e\x1e\x11\x84\xe0\x8d\x44\xc7\x74\x51\x60\x99\xdb\xca\x1b\x4b\x11\x4d\x08\x5d\x80\x9f\x99\x30\x95\x99\x05\x8f\x86\xdb\x4f\xf8\x43\x5c\x94\xc0\x02\x0c\xe3\xc7\x1f\x97\x16\x6c\x3d\x32\xd7\x13\x4b\x63\x76\xb5\x25\x16\x1e\x94\xaf\x14\xd2\x6c\xed\xe1\x7a\xbc\x12\xbd\x85\x37\x6b\x81\x58\xbf\x7f\x3f\x5e\xa8\x69\x3f\x1e\xdc\xc5\x5f\xf3\x7f\xa2\x1a\x99\x59\xbe\x28\xeb\xa0\x7e\x9c\x91\x84\x8e\x47\xce\xb0\x4c\x88\x7b\x9b\x57\xe9\xd6\xa8\x31\xcd\x87\x4f\x37\x64\x63\xa6\x4f\x87\xc5\xad\x72\xb2\xa3\xcd\xf2\x5e\x39\xe1\xd1\xc3\x51\x71\xaf\xa4\x0d\xa3\x27\xe5\xbd\x92\x3e\x6c\x8e\x36\x8b\x7b\x25\x8d\xd8\xdc\xde\x2a\xee\x95\x74\x62\xf3\x71\x79\xaf\x96\x1e\x60\xb8\xbd\x21\x9b\x2b\xb3\xb5\xb5\xbd\x81\xf6\xfd\x30\xbb\x9d\x4b\xf1\xba\xe1\x54\xad\xde\xc6\x8c\x44\x78\x56\xe8\x86\xc2\x9f\x59\xd6\x45\x66\x57\x96\x76\xab\x09\x01\x7a\x37\x36\x9f\x3a\x9b\x4f\x9a\xb0\xd0\xa4\xb9\x5b\xc3\x26\xd1\x1d\x35\x13\x3a\x0c\x8b\xa5\x6d\xe6\x67\x68\xe6\x62\x18\x36\xf3\x2e\x8c\x56\x17\x71\x65\x05\x91\xbe\xc6\xd9\xad\x2a\xf2\x19\xc9\xf4\xea\x40\xee\x32\x66\x1c\x08\x47\x9b\x7f\xba\xf9\xef\xdf\xcf\x31\xde\xa9\xb2\xf9\xfd\x62\x9f\x1c\x4c\xa6\x27\xc0\x1b\x39\xc9\xc2\x30\x71\x3e\x23\xf9\xc6\x88\x82\x74\xe5\x00\xd9\xd6\x20\xeb\x36\xb6\xcc\x48\x68\xde\x87\xe7\x53\x18\x4b\xc8\x34\xd3\x3e\x31\x11\x6b\x17\xb2\x59\xba\xa0\xd0\x37\x9d\xa6\x59\x4b\x27\xc2\x70\xb7\x37\xe9\x83\xad\xdf\x4a\x06\x9a\x74\x9c\x29\xe0\xae\x78\xd6\x75\xe6\xa5\x9b\x32\xb4\x5e\x30\x22\xe9\xc6\x68\xc5\x4b\xde\xcb\x40\x77\x20\xd2\xed\x67\xea\xc2\x67\x64\xeb\x0f\x39\xd8\xa4\x2d\x69\x92\xcb\xde\x53\xd5\xb5\x14\xa3\x96\x01\x83\x0f\x66\x24\xcd\x10\xc3\x37\x20\xc4\xb4\x8c\xca\x96\x47\x8d\x96\xcd\xb2\xe5\x71\xa3\x65\xab\x6c\x79\xd2\x68\xd9\x2e\x5b\x9e\x36\x5a\x1e\x96\x2d\x15\x60\x99\xa6\x47\xaa\xa9\x01\x61\xda\x2f\x7b\x0d\xf9\x7e\x8e\x4e\x45\x7f\x0e\xc7\x89\x33\xfc\xf3\x39\x32\x6d\x63\x81\x3c\xdb\xc5\x9a\xa7\x9a\x67\x76\x4e\x2e\xd5\x12\x54\x60\xaa\x0d\x0e\x59\x37\xeb\x72\x91\x01\xd6\x11\x34\x40\xf6\xba\xbc\x3a\xab\xd9\x19\xb2\x75\xe1\x29\x8a\x0c\xfd\x91\x6c\x3c\xe2\xdb\x7f\x08\x4c\x5f\x24\x37\xf8\x82\xc2\xbc\xf3\xa5\x37\x64\xa6\x5e\xfa\xb3\x7c\xd5\x97\xf2\xea\x65\x79\xf5\xb6\x7a\xfd\x4d\x17\xbe\x47\xb2\x6a\xe8\x15\x09\x19\x99\x91\x6b\xfd\xd9\x6a\x49\xe6\xfa\x72\x20\xe9\x83\x47\
x7c\x1b\x81\x7c\x2f\xeb\xb4\x96\x86\x0a\xf2\x36\x24\xe4\xee\x8c\xdc\xa8\x27\xf3\x88\x70\xe4\x34\xd4\xc5\x40\x7d\x5c\x4e\xd1\x38\x7f\x93\x95\xce\xaa\x17\x24\x87\xca\x82\x25\x9e\xf1\xaa\xb3\xac\xc5\x33\x4a\x30\x4f\x71\x4a\xc7\x1c\xc3\x00\xbe\xaf\xd9\xd3\x39\xd9\xcb\xd0\xd2\x33\xc9\xd0\xa3\xf1\x3a\xc3\x66\x0a\x43\x74\x2e\x6f\x7b\xb2\x59\x6f\xa7\x3c\x14\x05\x01\xd9\x76\x9a\x83\x9b\x51\xa1\xda\x0c\xfc\x89\x9b\x61\xae\x5e\x9a\xab\x61\xa9\xd1\xd9\xfa\xd7\x87\x19\xd6\x1e\xdf\xbc\xeb\xe3\xc3\xa5\xc7\x46\xd5\x63\xdf\x33\x18\x8d\x74\x68\xfe\x32\xfa\xc0\xa6\xcd\x5a\xcb\x70\xa9\x65\x58\x6b\x69\x0c\x57\x6b\xd9\x5a\x6a\xd9\xaa\xb5\x6c\x2f\xb5\x6c\xd7\x5a\x1e\x2e\xb5\x3c\xac\xb5\x3c\x5a\x6a\x79\x54\x6b\x79\xbc\xd4\xf2\xb8\xd6\xf2\x64\xa9\xe5\x49\xad\xe5\xe9\x52\xcb\x53\xdd\xd2\x40\x32\xfa\x83\x4c\xfc\xde\x82\xc2\x71\xd7\xc1\x41\x96\xda\x63\x15\xb7\xf7\x8b\x45\x8e\x00\x76\xe5\x14\x2d\x82\x2a\xca\x95\x54\x59\x60\x13\x60\x3f\x75\x10\xf5\x55\xa6\xa4\xa3\x9d\x35\x10\x7c\x44\xfa\x29\x98\x4c\xb3\xa2\x48\x34\x9b\x50\x98\x3a\x33\x72\x9c\x01\x86\xb0\x02\x3a\xfc\xe4\x19\x3c\xde\xe4\x0f\x75\xc4\x1c\xa6\x97\x45\xae\xdf\xb2\x40\x32\x82\xa9\x8f\x68\x91\x6d\xf6\x2a\x6b\xf6\xd7\x59\x67\xaf\x32\xf4\x36\xd3\xe6\x83\x05\x85\x37\x9d\x78\x07\xd1\xc5\xe0\x3c\x27\xa8\xfb\x39\xc8\x6e\x09\x4c\x31\x8e\x94\xb5\x5c\x27\x37\x89\xcb\x3a\xf3\x9b\xd6\x65\x88\x5c\x7d\x33\x57\x13\x97\x4a\x60\x60\xe0\x5d\x29\x21\x0e\x43\xd1\x0f\x1d\x1f\xbc\x4f\x4e\x8a\x4b\xb9\x4e\xcb\xe5\x85\x0e\x07\xef\xc2\xc1\xf2\xea\x09\x78\x9f\xb5\x0c\xf8\x51\xb8\x98\x3f\x20\xca\xf6\x7e\xe4\x2c\xc6\xd4\x01\x2c\xc5\xf5\xbc\x37\x34\x49\x00\xa2\xec\x88\x5f\xf0\x1b\x0b\x98\xc9\x01\x30\x65\x71\xce\xab\x54\x05\xf5\x74\x9d\x57\xda\xfe\xb7\x2f\xdc\x7a\x96\xcf\xdb\x72\x98\x16\xb5\xb9\xfe\xa5\xfc\xa5\x45\xa1\xaf\xbf\x9f\xbc\xb4\x23\xbd\xe8\x47\xe3\xcd\xfb\x2d\x73\xbb\x53\x38\x60\xce\x87\x7d\x93\xf3\x01\x05\x4c\xb3\x1a\xa5\x08\x5a\xde\xa8\x64\xd5\xe2\x56\x23\x47\xc4\x4a\x46\x88\x4a\x54\xde\x4f\xc9\x47\x81\x33\xbd\xa9\x72\x36\xc0\x51\xeb\x71\xe9\x10\xc4\x2d\x10\x5f\xeb\x95\xff\x76\x52\x90\x50\x39\x47\x7f\xcb\xb4\xc1\xe6\x73\xe6\x92\x77\xbe\x9b\x47\xcd\x5c\xc3\x09\xd9\x4b\xc8\x3b\x9f\xec\x32\xc9\xed\x24\x9d\x11\x6d\xcb\xa3\x70\x92\xd5\x7d\x77\x4d\xde\xee\x1d\xf5\xb5\xa9\x50\x12\x38\x85\xfd\xec\xb6\xf4\x85\x45\x1c\x88\x49\x8b\x32\x36\x19\x6a\xc6\x26\xc3\xce\xd8\x44\xea\x8c\xaf\x25\xc9\x88\x09\xac\xb0\x19\x70\xf4\xf6\x62\x58\x27\x04\x03\x1a\x98\x09\x6c\xc8\xcd\x5f\x6e\xfe\x4a\xf3\xd7\x9c\xb9\x2b\x25\x0a\xdf\x90\x2b\x85\x13\x2c\x0b\xee\x8d\x0c\xa0\x2b\x11\xf7\x34\xc3\x7c\xd6\x96\xa5\x8e\x9b\xfa\xf7\xcc\xd1\xf5\xa2\x4d\x56\xea\x2b\xe7\x4a\x1d\x3a\xbc\xb7\x80\xf7\x6d\x9a\x90\x03\x92\x29\x16\x0a\x32\x86\x3c\x27\x66\x2e\xc1\x38\x23\x9d\xd1\xbe\x32\x5b\x3d\x43\x3b\x58\xa5\x5f\x57\x4c\xda\x39\x1a\xc2\x6c\x46\x8d\x1a\x53\x61\x0d\x6b\x87\x25\xff\x90\x3d\x8f\xf7\xa2\xa4\x27\x2f\x79\xef\x9a\x65\xd2\xd2\x58\x04\x43\x18\xda\x71\x32\x73\x85\xed\xf9\x80\x2e\x53\xc7\xda\x1f\xf8\x14\xfd\x81\xbd\x03\x74\x0c\x64\x67\xc5\x8b\x3f\xe7\x24\x24\xfb\x19\xac\x0d\x3b\x39\x22\xa7\x19\x62\x60\xa1\x96\x06\x93\x2d\xe7\xcb\x98\x88\x23\xb2\xec\xe3\xc7\x07\x8c\x48\xdb\xfb\x8c\x17\x1c\x2f\x32\x46\x72\xbc\x98\x91\xf7\x19\xa8\x4b\x10\xea\xb7\x0e\x1e\xe8\xc4\x9c\xcf\xf1\x17\x32\xb1\x8a\xcb\xf9\xd9\xcd\x6f\xc9\x4e\xa1\x62\x4e\x7e\x2a\xaa\x3c\xda\xfc\x43\x00\x6f\x18\xbf\x23\x46\x38\x05\xe6\x8e\x36\xff\x50\xf3\x1b\x0d\x46\x74\x43\x49\x32\xb9\xcd\x52\xaa\x7e\x08\xf0\x95\x80\xa5\xd5\xcb\x9b\xc0\xa8\x92\xb4\x98\x8b\x62\x14\x83\xd1\xa6\x92\x52\x2a\xd6\x99\x19\xd6\x99\x81\xaf\xae\xce\xd4\x97\xb2\x4f\x9a\x7b\xc6\xa8\xac\x06\x93\xc1
\x37\x1e\xff\x21\x9a\xf8\x8a\x6f\xe8\x84\xc5\xed\x3c\xf2\x4a\xf6\xb0\x15\x16\x4d\xab\x87\x38\xdd\x28\xe5\x89\x6d\x27\x71\x47\x0f\x41\xb8\x98\x68\xd4\x64\xfb\x68\x49\x09\xb1\x55\xf4\x43\xd6\xbb\xbb\xdf\x66\xd1\x0f\x39\xf5\x35\x1d\x47\x4b\xfc\x99\x04\xdc\x8a\xad\x82\x51\x53\xdf\xa3\x00\x62\x6e\x2e\x57\x38\xb0\xea\x99\xd1\xfa\x67\x86\x4e\xe2\x6e\xaa\xef\xdb\x5c\x37\x1b\xd3\x69\xdd\x12\xe0\x0a\x8c\x40\xb8\x8f\xdb\x3b\x55\x69\x32\xd6\xf4\xc2\xfc\xd1\x6b\x84\x95\xb7\x99\x02\xa5\x44\xd7\x0a\x5f\x50\x78\x7d\x4b\xdf\xad\x5a\xdf\xc3\xee\x13\x80\x45\xf4\x4d\x9a\xa9\x52\x54\xe0\x20\xe9\x58\x3a\x38\x50\x82\xcb\x88\xd1\x6d\x3f\x5a\x50\xd7\xc0\x45\x7e\x19\xdf\xf4\xac\x73\xb2\x1f\x3a\x9e\xfc\xd2\xfa\x64\x7d\xea\x2f\x1a\x4f\xe6\x85\x3b\x40\x8b\x8e\xa4\xc8\x41\xca\x97\xcc\xf8\x3f\x3b\x42\x16\x74\x9a\xf9\x45\x2d\x9c\x34\x19\x1f\xa8\x33\xfc\x13\xb8\xfa\x73\x45\xa9\x73\x40\xf0\xf2\x27\x05\xbc\xb1\xd0\xb2\x57\x10\x82\x46\xf0\x78\xe0\x9f\x75\xbb\xf6\x1f\x91\x1c\x7e\xb1\xd4\xb9\x91\x64\xc2\x74\x60\x5a\x83\xf9\xaf\xf5\xc0\x9d\xd8\x54\x1f\xae\x57\x61\x04\x83\x91\xfa\x55\xdd\x97\x9a\x80\x36\x20\x5e\x0f\xf1\xa3\x32\x7d\x37\x64\x85\xa2\x7d\x4f\x2e\x68\x3d\x20\x8c\xb7\xf9\xa6\xd6\x42\x3c\x2f\x31\xad\x73\xe9\x41\xfb\xac\x42\x14\x44\x28\x99\x17\xdb\xd5\x82\x9c\xe7\x44\xdd\xc4\x0b\x9c\xec\x08\xe7\x9d\xb8\x8a\x21\x46\x3e\x77\x80\x8c\xb2\xd8\xd0\xa1\x79\xac\xb3\x14\x83\xdd\x1f\x0b\xed\x82\xcf\xd1\x66\xa8\x05\x6f\xdf\x5d\x29\x8e\x93\xdb\xec\x47\x23\xbf\x17\x6e\xdd\x5c\x6d\x49\x54\x2b\xf4\x0b\x8a\xb6\xd6\x1a\xca\x00\x1e\x56\x44\x62\x97\x65\x63\xca\x71\x74\x79\xe3\x3d\x59\x7c\xfc\x12\xc1\x2d\x9a\x51\x7a\xa0\x0b\x13\x0b\xdd\x39\x82\x2e\x32\x5c\xaf\xfb\x1c\x81\x5e\x3b\xa1\x17\xe7\x12\xe1\x82\x8e\xf1\x21\x2d\x8f\x08\x04\xbc\xa5\xd7\x10\x0a\x29\xe6\x5c\xf2\x5d\xbf\xb2\xad\xea\x9d\x8d\x1c\x5f\x8b\x4c\x8a\xff\x47\x30\x55\x82\x52\xaa\x04\x25\x75\x2f\x05\x04\xe0\x45\x29\x28\xea\xb8\x63\xb5\x36\x21\xc6\x96\x78\x18\xe6\xdd\x2d\x86\xfc\x39\x1c\x0f\x1d\x24\x65\xc6\xab\x39\x75\xe7\x44\x40\x04\x61\x97\xa1\x29\x19\xcf\xc8\x07\x25\x48\x67\xf0\x68\x08\x98\x76\xd9\x99\x91\x1f\xfa\xce\xe6\xb6\xbe\xb3\xa8\x58\x4b\xfd\x1d\x57\x4e\x6a\x7b\x6a\xd6\x29\xa6\x42\x2f\x4b\x07\xe1\xf4\x42\xfd\xce\xda\xb4\x57\xe2\x27\x8a\xd3\x2f\x97\x02\xc7\x9a\x8b\xad\x98\x61\xe4\x0b\xca\xe4\x08\x33\x92\x8c\x5f\x67\xce\x97\x0c\x3c\x0d\x70\x2b\xf3\x0a\xf5\xbc\xc2\x22\x15\xd3\xcb\x35\x42\x67\x93\x2f\xbd\xc6\x17\x14\xfc\x27\x6d\xe7\x2d\x75\xa2\xbb\x8f\x99\xfb\x5a\xc0\xab\xcc\x3d\x14\xf0\xa9\xdb\x36\x74\xe4\x08\x9b\x1d\xa1\x5b\xb5\xed\x51\xf0\x1d\x81\xa9\xa3\x84\xce\x3a\x05\xde\x9e\x23\x74\xea\x28\x61\x33\x35\xdb\xf3\xcc\xfd\x22\xe0\x5d\xe6\xbe\x15\x90\xc4\x6d\x87\x7e\x4e\xde\x29\x81\x7d\x84\x1b\xfc\xfb\x37\xfe\x7c\xf4\x54\xab\xb0\x0a\x8b\x3e\xa6\x3f\xc2\x96\xed\x2d\x4c\x0f\x6a\xfa\x6d\x3f\xc4\x5f\x63\xb9\x31\x72\x24\x5a\xc4\xce\x33\x9d\x3e\xb4\xa6\x78\x12\x74\x3c\x10\xd5\x9a\x27\x0b\x0a\x22\xee\x56\x06\xaa\x61\x1f\xe1\xeb\xc7\x33\x92\xc4\x80\x63\x62\x24\x83\x01\xa5\x24\x2e\x60\x51\xc6\x6b\x1d\xe6\x38\x7a\xfe\x73\xed\xd6\xd0\x96\x86\x1c\x63\x99\x5e\xa4\x60\x32\x8a\x3e\x13\x75\x54\xa7\xa6\xce\x4c\x9a\x22\x9d\xc9\x5f\xd2\x3f\xcb\x07\x92\x7a\xaa\x22\xed\x1c\xff\x29\x2b\x53\x57\xf2\x78\x7d\x8a\xc0\x7a\x75\x03\xd3\x88\x81\x41\x71\xa7\xe0\xa3\x53\xae\x2b\xe1\x25\x75\x67\x44\xc4\xe0\xeb\x6a\x87\x51\x48\xd2\xca\x51\xa3\xf8\xaa\x1b\xc2\x63\x60\x08\x08\x36\x3b\x1a\x90\x74\x83\x61\xc2\x70\x60\xb6\x4f\x8b\x7a\x05\xb6\x07\x69\x7b\xa2\x28\xc4\xbd\xba\x97\x5f\xeb\x93\x11\xa9\x96\x5e\x67\x9f\x50\xb3\x7a\x56\xc6\xe3\xb4\xaf\x2b\x53\xcb\xf4\x2c\x37\x1b\x21\x23\x82\xa9\xa1\xf0\xc5\x5a\x06\x7b\x5
6\xcb\xd4\xc1\x31\x24\xbe\xf9\x74\xb9\xc8\x39\x0a\x67\x7a\x99\x53\xd0\x11\xc4\xaf\x3b\x7d\x6c\xb8\xfb\x8b\xbd\xc7\x53\x46\x81\x9d\x3b\x09\xb0\x91\x92\x7e\x30\xf9\x30\x33\xf7\x3d\xae\x6e\x49\x49\xc1\xdf\x71\x04\x78\x89\xbe\xbf\x68\x26\xfb\xd7\x20\xf9\xa4\x4c\xb3\x55\x73\x49\xc4\x73\x21\x2a\x7e\x5e\x9f\xa5\xcd\xa1\xae\x51\x46\xc7\xb8\x62\xdc\xf6\x77\x80\xdb\x1e\xc3\x8c\x3b\xaf\x32\xed\x8b\x45\x21\x51\x1f\xa7\xfa\xa3\x36\x71\xb5\x7f\x82\x9c\xec\xc7\x0c\x9e\x34\x9f\x78\xfa\xa4\xed\x01\xf6\xbe\x78\x60\xb3\xf6\x40\x48\xf2\xaa\xc7\xb9\x1a\x97\xab\xab\x11\x1c\x10\x01\xc3\x7a\x3e\x8d\xce\x9e\x38\xe6\x68\x58\x2e\x80\x7a\x46\x7b\x0e\x29\xee\x12\x33\xc7\x0b\xb8\x02\x45\x7a\x62\x77\x29\xef\xd9\x3b\xa9\x3a\xfc\x9d\x24\x5e\x9e\x49\xe2\xe5\x67\x8a\x79\xf8\xd2\x92\x2b\xc8\xfc\xf3\x32\x82\x21\xb4\x54\xbf\xa8\xa5\x9e\x16\x51\x9b\x8b\xe7\x4e\x4e\x92\x0d\x44\x23\x38\x12\x8b\xc1\x0b\x29\xa6\xa3\xbe\xc2\x77\x17\xb9\x8d\x4c\x42\x1e\x3f\x2e\x13\xf2\x7c\x11\x14\x0e\x32\x32\x95\xc4\x3a\x13\x69\x72\xd1\x0b\x72\x81\xf6\xff\x9e\xae\x98\xac\xb3\x44\xc7\x6b\x7d\x7f\x59\xe0\x4c\x53\xf0\x3e\x3b\x09\xca\xc0\x51\x6c\x92\xdc\x84\xb7\x3e\x46\x30\x3d\x26\x0a\xc2\x76\x7f\x7c\xa1\x4b\xa3\x3a\x51\x8c\xe3\x78\x8d\xc7\x65\x6b\xec\xa9\x5e\xfa\x8c\x11\xe6\xe2\x7e\x72\x75\x2d\x6d\x76\x86\xe2\x76\xbe\xca\x71\xcd\xc9\xcb\x0c\xde\x64\x28\x0a\x34\x38\x2f\x69\x67\xb6\xf7\xf9\x59\xe2\x4a\x46\xea\xc9\xde\x14\x3b\xa7\xdb\xd0\x51\x90\xd4\xe8\xaa\x84\x5f\x99\x33\x23\x69\x0c\x39\x48\x3b\x43\x25\x2a\xfe\x54\x54\xd2\x3b\xa8\xf1\xc9\xa6\xf8\x08\xfa\x16\xe3\x14\xbd\x03\x9c\x22\x4e\x98\xfd\x7f\x31\x55\x76\x56\x4d\x95\x9d\xd5\xf8\xf1\xae\xa9\xfa\xae\x1f\xeb\xa9\x92\x5c\x4f\x2e\xcf\xc0\x47\xef\x20\xf5\x56\x5c\x73\xe7\x2a\x43\x15\x54\xdb\x7b\xd9\xf2\x7b\xf3\xe5\xf7\x6e\x39\xcd\x47\xc2\x18\xfc\xb8\xf6\x4c\x18\xc3\xfb\xac\x9a\x0e\x3e\xad\x17\x3c\x8c\x21\x6b\xac\xf8\xf6\xd2\x70\xec\x54\xbf\x33\xd1\xec\xcb\x69\x8d\x8d\x5c\xe9\x16\xc6\x10\x34\x7a\x3d\x5a\xee\x75\xbc\x34\xd8\x71\xd9\xed\xf1\x6a\xb7\x72\xb0\xaa\xd7\x13\x47\x07\x4d\xae\xec\xf7\x01\x91\xf6\x54\x89\x6d\xf8\xe7\xaa\x53\x7d\xf6\x0d\xd7\x00\xbf\x1f\x57\xa7\x92\x12\x50\x4e\x58\x62\xf3\xbf\x91\x62\x75\xb4\x6e\x5f\xe8\x38\x24\xbd\x94\xfa\xb7\x69\x49\xca\x16\x3f\x86\x65\xcb\xc1\x8c\x20\x14\x1a\xfd\xeb\x2a\x54\xf9\x6e\x6a\xfb\x6a\x8f\x38\xc6\x80\x7a\x07\x8e\x70\x53\x9b\x19\x23\x42\x61\x70\x49\x5d\xe2\xd7\xe1\x49\x43\x8d\xfa\x60\x25\x2a\xf9\xea\xf4\xfa\xee\xe7\x9c\xd4\x0e\xf0\xf2\x8b\x8c\xbd\x23\x05\x9f\x6a\x3f\xa1\x15\x65\x37\x76\x33\xa6\x8b\x05\x85\x2c\x5e\x17\xdb\x52\x8f\x22\xd0\xfa\xb0\x19\xd9\xcd\x00\xa3\x11\x84\xcd\xb6\xcb\xf4\xb3\x0d\xcf\xdb\xbe\x33\x43\x4c\xf5\x8d\x02\xdb\x76\xae\xf1\x7a\x9b\x82\xf7\xde\x39\xe7\xc4\xda\x49\xf3\x38\xe8\x25\xa9\xec\x65\xb9\x37\x89\x24\x2a\x2d\x15\x32\x85\xa2\x40\x6d\x2f\xca\xb0\x7d\xce\x65\x0f\xd3\xb7\xdb\x56\x11\x19\x52\xe6\xb2\x6d\x7a\x35\xbf\x77\xf2\xb4\xf4\xf4\xad\x5c\x9b\x5f\xe6\xf0\x05\x19\x36\x99\x61\xc2\x72\x0a\x27\x19\x61\x0a\x72\xb5\x23\xf0\x0b\x74\x04\xc6\x1b\x35\x6f\x75\xed\xde\xec\xbb\x5b\xf7\x5c\x97\x60\x64\x84\xda\x8a\x83\xd4\xa9\xfc\xbc\xe5\x46\xbd\xd8\xae\xb5\x11\xd7\x8e\x74\x35\xa7\x78\x81\xc9\xa1\xcc\x11\xd1\x50\x9d\xd4\x5d\xb2\x85\x9e\x86\x2e\x17\x4d\xbe\x19\xa7\x74\xdb\xbb\x82\x54\x9d\x08\x9c\xfc\xe7\xac\xc5\x27\xfe\x17\xdb\x41\x43\x19\xd6\x7b\xd4\xa5\x1e\x91\x96\xb2\xbe\xa2\x35\x6c\xdb\x99\x93\x9d\xcc\x64\x6a\xf0\x21\xb1\x03\x0a\xbe\x92\x17\xfc\x3d\xb5\x0f\x51\xba\x68\x0d\x33\x13\x30\x27\x47\x6a\xa5\x74\x38\x08\x4e\xeb\x23\x2e\xe1\x8a\x6f\xf8\xd6\x3d\x23\x5c\x8f\xd5\x63\x07\xa9\x92\x64\x5b\x26\x85\xac\x80\x60\xa0\xc9\x8f\x5a\x48\x74\xe6\x2b\xa7\x
49\x3c\x97\x84\xae\xba\xed\x2b\x0c\x6f\xb3\x53\xf0\xd5\x9f\x63\x48\xd5\x9f\x33\x88\x0d\xc4\x7f\xf2\x48\xe6\x86\xe8\xf3\x47\x21\xac\x6e\xa6\xea\xb7\xb1\xfd\xf9\x85\xed\x2f\x2f\x6d\x7f\x31\x84\xab\x86\x3e\x3c\xae\x19\xe8\xb4\xd4\xa5\xb5\x4f\x51\x7e\xcf\x57\x47\xcd\xab\xd9\xfa\xb2\xd2\xd6\x57\x5f\x40\x61\x7b\xef\x17\x7a\xef\x72\xa9\x40\xe8\x0b\x27\x47\x1e\xad\xb9\x99\x64\x6d\x87\x25\x5b\x3d\x2c\x6f\xf4\xa1\x60\x09\x82\x7c\x10\x65\xd7\x31\x9b\xf7\x58\x18\xea\xfc\x3b\x58\x9e\x3a\x5b\x7b\x34\xa0\xe3\xc0\x99\x23\xc3\xdc\x6c\xd5\x4b\xff\x8d\x3e\x2e\xd5\x41\xf9\x51\x1c\x14\x8c\xf7\x88\x99\x12\x19\xae\xe8\x6a\x6a\x87\x6a\x80\x64\x4d\x50\xa3\xea\xb1\xb3\x1a\x68\xb4\x32\x46\x84\x30\x30\x23\x9e\xa6\x15\x25\x9e\x88\xd2\xa5\x50\xb4\xa1\x51\x65\xcc\xc8\x8b\xac\xec\x68\x4f\x57\x42\x5f\xd4\x50\x1c\xd7\x7c\x1b\x22\xda\x08\x2e\x64\x55\xd4\x87\x5e\xaa\xc8\x65\x55\xbd\xa5\x2a\x4c\x03\x43\xf2\x15\x78\x2e\xad\xcf\x07\xbd\x3e\x91\x29\xa1\x10\x82\x07\x19\xa8\xe3\x1d\xc7\xeb\xd3\x5e\xed\xee\xbd\xdf\x3b\xd9\x5b\xce\x7c\x15\xc4\xb5\x30\x00\x6d\x28\x34\x31\x00\xd3\xf8\x5f\x33\xdb\xd9\x9e\xdf\x61\xb9\x8b\x63\x10\x10\xc4\xda\x82\xd2\xff\x57\x86\x6d\x49\x94\xb4\x62\x12\xdc\x4f\x09\x5a\x05\xd5\xe0\x93\xce\x15\xb8\xd0\x69\xa7\xbc\x8f\x3a\xfa\xe1\x32\x6e\x75\x0d\xde\x63\x3a\x8b\x7f\x11\x9f\x79\xe9\x7c\x12\x24\xa1\xc0\x6e\x9c\x04\xbc\x13\x53\x9f\x27\xee\x74\x0c\x5c\x2d\x6e\x9d\xe7\x85\x31\xbd\x01\x79\x51\xda\xd0\xca\xe6\x69\x93\x42\x9e\x9b\xf2\x20\x0b\x0a\xd7\xb1\xbb\x4b\xfe\x1a\xc1\x26\x0c\xd5\xf6\x74\x29\x20\x5a\x2a\xbf\x37\x80\xea\xc6\x99\x91\x8b\xb8\x5d\x39\xfd\x8d\xc3\x65\xac\x3e\xf7\x3a\xa6\x0b\x48\x1a\xb1\xa1\x2b\xe7\xee\x9b\x63\x02\xbf\x34\x0c\x7b\x6f\x10\xe1\xde\xe8\xe3\x3c\x27\x7d\xe4\x1a\xe1\x66\xf5\xd3\x0b\xa2\xf4\x46\x21\x38\x23\x0c\xd8\x9e\x4f\x57\x63\x3e\x35\x67\xeb\x2f\x13\xaa\xe2\xe1\xda\xdb\x34\xad\x9d\x93\x69\x8c\x2e\x09\x53\x49\x6e\x50\x78\x1a\x57\x54\x92\xd7\xa9\xa4\x45\x9d\x83\xb4\x22\xb2\xdb\x86\x1e\xae\x9c\xca\x14\xc3\xfd\x53\xcc\x8f\x81\x80\x37\x57\x24\xd1\xa7\xa8\x0d\x39\x0f\x1b\x83\x02\xc6\x64\x45\x45\x4c\x96\xef\xb2\x95\x98\xac\x94\x82\x3a\x25\x4e\xcb\x61\x8f\x34\x81\x5e\x09\xd6\x5b\x89\x1f\x53\xf0\x30\x8f\xd7\xb8\x42\x75\xef\xfe\x0a\xaa\x1c\xdd\x8a\x2a\xb7\x57\x77\x0d\xc3\x7b\x57\x71\x69\x9d\x0c\xd7\xb0\xaa\xf7\xce\xc1\x72\x6e\xfa\x19\xa7\x8e\xf6\x55\x5b\x9e\x13\x6e\xe8\x71\x81\xe5\xbe\x33\x8d\xfc\xa5\x42\xfe\xbc\x40\xfe\x28\xf9\xd7\x6d\x09\x79\x4b\xcc\x97\x9a\xdd\x08\xc1\xef\x28\x03\x09\x39\x3c\x67\xb5\x78\xae\x75\x4f\x54\x7b\xb4\xcc\x4d\xe5\x3a\x2a\xeb\xad\x58\x5b\x72\xf4\xd0\x49\x80\x9d\x38\x12\xbc\x33\x87\x83\xf7\x45\x63\x8a\xd7\xc6\xb1\x24\x15\xd1\x45\x94\xb0\x78\x5d\x99\xd8\x6b\xe3\xc7\x71\xb5\xa6\xde\x2f\xfa\xc2\xfc\x50\x6f\x39\xd4\x2f\x38\xec\x8c\xf1\x60\xaf\x1c\x01\xa6\xf6\x2c\x7c\x31\xf3\x60\x41\x20\x78\x96\xb5\x3b\xc3\x1c\xa2\xb8\x81\x7a\x95\xf5\xfe\x2f\x82\xb3\x60\xde\xe6\xfd\x92\x71\x29\xe3\x86\x93\x4b\xe1\xff\x12\x44\x19\xf3\xe2\x7f\xab\x7a\xef\x17\xe3\xfd\x73\xcd\xd1\xd9\xe4\x4c\x90\xfd\x94\x7c\x51\x42\x51\x51\x3a\xa4\xf0\x74\x39\x14\xba\xce\x88\xfe\xbc\xaa\xbe\x48\xe1\xf2\xa3\xbe\x72\xbd\xab\xc7\x2f\xf6\xc1\x91\xc0\xf6\x9d\x1c\xd8\x81\xc3\x81\x3d\x71\x18\x78\xbb\x6a\x4d\xcf\xf5\x9a\xee\xe8\x25\xbd\x48\x3f\x71\x91\x45\x18\x7f\x61\x16\xd5\xcb\xa3\x38\xd8\xd5\xae\x44\xf5\x5b\xa7\x19\x17\xb5\x5b\x82\x25\xfe\x65\xbd\x84\xca\x34\x5a\x1e\x67\x5a\x1b\xb8\xd8\x1f\x9c\xfd\x4d\x5c\xb8\xfa\xc8\x48\x7d\xe0\x61\xe1\xcb\x93\x84\xd1\x85\x05\xaf\xc5\xd2\xf3\x6f\x92\x30\xb5\x60\xc7\xf4\x89\xf3\x0c\x93\x97\x7d\x11\x58\x5a\x45\x8f\x09\x7b\xf1\x1d\x22\x65\x0a\xee\x40\x87\x4b\x7f\x0e\x21\x81\x9b\x78\x99\
x2e\x0b\x45\x97\xb5\x9f\xf6\xf7\x78\x9d\xc5\x4d\x8b\x1b\x7b\x31\x94\xbc\xfd\x31\x83\x1f\x8a\xb7\x2f\x10\x84\x77\xe8\xfc\xf2\xb0\x56\xc5\xc2\xa0\x3d\x0a\xcf\xe3\xee\x4c\x72\x8f\x75\xb6\x9d\xed\x55\xcb\xe8\x43\x83\x2d\x32\x72\xe0\xc1\x5c\x1d\x72\x6f\x0b\x52\xa4\x2c\x1c\x24\xd5\x09\x90\x9a\x58\x61\xcb\xc1\x24\x2f\xb9\x93\x82\x7f\xea\xfc\xe4\x04\x27\xc0\x96\xd9\x40\xdf\xcd\xc8\x2c\x86\x1b\x44\x1c\xe7\x66\x4c\x85\x17\x80\xd5\x4d\x66\x66\xcc\x62\xb0\x97\x6a\x30\xf0\xcf\x1d\xbf\x0a\x7e\x7d\x8d\x12\x61\xd3\x17\xa1\x78\xf0\xd4\x79\xae\x59\xc0\x92\xc7\xff\xc1\x89\x09\x90\xe2\x4b\x81\xca\x85\x5c\xa7\x88\xe6\x9c\xcc\x63\x30\xa9\xaf\xd3\x85\x9a\xe2\x97\xce\xa9\x9d\x3a\x1f\xf5\xac\xbe\xd4\x67\x75\x58\x9b\xd5\xd3\xca\x86\xb7\xfa\xf0\x0b\x4e\x22\x5a\xcc\x10\xb7\x73\x0f\x0b\xaa\x46\x5a\x6c\xdc\x51\xcd\x95\xd8\xd6\x31\xc8\x87\xfa\x20\xa5\x28\xf3\x56\x3f\x4b\xdb\x52\x31\xe8\xe7\xbe\x2f\x73\xe8\x9b\xed\xd1\xda\x49\xc5\x83\x8d\xb6\xea\x5d\xae\x52\x52\x6f\xdb\x6e\x7f\x5c\xd6\x52\x1c\x34\x93\x94\xf6\x66\xe4\xbb\x91\x1a\x96\x12\x19\x84\x1a\xe4\x34\x43\xe1\x6d\xa9\x81\xf2\x02\xe4\xc2\x36\x90\x43\x63\x5f\x09\x64\x5b\x4e\xa8\xe1\x4b\x0f\xe0\x9f\xeb\x01\x0c\x7c\xad\x0e\xe0\x9f\x9b\x01\x56\x21\x0a\xa7\xe3\x69\x88\x30\xa3\x19\x58\xf0\x56\x87\xf9\xe2\x78\xed\x20\xb0\xe9\x78\xee\x0d\xc9\xca\x21\x3e\x17\x73\x69\x19\xe4\xb3\x19\xa4\x3c\x31\x0f\x9b\x7c\xc4\xd3\x15\x5e\x64\x65\x6f\xa7\x2b\x5d\x1e\x37\xbb\xc4\x2b\x5d\x9a\x21\xe3\x7e\x54\xeb\xd2\xce\x5d\xf9\xe9\x32\x77\x75\x1c\xaf\x2f\xc5\x37\x73\x38\xcc\x1d\x09\xb9\xae\xaf\x94\xc3\x4f\x4d\x13\xae\xe2\xf6\xe0\x36\xef\xfe\x7d\x62\x59\x9a\x3d\xd2\xfa\x41\xaf\x48\x8e\x6f\x33\xb8\x8a\x51\x5f\x42\x9d\xfd\x05\xec\x74\x62\xcd\x1b\x59\xd8\x12\xc7\x3a\x73\xaa\x53\x64\xd7\x37\x79\x6d\xdf\x2c\xbf\x5c\x8a\x79\xed\xd1\x80\xb7\x54\x92\xa2\x0b\x9f\x19\xc2\x5e\x96\xed\x59\x2c\xe0\x20\xee\x74\x9d\x7e\x23\xc1\x72\x2d\x53\xe7\x4c\xaa\xef\x92\x3a\xcf\xdc\x3d\x59\x24\x99\x4b\x5c\x75\xc9\x40\xba\x6f\x62\x34\x3a\x17\xd6\xba\x5a\xd5\x3c\xf1\x4c\x62\xf0\x77\xa2\xfa\x24\x4b\xe6\x38\xed\x8e\xa1\x93\xc7\xc2\x4e\xac\xb3\x6d\xd4\x13\xd2\x53\xf8\xd6\xb9\x46\x1d\xe9\xbe\xef\x25\x55\xf4\xb9\x29\x4c\xb4\x9c\x72\x5e\xd8\x79\x91\xf0\x8f\x56\x6b\x86\xc6\x8e\x67\x45\x0a\x2c\x6e\xb3\xfb\xf7\x97\x93\x82\x95\x7d\x12\x57\x2e\x16\x24\x21\x19\x39\x8e\x61\x7f\x25\xba\xf0\x8d\x26\xa3\xd5\x87\xea\x1d\xbf\x7f\x5f\x27\xbd\xb2\xd9\x38\xb1\x3d\x27\xa1\x0b\x25\xee\x49\x0a\x6a\x1d\x08\x96\x60\x3e\xa4\x76\x7f\xec\xa5\x0e\xc6\xee\x1f\xc4\xe0\xa5\x26\x93\xeb\x7d\x4b\x23\x34\xb5\xfb\x0f\x21\x97\x5a\xfc\x3d\x8a\xd7\x54\x0f\x28\x18\x9b\x0b\x47\xc2\x37\x05\xb8\x37\x0e\x87\x23\xc7\x87\x63\x87\xc1\x89\x93\xc3\x6b\x0d\xc4\x9f\xe3\x7f\xa1\xac\x96\x24\x65\xda\xd7\x73\x13\xa3\x85\x27\xe8\x64\x8d\x2a\xe2\x38\x06\x61\xff\x04\x61\xe7\x20\xec\x39\x08\x7b\x06\x85\x65\x69\x41\xe1\x6c\x89\xa5\x65\x45\x3d\x6b\x7f\x5d\x3d\xeb\xc4\xfe\x89\x7b\x99\x63\xf9\x80\x39\x96\x0f\x98\x3d\xab\xcb\xb5\x27\xb8\xe2\x9f\x29\xf8\x7a\x93\x2a\x86\x4f\xcf\xf7\xaa\xbd\x20\xe1\xe7\x18\xc3\x50\x8e\x53\x38\xc9\xe1\x67\x44\xf0\xf2\x8a\xc1\x24\x22\x56\xc8\xe2\x8c\x5b\x5a\xcb\x0e\xfb\x5d\x67\x26\xc1\x28\xb2\xee\x05\xc4\x3c\x35\x1c\xf4\x71\x5c\xc7\xd1\xf7\x66\x64\x5f\xcd\x86\x48\x57\xb4\x1a\x34\xe7\xb5\x24\x91\xf8\x8d\x6a\x55\xd4\x9a\x08\x48\xec\x19\x10\x34\xa4\x7e\xa6\x04\x7d\x7d\xe8\x57\xf4\x39\x29\xa2\xb0\x0f\xab\xcf\xf7\xd7\x94\x2d\x6b\x5d\x67\x2c\xd0\xed\x7d\x36\x18\x41\x23\x00\xac\xd3\x56\x11\x85\x0b\x92\x80\x4f\xc7\xc5\xc4\xca\xb2\x20\xe5\x26\x7c\x55\xc8\xaf\xcc\x01\xb9\x50\x82\x53\xf7\x7e\xb4\x56\xe3\xf0\x0c\x5e\x2d\x2a\xeb\xa1\x38\xb9\x80\x1d\x77\x46\xce\x04\x1c\xa9\x97
\x1e\x8a\x5b\xfe\x39\x24\x45\x7a\x6e\x25\xba\x95\x89\x7e\xa9\xfe\xa5\x13\xfa\xa2\xdd\xe5\x73\x0c\x2b\x29\x7d\x0b\x28\x99\x60\x61\x49\x6a\x1e\x2a\x33\x03\x2b\xc9\xae\x96\x9e\x58\xff\xac\x52\x11\xeb\xdf\x26\x7c\x40\x3d\xdc\xa8\x62\x96\xfe\xbb\xbb\xa2\xcf\x90\x46\xba\x58\xb1\xd3\x4d\x89\x6f\xd0\x78\x89\x97\xf7\x35\x5e\x6e\x02\x92\xc9\xf6\x29\x28\x24\x7a\xbf\x88\x44\x00\xaa\x36\xcc\x98\xca\x23\xb0\x8e\x4f\x8e\xde\xec\xbf\xb2\xe0\x46\xa2\xcc\x8c\x40\x7b\x48\x2a\x7d\x9e\x5a\xbe\xfd\x58\x89\x1f\x87\xc4\xe2\x41\xa4\xad\xd6\x57\xcb\xd5\x36\x93\xca\x55\x34\x81\xdc\x95\x6b\xcf\x8f\x00\x0c\xb0\xc9\x8b\x13\x14\xad\x09\x70\x2f\x14\xef\x82\x82\x77\xe5\xcc\x31\x83\x50\x48\xb2\x90\x42\x10\x92\xfd\xa2\xb4\x7d\xd4\x3e\x6d\x0d\x21\x09\x9f\x59\x14\xe6\xe4\x0a\xa2\x25\x20\xc1\x1f\x45\x38\x85\xd9\x7f\x0d\x7c\x2d\xb3\xf6\xc8\x91\x82\xe4\x3d\x09\xf7\x46\xea\xdf\xe2\xff\x58\xcf\x47\xbf\xa7\xf6\xea\x25\x58\x34\x4b\xbd\x32\xbf\x2f\x42\x1d\xe3\x43\x52\xc8\x69\x14\x4e\x63\x97\x24\xa9\x8b\xfa\xa7\x33\x01\x2f\x39\xbc\xd6\xd1\x8d\x02\x9e\xa7\x70\x68\x2e\x5f\x70\x88\xcc\xe5\x47\x8e\xa3\xe0\xf5\x07\x0e\x6f\x8b\xeb\x9f\x1c\x76\xcc\xa5\x4e\x7e\xd7\x96\x8f\x1e\x79\x92\x05\xfd\x4a\xdb\xbc\x22\x10\xcb\xb5\xb1\x46\xe8\xf8\x92\xa0\x6c\xf5\x3e\x76\x5f\x0a\xd8\x45\x8f\x84\x47\x0b\x38\x8b\x8b\xc2\x8a\x3f\xf5\xd5\xf6\x02\xde\xc6\x77\x29\x1d\x74\x26\x3a\x1c\x8f\xfb\xc0\xdd\x1d\x1d\xb5\xeb\xce\xc8\xdb\x58\x3b\x1e\xf9\xa9\x2e\x99\x6a\x5d\x8b\x34\xc8\xf1\x21\x0b\x58\x8a\x99\xb7\x94\x6c\x38\x56\x44\xd9\x51\x04\xdc\x6f\x7b\xea\xa2\x91\x05\xbe\x78\x32\x6d\xc9\x5f\xc6\x6b\xce\x10\x8d\xd8\xde\x95\x32\x06\xbd\xeb\x74\xb1\x20\x5a\xd6\x7e\x1b\x2b\xa0\xad\xbd\x34\x68\x84\xaf\xcc\xcc\x4b\x73\xf7\x8c\x93\x62\xc6\x97\x52\x5e\x3b\x0f\x1e\xc4\xa9\xcf\xe2\xcb\x34\x93\xce\xd3\xe1\xd3\xad\x07\x56\x4d\x62\x7f\x1e\xc3\x1b\x1d\xa4\xdc\x77\x7f\x69\x23\xd9\x3e\xda\x96\x64\xa5\xcd\x8d\x52\xf0\x4e\xd0\x45\xa4\xf8\x9c\xe9\x2a\x2e\x0a\x56\x6f\xc5\xab\xb7\xb2\xd5\x5b\xde\xea\xad\x70\xf5\x56\xb4\x7a\xab\x05\x21\xb6\x50\x2e\xb6\x7a\x2b\x5f\xbd\xd5\x52\xa9\xb3\x85\xe9\x11\x6b\xf8\x20\x25\xbc\xc5\xe0\x6d\x3b\x29\x78\x8f\x9d\x08\xbc\xa7\x0e\x03\x3f\x72\x24\xf8\xb1\xc3\xc1\x4f\x1d\x01\x7e\xee\x78\xe0\x4f\x9d\x1c\xb0\x36\xab\xff\xd2\xf1\xc1\x3f\x75\x32\xf0\x3f\x3b\x01\xf8\xe7\x4e\x5f\x49\xdf\x53\xf0\x0e\x9d\x70\xd1\xf8\xdf\xb2\x39\xb3\xd8\xa0\x7b\x23\xf0\xde\xa1\x81\xaa\xcd\xdc\xb9\x5f\x33\x6d\xf6\x8b\x87\xb4\x3d\x8b\xcc\x48\x9c\x82\x04\x9f\x52\xc2\x29\x49\x29\x99\xa4\x54\x33\x9f\x24\xa7\x84\x51\x92\xa7\xd5\x7f\x09\x25\x3e\x25\x86\xc5\x7c\x29\xdc\x9f\x89\x1a\xfb\x4b\xe7\x59\xac\xd8\x78\xb1\x41\x2c\xc7\xda\xb8\x2a\xeb\xb7\xc0\xeb\x78\x8d\x17\xab\x11\x6f\xa4\x73\x42\x24\x9c\x90\x4a\xb4\x39\x8c\xbb\x99\x22\x5f\x91\x8b\x7d\xcf\xa4\xd5\xf9\x11\xbb\x87\x31\xb1\xfc\x98\x65\xd9\x3e\x9b\x70\x8b\xc2\x87\xb8\x88\x94\xf2\xae\x94\x88\xf5\x22\x76\xcf\x13\x62\x05\xd1\xd4\xa2\xf0\x52\xff\xc8\xae\x59\x62\x51\xf8\x18\xbb\x9f\x12\x78\x15\xbb\x33\xf2\x42\x31\xf2\x88\x3a\x5f\x9a\xab\x8f\x31\xb1\xde\xa7\x2c\x88\x92\x0b\xdb\xb6\x2d\xfa\x55\xe7\x63\xf9\x14\xbb\x89\x80\xf3\x0e\xc9\x4f\xa6\xa7\xd7\xd7\x5c\xec\xb0\x8c\x13\xba\x80\x77\xff\x8a\xe9\x48\xdb\x8d\x0a\x09\x7e\x59\xd7\xfe\x2a\x6e\xa2\x8b\x13\x56\x1a\x8b\x92\x00\x3f\xca\xcb\xa5\x4c\xd5\x67\x89\xe0\x8e\x99\x80\xa4\x7e\x30\xb2\x28\xf0\xc0\xcd\x05\xe4\x41\xf7\xc2\xf3\x00\x12\x30\x23\x08\xf4\x02\x63\x81\xcb\x04\xf8\x41\x8b\x2e\x31\x19\x5b\x21\x1b\x4c\xa2\x24\xcf\x2c\x47\x5d\x5e\xc7\x79\x66\xd5\x2a\x74\x06\x6a\x89\x4f\x11\x19\xfd\x88\x89\xe5\xc9\xa4\xe7\xc9\x64\x90\xe6\x32\x8e\x12\x3e\x88\x92\x30\xed\x79\xa9\x08\xb8\x18\x0c\x7b\x13\x31\x18\xf5\x2
6\xde\x60\x84\x74\x99\x05\x60\x4d\x98\xb8\x88\x92\x41\xcc\x43\x69\x81\x35\xd8\x12\x7c\xa2\x76\x48\xef\xa0\xc4\xc1\xd5\xb0\x21\x43\x85\xf2\x57\x0a\xfb\x45\xa5\xc7\x00\x01\x46\x46\x32\x56\xc0\x12\xe9\x15\xc8\x63\x8b\x42\xa8\xaf\x99\x45\xc1\x0b\xb4\x17\x5e\xe7\x62\x3c\xe7\x45\x5e\xb5\xce\x2e\x87\xa1\xe9\x42\x76\xc9\x5f\x05\x72\xb6\x00\xaf\x32\x75\xf9\x15\x03\x81\x83\xb5\xb5\x4f\x92\x7a\x4e\x7e\x93\xa5\x02\x19\x3e\xa6\x19\xbe\x67\xc2\xf5\x02\x92\x63\xd9\x86\xb1\x3e\x50\xcc\xee\x8f\x09\x46\x53\x94\x55\xb3\x12\xd7\xa4\x70\xbf\x96\x24\xa7\xfa\xb2\x2f\x89\xdc\xb0\x7a\x6a\xeb\x29\xc5\x6a\xb3\xad\x7d\xb0\x87\x68\xeb\x21\x28\x16\x70\xa8\xbf\xb5\xf1\xb6\xbe\x24\xc5\x44\x74\xf1\x9f\x1c\x5f\x56\x1b\x49\xbf\x60\xc3\x8c\xb6\x74\x5f\xdf\xc3\x5c\x25\x81\xdb\xca\x80\x14\x49\x84\xec\xec\x3a\x8e\x24\x79\xf0\xcf\x6c\xe3\xc1\x85\x12\xce\x02\xb3\xc7\x4c\x5c\x70\x69\x51\x98\xea\x8d\x95\x81\x45\xa1\x6f\xae\x2f\x2d\x0a\x13\x73\xad\xb8\xb9\xcb\xa0\xdb\x92\x9c\x60\xc0\x84\xcd\x86\x14\x7d\xc5\x0b\xd0\xdd\x93\x77\x81\xdd\x82\xc7\x6f\x05\xce\xe2\x8c\x28\x08\xdf\xb4\x34\x9c\x82\x42\x3c\xa8\xc4\x57\x20\xeb\x14\x2f\xbc\xf5\x4d\x8a\x03\x64\xa4\x9c\x69\xd7\xfb\xd4\x41\xec\x7e\xdd\x82\xc2\x85\x5e\x95\x58\x61\x85\xeb\xa5\x55\xe1\x50\xca\x20\xdd\x39\xf0\x45\x3d\x07\xbe\xe2\xd4\xc7\x07\x45\x69\x53\x2c\x6e\xe5\x1c\x60\x92\x08\x23\x11\xd6\xaa\x5e\xa0\xce\x43\xc0\x01\xd9\x07\xcd\x99\x53\x98\xad\x3d\x1c\xcd\x6a\x7b\x49\x19\x47\x14\x3c\x53\x5c\xd4\x2c\xd0\xfe\xfc\x36\xa7\x45\x55\xa0\xb9\xd9\x71\xe6\xe1\xe9\xbf\x09\xdc\x84\xc1\x5e\xd0\x15\xb8\x06\x89\x7b\x42\x08\x77\x67\xe4\x3a\xd0\x02\x1f\x97\xf0\x51\x92\x5a\x49\x0f\x5a\x4f\x45\xdd\x89\x0b\x7e\x94\xb8\x20\xa1\x98\x8f\x3a\xc4\xca\x04\x97\x4a\x6e\xc0\x80\x61\xaf\xc6\xa5\x5d\x04\x7a\xe7\x14\x9e\xbb\x4e\xb3\x48\x73\xa8\xc8\xe7\x47\xbe\xa5\x61\x8e\xc5\xd1\x45\x32\x88\x24\x9f\x64\x03\x0c\xd0\xee\xc5\x51\x26\x07\x3a\x69\xb9\xba\x5d\x01\xe0\xb5\x42\xa0\xde\x60\xbb\x02\xc1\x17\x71\x01\x12\xb3\xc1\x68\x88\xad\x9b\xbd\x60\x10\xc6\xfc\xa6\xb7\x32\x70\xf1\xd8\x0f\x25\x05\xc2\xf0\xcf\x43\xf4\x00\x7c\xab\x4e\xc2\xa7\x56\xbf\x82\x77\x68\xaa\x78\x82\x65\xa7\x50\xbe\xb9\x0c\x74\x52\x0f\x47\x41\x9a\x45\x81\x60\x26\xc8\x47\xd4\xee\x8f\xf5\x1d\xe7\x27\xd3\x5e\xf7\xaf\xf1\x1d\x27\xb9\x5a\x9b\x43\xdb\x0b\x6a\x5d\x66\x24\xbc\xdb\x31\xe8\x49\x7e\x23\xf1\x56\xf7\x99\xe3\x73\x3e\xc8\x62\x96\x5d\xb6\x1c\x84\x52\x42\x37\xf4\xfe\x82\x08\x0c\x90\x2a\x8e\xff\xbc\x1c\x0b\xa1\xa8\x57\x2e\xe1\x08\xc7\x41\xed\xd3\x19\xd3\x1b\xcc\xde\xd2\xea\xb3\x0d\x8f\x81\x1d\xde\x62\xd0\x81\x3e\x6f\xdf\x83\x5b\xa2\x31\xa4\xed\x8d\xbb\xdd\xda\xb5\x53\x69\x05\x3d\xd5\xe6\xaa\x25\x42\x80\xa8\xd3\xcf\x62\x49\x5e\xd6\xbb\xe9\x35\x9b\xe4\xf8\xdd\x86\xc2\xe6\x19\x17\x83\x8c\xc7\xdc\x57\x14\x36\x4a\x22\x19\xb1\xb8\x6c\x1d\x4c\xd2\x9f\x83\x5b\xba\xcc\xb8\xf7\x3d\x92\xb7\xf4\x32\xdb\xe6\xa7\xb1\x92\x6b\xac\xff\xf5\xd0\xf3\x87\x41\x89\x2e\x3f\xc6\x44\x6c\xfc\xc3\xb5\xfe\xb1\x91\x6c\xfc\xc3\xfa\x07\x6e\xc8\x6d\x08\x51\xe3\xc1\x7d\x46\x0e\x88\xd6\x4f\x42\x1a\x10\xeb\x25\xca\xd3\x3d\x6f\xde\x93\x97\x51\xd6\x8b\x99\xc7\xe3\xda\x5b\xac\x8d\x62\xc3\x17\x20\xa9\xd3\xb2\x44\xea\x35\x19\xf7\xd3\x24\x60\x62\xbe\xba\xa2\x6a\x8c\xfd\x54\xf6\x70\xc1\x4b\xf0\x01\xe6\xb2\xdf\xbf\x31\x2b\x6d\x8e\x81\x45\xee\xfa\x53\xf3\xb4\x3a\x35\x7e\xa0\xc3\x7c\x20\x77\xb1\x40\x14\xe4\xee\x08\xb3\x82\x5f\x29\xda\xb8\x61\xf5\x10\x07\x59\xce\xd2\xcf\xcc\x82\xdc\x6d\x4c\x7e\x12\xeb\xa9\x0e\xcb\x35\x9f\x5d\x46\x92\x0f\xb2\x6b\xe6\x73\x0b\xac\x24\x9d\x09\x76\x5d\xfb\x8e\x5c\xcf\x7d\x09\xa4\xf6\x9b\x98\x63\xe2\x0d\xb6\x0c\xc0\xfb\x12\x04\x9c\x10\x1f\x0b\x6a\x01\x1b\xcf\x48\x54\x6e\x
50\x85\x95\xcc\x14\x8a\x23\x32\x23\x7b\x01\x56\xeb\x46\x8e\x40\x1f\x11\x7d\x1e\x9e\x07\x6b\x54\xc9\x11\x60\x00\x19\x84\x29\x84\xa8\x45\x38\x0e\xea\x7e\x1a\xfa\xe4\xe8\x78\x9a\xc4\x54\xf2\x49\xea\x2a\xfc\xfa\x27\x5c\xc7\x83\x87\x66\x42\x27\xb2\x0b\x77\x87\xe4\x7b\x80\x07\x32\x44\xe5\xff\x25\x6a\xe0\xdf\x60\x9d\xee\x05\xca\xe0\x75\x07\xef\x37\x90\xb8\x21\x0a\x3f\xaa\xe7\xb3\xe5\x21\x66\xe4\x79\x80\xb9\x7e\xf0\x03\x86\x6a\x80\x8a\x95\xb7\xf6\x53\xbd\x83\x1a\x80\xb2\x5e\x98\xe6\x49\x80\xbe\xc7\x4c\xdc\x22\x0c\x7d\x08\x8d\x30\x74\xa5\xf8\x71\x62\xf9\x97\xdc\xff\x8e\x27\x79\xc7\xf0\xf7\xc9\x75\xae\xf8\xa0\x37\x86\xb2\x6b\xd8\x87\x83\xa0\x72\x05\x34\xbc\x12\x94\x0f\x2b\xd8\x4d\x29\x7c\x33\xac\xd4\xfc\x5a\xd1\xcb\xa3\xa0\x5b\x9a\x2b\xe8\x96\x5a\xd9\x84\x4d\x91\x84\x54\xe8\xe6\x4d\xd9\x36\x91\x0a\x1c\x15\x4c\x6a\x8d\xea\xc0\x4f\x13\x29\xd2\xb8\xfc\xa9\x26\xe0\xa5\x37\xd5\xb3\x3b\xf8\xec\xb7\xc0\x7c\x19\xb6\x21\x11\x5c\x1e\x60\x50\x7c\xe6\x55\x50\x95\x81\xa3\x14\x3e\x30\xad\xc6\x97\xf0\x0e\xf3\x82\x03\x56\xcd\xae\xce\xc7\xca\x28\x41\xe4\xa3\x96\xe5\xf6\xbe\x01\xcf\x7c\x11\x5d\x23\x81\xae\xce\x4f\x62\x30\x89\x06\xe7\xcf\xc1\xfa\x18\xbd\xee\x55\x33\x15\x51\xea\xef\x57\x5d\xe2\x28\xf9\x5e\xe7\x2a\x79\xc5\x23\x1a\x94\xca\xfc\xef\x0a\x8a\x92\xc0\x02\x4b\x0a\x96\x64\xd7\x4c\xa0\x96\xd2\x9c\xff\x30\x4d\x34\x2a\xbe\xe4\x22\xaa\x6e\xfb\xb9\xc8\x10\x09\x5f\xa7\x51\xa2\x75\x9c\xba\xc1\x60\x57\xc4\x15\x09\x37\x8b\x5f\x4c\x45\xa3\x5b\xb4\x67\xe0\x64\xf4\x57\x9f\x04\x77\x4c\xf3\xba\xaf\x61\xb6\x74\x17\xa2\x70\x1a\xb8\xff\xe0\xc9\xd4\xad\xeb\xe7\xfe\x01\xef\x35\x20\x46\xaa\xc7\x6e\xe0\x3e\x81\xb3\xc0\x1d\x6d\xc1\x4f\x05\xc3\x07\x52\xb3\x5f\x37\x12\xa6\x12\x43\xa2\xe1\xed\x1d\x04\xd9\x51\x29\xc8\x7e\x69\x3b\x09\x3a\x8f\x90\x71\x8e\x7d\xad\x7a\xa4\x29\x58\xdf\xf9\x7c\x27\x0d\xb8\x05\x98\x7e\x1b\x4f\xa7\x09\x45\x8b\xca\xb8\x31\x2f\xac\xc7\x8e\xc9\xb0\x8a\xee\x3a\x0c\xca\xe8\x2e\x66\xca\xad\xff\xd0\x87\x32\x9b\xb0\x58\x1d\xca\x0f\xfa\x3b\xf5\xcb\x29\xbc\x08\x3a\xb3\x5f\xfb\x67\x5a\xba\xbb\xd2\xb9\x53\x22\x2c\xa6\xc8\xde\x81\xef\x1e\x2a\x89\x10\x52\xf7\x94\xe9\x83\xf0\x2e\xd4\x89\x65\x45\x80\x69\x93\x7c\x6d\x6f\xf8\x19\x28\xaa\x12\xba\x17\xc4\xc7\x25\x33\x35\x11\x3e\x32\x40\x7e\xd9\xd4\xa0\xb0\x2c\xc7\xb7\xfb\x63\xeb\x92\x65\x83\x80\x25\x17\x5c\x58\x0e\xfe\xc8\x72\xdf\xe7\x59\x5d\xa6\xaf\x30\xab\x48\x67\xbd\x24\x1d\x5c\xe4\x52\x72\x91\x75\xf0\x94\x27\xc6\x41\xdf\x57\xef\x6b\x50\x17\x3f\x8d\x7b\xd6\x86\x28\xa5\xfd\x28\x19\xcc\xa2\x40\x5e\x5a\x20\xc7\xd6\xd6\x70\x78\x7d\x63\x39\xd6\x26\xfe\x6d\xe1\x6a\x5b\x5f\xaf\xce\x2c\x4f\xe4\x20\x93\x82\x4b\xff\xb2\xed\x39\xf5\x56\x44\x22\x03\x63\xfb\x59\xc6\x40\xef\x83\xf6\x1a\x84\x78\x1c\xc2\x54\x94\x78\x01\xb7\x51\x6d\xc1\x3b\x46\x1a\x41\x9e\xea\x4c\x9f\x05\xb5\x70\xd8\xd6\xed\x29\x8a\x2d\xed\x06\xd4\xd4\x64\xba\xe7\xba\x79\xf1\xd0\x0b\xa6\xfa\x3d\x4b\xdc\x03\xc2\x20\xaa\x85\x97\x19\x5f\x83\xfe\xf8\x5d\xe8\xcc\xc8\x49\x00\xf7\xd0\x06\x68\xb3\x92\xd2\xbc\x0b\x15\xd1\xf2\x49\x47\xf9\xd3\xdd\x80\x8e\xd5\xe8\x23\xea\x60\xcf\x73\x46\x5e\xb2\x16\x34\x58\x5b\xa4\x81\x27\x93\x6a\xa1\x56\x39\xb3\x6b\x11\x4d\x98\x98\x5b\xea\xa4\x93\x90\x42\xda\xc2\x72\x29\xae\x4e\x8e\x9b\x3b\xe1\xa7\xf1\x80\xe5\x32\xed\x35\xde\xa6\x88\xc7\x66\xdb\xf6\xb5\x6e\x5d\x78\x1b\xaf\xf8\x9e\x91\x57\xcc\x60\xaf\x56\x61\xc1\xe3\x71\xac\xa5\x85\x41\xba\x22\x2f\x98\xd8\x88\xda\x97\x54\xbc\x0c\xcc\xc8\x8f\x6a\x2c\x05\x1f\x8a\xcd\x5e\xe2\xb5\xab\xc5\xd8\x41\xca\xd2\x33\x40\x05\x3d\x6e\x5f\xd8\x56\x3b\xb3\x8b\x04\x00\xd9\xd1\x89\x81\x6d\x8f\x65\x1c\x31\x34\xe2\xe2\x97\x8c\x9c\x06\xb4\x1a\xfb\x34\xa8\x66\x67\x4a\x43\xbe\xbc\
xab\x86\xf0\x63\x9b\x72\xaf\xd0\x88\x50\x3a\x2e\x33\xa0\x59\x2b\xca\x3d\x35\xe5\x40\xa4\xd7\x41\x3a\xd3\x87\x1f\x55\x71\x80\x48\xe9\x25\xc2\xa6\xa8\x4d\x52\x18\xc2\xf1\x6a\x2d\x8b\x51\xb1\x11\xe1\xa3\x5e\x10\x79\xbd\x89\xb7\xd9\x9b\x88\x56\xe9\xd5\xe7\x9a\x88\xad\x65\x23\xae\x02\x22\x90\x45\x90\x2d\x60\x7e\x5d\x03\x34\xb5\x47\x46\x97\x68\x66\xfa\xe9\xae\x24\xee\x5c\x91\xab\x77\x81\xbb\x3d\x84\x64\xaa\x88\x96\x98\xba\x5b\x4f\x40\x4e\xef\x58\xf5\xb8\xf0\x61\x29\xcb\x1e\x5f\xe8\xc2\x84\x25\x16\x51\x02\x33\xad\x17\x3f\xe6\xd3\x95\xe2\xc7\x05\xd5\x60\x5f\xd0\x6a\xec\x7d\x47\x2d\xa2\x17\xa3\x1a\xd1\x97\xe8\x73\xcf\xde\xa1\x9f\x7b\x10\x82\xa6\x0d\x88\x95\x3a\xc9\xa8\x9c\x16\x3a\x8d\x53\x89\xf9\x43\x73\xf4\x14\x15\x63\xe9\x39\xc5\x0e\x4b\x6d\x5c\x0d\x42\x25\xd4\xff\x0b\x23\x72\x1c\xad\x8d\xb4\xdc\x82\x9f\x9b\x55\xdb\x3a\x70\xb3\xec\xc2\xcd\xef\x2a\xdc\x1c\xfa\x24\x2a\x31\xb1\x98\xd6\xef\xa7\xe5\xfd\xa4\xba\x2f\xc6\xf8\xa1\xd2\x64\x71\x7f\xa9\xb1\x79\x8e\x2a\xbc\x62\x35\x8a\xc7\xce\xeb\x88\x5d\x16\x23\x44\x7e\x89\xd8\x7f\x06\x84\x61\x78\x40\x85\xdc\x4d\xa8\xa9\x5e\xde\x4f\x06\xb9\xb3\x1a\x72\x97\xde\x5a\xe4\x7e\x1e\xd0\xb1\x7a\xc3\x88\x3a\xd8\xf3\x9c\x11\xdf\x47\x6d\x4c\x8a\xef\x85\x19\xc9\x91\x39\xf4\xf3\xcc\x02\x8f\xab\x3d\xa5\xe0\xf9\x24\xc5\x87\xfe\x83\x64\x40\x20\x16\x48\x68\x3b\x29\x58\x50\xc8\xa7\x9d\x5c\x8f\x81\x5f\x5f\x42\xee\xde\x43\xc1\x5a\x42\x62\xfb\xaf\xa8\x12\xb3\x7f\x84\x85\x87\x22\x93\x44\x52\xc5\xbc\xe4\x6b\x58\x17\xcc\xb3\xd4\x22\xd2\x62\x68\xc9\xab\x00\xac\xbd\x04\x55\x3c\x9a\xeb\xd7\x92\x19\xfa\x7e\x08\x90\x7e\x3d\x46\x49\x8c\x2b\x21\x59\xc2\xdf\xe5\x84\xb2\x6e\x4e\x88\xb5\x70\x42\x4d\xfe\x87\x4f\x51\xf4\x05\x5c\x86\x9a\x86\x12\xb8\x92\x21\xee\x4e\x89\xde\x2b\xf1\xb0\xf7\x9d\xcf\x7b\x61\x2a\xca\x8f\x2e\xf4\x0b\x46\x27\xfd\x1f\x1a\xee\xdf\xa2\x73\xbe\xbf\xac\x88\xad\xde\x59\xbb\x6b\xd8\x0b\xc2\x5d\x92\xbb\x09\x55\x18\x2f\x71\x73\x85\xff\xea\xab\xac\xf8\x0c\x3f\x9d\x5c\xc7\x5c\xf2\xc1\x84\x27\x79\xcf\xda\x20\x24\xb7\xd9\xe6\xef\xdf\xb9\xed\xed\xd0\xfb\xf7\xd5\xd1\xb3\xb2\xcb\x74\xa6\x68\x9d\xc2\x76\x3e\xe1\x78\x6e\x28\x30\x7d\x39\xa2\x2d\x4c\x49\x49\x01\xd5\xa8\x95\xfa\xe3\x23\x22\x58\xad\x03\x2c\xe8\xb2\x5a\x59\xa9\x44\x8a\xa9\x71\x89\xf0\xa7\xeb\x89\x6f\x2b\x91\xad\x24\x43\xa7\xad\x7d\x9d\xb4\x77\xca\x48\xec\x13\x61\xfb\x47\x2b\xfa\x9f\xa4\x20\xd3\xb6\x37\x29\x2c\x69\x53\x53\x82\x28\x5a\x9d\xa6\xd0\x48\x3f\x51\x68\xbf\x2a\xef\xda\x59\x8a\xef\x46\xde\x52\xd0\x31\xb1\xfd\xa3\x45\x29\xe4\x7d\x94\x3a\x20\x1e\x83\x19\x9e\xad\x91\xa4\x15\xab\xa8\xb6\x16\x3f\x3a\x15\x60\x49\xe6\xbd\x49\x02\x7e\x83\x85\x59\x46\xb4\x58\x8b\x9a\x96\x5d\xf0\x98\xe9\x05\xec\x10\x7e\x97\xb7\xf8\x94\x91\x74\xaa\xa9\x8c\x56\x6c\x88\xc1\xf6\x9a\x25\xae\x40\xf4\xc8\x38\x8a\x39\x3d\x6b\x43\x7b\x29\xa1\x37\x89\xa8\x61\xc1\x70\xda\xed\xfd\xe2\xbd\xfd\xfd\x5b\xd8\xde\xce\x58\x01\xb6\x74\x05\x55\x88\x11\x6b\x2b\xbd\xc6\x1c\x90\x9a\x86\xa3\x53\xde\xf7\x7f\x89\x90\x4b\x43\xc8\xfd\x92\x6c\xcb\xbb\x3f\x2f\xf5\xb3\xeb\xb6\x02\x57\xa8\xb7\x7a\xde\xf0\x64\xe1\xa9\x0a\xaa\x53\x15\xe8\x53\xf5\x6f\xec\x52\xc5\x19\x34\xcb\x51\x2a\x36\x80\x23\x7d\x63\x53\x24\x86\x7d\x7f\x85\x29\xa8\x51\xcf\x77\x01\x1d\x4f\x7d\xe2\x53\xc7\xb0\x03\xea\x17\xd3\xbf\x92\x29\x1d\xc7\xbe\x96\xb5\x39\x74\x25\x03\xd4\x30\x9c\x53\x4a\x9d\x1b\x6f\x61\x60\xa6\x0e\x2e\x26\x00\xdb\x02\xcb\x8b\x53\xff\x7b\xa5\xae\x35\xf8\x7e\x34\x1c\xfe\x5f\x95\x52\xaa\x03\xc5\xf4\x96\x7e\x0d\x44\x74\x71\x29\x2b\xb4\xe3\x4f\x95\x58\x2a\x35\xbe\x71\x66\x24\x9a\xa2\x43\xb9\xf7\x93\x56\xc6\x65\x60\xe0\x2f\x28\x78\x6b\x68\xf0\x6b\xa4\xc1\xec\xa9\xb6\x44\x7f\xd3\xa6\xe8\xb9\x8e\xdb
\x3c\xd1\x61\x31\x7b\x3a\x8f\xda\x2e\x9a\x27\xfc\x74\xbd\x32\xd9\x67\x22\xe8\xd5\xc9\x6f\xb3\x71\x70\xc9\x59\x50\x67\xe6\xa3\x3a\x80\xf5\x14\x90\x49\xe6\x65\xbd\x5a\x5f\xbc\x51\x3c\x70\x43\x3e\x07\x30\x04\x5f\xa3\x90\x03\x86\x45\xe7\xcc\x49\x7c\x69\x3c\xed\x14\x99\xc0\x7e\xa3\xd6\x7e\xc2\x66\x17\x63\x0b\xfd\x3f\x7b\x44\x73\x04\xd4\x72\xf4\x8d\x42\xdc\xfb\x14\x2f\x55\xb5\x9b\x91\x70\xaa\x56\xf7\x06\xb3\x3f\xcc\xc9\x51\x00\x85\xc0\x18\x58\x20\xec\x13\x38\x62\xc5\xfd\xca\xf0\x04\xc2\x3e\x86\x6f\xac\xa0\x5c\x2b\x4b\x61\x80\x43\x7f\x98\x3f\xae\xde\x2a\x8a\xb7\xe6\x53\xe0\xa0\xe6\x8b\x7b\x5c\x34\x27\x45\xf3\x8b\x00\x7e\xf9\x67\xce\xbd\x21\x42\x63\xfd\x35\xfb\x5d\xec\x5b\x21\x33\x57\x06\x90\x15\xdf\x8c\x2d\x4d\x40\xb4\x1d\x63\xa4\x04\xa0\x7b\xc9\xc2\x50\xc4\x64\xbc\xce\xf0\xbd\x55\x17\xa8\x77\xd2\x38\x66\xd7\x19\xef\xb1\x38\x36\xba\x70\x8b\x7e\x75\xd6\x18\xb2\x97\x1e\xd7\x8e\x79\xcb\x0f\x17\x1f\xf8\x2e\x86\x39\x39\x0e\x20\x85\x48\xf1\x49\xd2\xa0\xd9\x6c\x8a\x0a\xb8\xcb\x91\x45\x21\x9e\x2e\xb9\x02\x65\xd3\xca\x15\x28\x49\x65\xa1\x94\x37\x23\x06\x53\x53\x1b\x78\x3a\x35\xc9\xf5\xfa\x78\x31\x1a\x2e\x60\x82\x57\x4f\x16\x70\x89\x17\xdb\x0b\xb8\x98\x76\x9a\xf9\x6a\x62\xdf\xf0\x4f\x17\x73\x2d\xeb\x3c\xcb\x8a\x03\x37\x07\xe7\x54\x92\x5c\x1f\x52\xdf\x4d\x20\x75\x05\x44\xae\x84\xd0\xe5\xe0\xb9\x68\x82\x61\x90\x53\xc8\x5c\x93\x4e\x55\x60\xad\xce\x67\xba\x2c\x79\x0a\xd2\x8d\x80\xbb\x21\xe4\xae\x07\xcc\xcd\x30\xc2\x7c\xba\x36\xe6\xa5\x17\x92\x8b\x29\x3a\xc3\x9f\x4b\x2c\x34\x82\xd1\xb5\xfb\x30\x27\x87\x19\x26\x67\xd3\xa9\xe4\x66\xd3\x2e\x07\x27\x53\x64\xa9\x51\x2d\x6f\xb4\xbd\x3d\xa4\x74\xa5\x9e\x4e\x23\x38\x79\xbb\x59\xa9\xa8\x11\x91\xfe\xa8\x11\x8f\x35\x6c\xd6\x27\x6a\x3a\x4b\x6d\xa2\xf1\xbf\x93\x9e\x62\xd0\xff\x9f\x8f\xc7\xc2\x99\x91\xf9\x54\x9d\x20\x0f\xeb\x43\xc1\x8c\x7c\xc6\x20\x47\x44\x39\xfa\x9e\xd1\xa0\xdc\xe0\xbe\x6e\x2d\x60\x6f\x7a\xab\x87\xdb\xef\xdf\x26\x1e\xc6\x38\xab\x2f\xe5\x6a\x5c\x50\xf8\xbe\x34\x84\xc4\xea\x19\xc6\xc0\x7b\x41\xe6\x3e\x49\x28\xcc\x7d\x05\x0c\x63\x0b\xad\x97\x93\x34\x91\x97\x8a\x0d\x85\xbc\x5b\xcb\x5d\xe6\x00\x4d\xca\xb4\x94\x82\xae\x4d\x8d\x7a\x2f\xb1\xfb\xf7\xef\xab\xa9\xce\x48\x8e\x91\x4a\x9c\x8e\x85\x63\x59\x0b\xcd\x0e\xe0\x84\x23\xb0\x7a\x5c\x1d\x02\x54\x15\xe0\xad\x1f\x60\xf5\x26\x69\x9e\xf1\x14\x1d\xdf\x51\xe2\xc7\x86\x39\x58\x3d\x23\xe5\x40\xd2\x9e\x52\x89\xcd\x71\xd0\xae\x74\x4a\x96\x55\x9a\xd7\x3c\xf4\x2d\x39\x20\x66\x6e\x38\x3b\xb5\x53\x98\x81\x0b\x9f\x17\x36\xfb\xfd\x7b\x13\x53\x63\x7a\xd5\xf3\xe6\xa2\xe7\x71\x39\xe3\x3c\xb1\x16\x84\x16\x1c\xd2\x01\xc1\x44\xeb\x0a\x05\xef\x4d\xa1\xb3\x30\x51\x04\x98\xaa\xa0\x9a\x6c\xab\x62\x22\x60\x92\xf7\x3c\xe6\x7f\xb7\x36\x08\xb3\x99\xfa\xc7\xdb\x48\x68\xab\x4c\xa0\xba\x86\x22\x4d\xa4\xb5\x91\x6e\x90\x68\x83\xf8\x1b\x26\xf7\x93\x92\xc0\xab\xb5\x54\x52\xb8\xe2\xb1\x4d\xd8\x27\xfa\xf7\x4c\x2b\x26\xf2\x4a\x81\xd2\x19\x66\x37\xe6\xb4\xc6\x38\x3e\x9f\xae\xd3\xa1\x2d\x11\xe0\xfd\xc2\x1a\xfb\x7d\x5a\x14\xd1\x31\x83\x1c\x4f\xbb\xaa\x24\x29\xa4\xe5\x12\xee\x4e\x18\x72\x9e\x0a\xa1\x90\xeb\x29\x8c\x46\x80\x29\x79\x39\x90\xdc\x9d\x29\x76\x68\x36\x35\x53\x2b\x93\x0d\x3f\x1e\xe4\xc5\x6c\x21\x77\xeb\x49\x87\x95\x2c\x5e\x3d\x93\x63\xce\xdf\x32\x45\x31\x79\xac\xce\xcf\x78\xe8\x48\xaa\x5b\x41\x2e\xbf\x53\xaa\x7b\x7c\xf9\x5e\xae\x00\x44\x7b\x96\x62\x4c\xa3\xde\xf6\xf9\x14\x18\xec\xd3\x2e\xab\xc5\x55\x9e\xc9\x28\x9c\x97\xd6\x82\xa6\x12\xb1\xe6\x3a\xc3\xf9\xf7\x1a\x57\x82\xcb\x78\xe3\xa3\x09\xe9\x38\x57\x3c\xea\x07\xe4\x54\x4f\x72\xc5\x94\x9e\x29\xba\x6e\x9d\x5c\xe6\x16\x58\x2f\x45\x64\x81\x75\xcc\xa4\x76\x00\x6c\x02\x06\xbe\x9a\x45\x89\xb1\xbf\x55\x0
0\x91\x4b\x84\x87\xcb\x29\xad\x19\xd1\x9f\x9b\x6d\x63\xc5\xb6\x5d\x21\x5e\x1a\x2e\x60\x67\x6a\x52\xfd\xbd\xd1\xf4\xcd\x53\xe3\x1d\xe8\xeb\x6b\x8b\xc2\xb7\x6e\x20\x51\xd2\x67\xbd\xe8\x2c\x16\x9c\x5d\xdb\xfd\x61\xb3\xfb\x67\xfd\xa2\x4c\x8a\x34\xb9\x50\xcc\xf6\x1a\x23\xf6\xfb\x36\x91\x8f\x24\x76\x7f\xbc\x9b\x3a\x1e\xa7\x3a\xb8\x07\x66\x24\xe2\x65\x55\xb0\x6e\xa3\x78\x51\xd1\xa4\x56\xcc\x84\x58\x7e\x99\xf8\xcb\x4f\x93\x29\x17\xb2\x97\x49\x11\xe1\xcc\x4e\xa6\xda\xf6\xf7\xc5\x78\x78\xcd\x38\x30\x41\x29\xec\x4f\xbb\xc2\xe2\x9a\x30\x23\xa3\x09\xbf\x8e\xfc\xef\x75\x38\xf9\x3c\x2d\x7a\x64\xb9\x77\xc5\x7d\x59\x93\xfd\xe4\xd8\xda\x4b\x02\xcb\xb1\x8e\x0b\x0d\xd0\x32\x14\x5c\xa6\xb9\x68\xd3\xa5\xe5\xd7\x03\xed\xe2\x6b\xb4\xdd\x25\x70\x2a\xbc\x30\x27\xdf\xa6\x20\x61\x08\xa3\x6e\x4b\x06\x4b\x2e\x62\x3e\x30\x8a\xd3\x7d\xf3\xde\x1d\x63\x39\x56\x80\xe6\xc5\xb9\xb0\xf4\x42\xcf\xc8\x11\x8e\xa7\x16\x88\xa2\xb0\x54\xc3\xdf\x51\x48\x64\x81\xc5\x85\xcd\xae\xea\x19\x0e\xad\x21\xfa\xa4\x14\x09\xdb\xb5\x23\x38\x76\xfb\x79\x6b\x37\x42\x29\x7c\xf7\xc9\xa6\x16\x8e\xa6\x11\x9f\x35\x3f\xb5\x90\x7e\x6a\x26\x06\x25\xeb\xdc\x61\x5d\x06\xb7\x2e\x8c\x1a\xa9\x5a\x9a\x15\xde\x3b\x8d\x57\xdf\x50\xc9\xf3\xce\xea\x3e\x2a\x06\x57\xf2\xbf\xbb\x93\xa3\xff\xf0\x4e\x8e\xfe\xfe\x4e\x7e\xb9\xdb\x4e\x7e\x59\xbb\x93\x7f\x7f\xef\x46\xff\xf6\xde\xa9\x13\xda\x01\x4d\x7a\xfb\x48\xd2\x8d\x9e\x74\x04\x59\x97\x54\xaf\xd0\x8d\x65\x39\x33\x72\x99\xc2\x13\x48\x90\xdd\x5a\x18\x24\x25\xc7\x86\xd3\xbb\x52\xff\x44\xe8\xa7\xa1\xae\x7e\xaa\x7f\xe6\x25\xc5\x46\x16\x27\x59\x50\x38\x9d\xb6\x25\xce\x36\xe1\x15\x95\xc9\xb8\xc5\x2f\x6d\xd2\xf3\x2e\x06\x21\x0b\x78\xb0\xea\xa7\xb6\x89\xfe\x9d\x1d\x7e\x7d\x92\xdf\xc8\x75\x4e\x7d\xcb\xed\xad\x1e\x7d\xba\xcb\xb2\x21\xef\x59\xb2\x9c\x71\x77\x55\xa2\xbc\xdb\x94\x6b\x66\x82\xfa\x77\xb7\x6a\x9a\x45\xc5\x00\xbd\xef\xe4\x5d\xdc\x15\x47\xca\x8a\x61\xd6\xb1\xee\xbb\xe4\x2f\x9d\xba\xd0\x43\x67\xc0\x13\x42\xe4\x2d\xfe\xbf\x85\x67\x18\xba\xf2\x4a\xdb\x5b\xe5\x0f\xd7\xfb\xf2\xfe\x3d\x4f\xdd\x9a\x5f\xae\x30\x7e\xb9\xc2\xf8\xe5\x5e\x06\xc8\x14\x94\xfe\xb6\xe8\x98\xdd\xf0\xb7\x55\xb2\x87\x76\x6f\x5d\xf6\x6e\xbf\xbb\x7b\xab\xe8\x76\x6f\x3d\x9d\xa2\x73\x3c\x2f\xf6\x63\x77\xda\x7d\xbc\x6a\xfe\x78\x6c\xa8\x5f\x57\xbd\xe9\xfd\x54\x09\x3f\xda\xbb\xed\x6c\x7a\x97\x58\x9a\x95\x52\x12\x2b\x8b\x68\x16\x4e\xda\xde\xf8\xaf\x19\x39\x98\x16\xdc\xf0\xe7\x9a\xb0\xff\xbc\xc8\x81\xa8\xcd\x14\x4e\x0f\x83\x9a\x3e\x09\x8c\x8a\x2a\x11\xcc\xee\x14\x9d\xda\x51\x3b\xd1\x3e\xce\x7e\x5a\xa5\x53\xac\x19\x50\xcc\xb9\x5f\x16\x82\xf5\x3a\x36\x24\xe1\x5a\xdc\x4f\xe7\x57\x69\xbf\x41\xfc\x77\x30\x63\x22\x41\xbe\xa6\xe2\x36\x70\x0f\x16\x14\x7e\x4e\xbb\x53\x88\xb5\x44\x23\xd5\xa7\x33\xac\x12\x9f\x7c\x8a\xe1\xd8\x87\x19\x39\xd3\xdf\xde\xac\x13\x62\xa2\x92\x9a\xf2\xf7\xab\x18\x73\x6a\x4d\x5d\x6b\x22\x07\x0f\x2d\xf8\x32\x45\x2f\xa7\x4b\x39\x89\x5f\xa6\x18\x4c\xbb\x6a\xb1\x5e\xfd\xd2\x13\xf2\x76\x0a\xc6\xe9\x7f\x6c\xf5\x96\xec\x7a\x16\x6d\xf8\x1a\x7e\x99\xb6\x7b\xeb\xd4\x38\xb4\x72\x9b\x3e\x98\x8a\x66\x3d\x4d\xc4\x11\x02\x70\xb7\xaa\xdd\x2b\x1c\x87\xb3\x9a\xc3\x87\xd6\xf8\x7d\xc8\x6b\x3a\xb5\x11\xe2\x7e\x58\xa6\xb0\x5a\x26\x2d\x31\xb8\x59\xd9\x92\xe8\xb6\xed\x69\xdd\x64\x3d\x08\x39\x0f\x50\xb2\x6c\xf8\x18\x2e\x88\x39\x5f\x87\x05\xaf\xff\x43\x31\xb3\x6b\xd4\x77\xc6\xf0\x89\x88\xf5\xc8\xab\xb1\x2f\x1f\x85\x26\x90\xcf\x2b\x20\x85\x0f\x77\x3a\xb5\x0a\x61\xf5\xac\x8d\xb7\x85\x5c\xfa\x63\xda\xc8\x87\x81\x3c\x02\x46\xae\x72\xcb\xb1\x4e\xb1\x5c\x5d\xab\xb3\xc8\x24\x1e\x6c\xf6\x56\x0d\xc6\xa7\x8c\x1c\x4e\x57\xbe\x3c\x59\xe6\x24\x96\x9e\x35\x40\x81\x8f\x8a\x25\x9b\x4b\xc6\xa5\x55\x
91\x89\x17\xb8\x6c\x4f\x17\xf0\xb2\x50\xe6\x7c\x9c\xde\xe2\x8b\xcf\x6c\x16\x14\x19\xa7\xfc\x5a\x8e\xe2\x36\x85\x80\x76\x49\xd1\x0c\x47\x1a\xe7\x13\x9d\xb7\xdd\x78\x43\xd7\xec\xd0\x3d\x6b\x43\xd6\x81\x77\x15\x8f\x88\x0a\xe7\xa0\x21\xee\x43\x40\x18\x66\xb7\x38\x67\xc6\x86\x91\xaf\x7a\x3a\xf4\x96\x60\xa8\xdc\xfa\x82\x47\x5a\x39\xd9\x77\x9d\xf9\x7f\xcf\x6c\x6b\x13\x2b\x70\x87\xce\x6d\xe5\xb7\x1f\x90\xdb\x57\x57\x43\xc1\xff\xc8\xe2\x16\x00\xb7\x6a\x9b\xb9\xed\x1c\xa7\x05\x34\x2e\x28\xbc\xba\x4d\x6d\x53\x93\x39\x6b\x07\x2e\x24\x1f\xa7\xb0\x13\x80\x11\x27\x41\x49\x2a\x83\xf8\x62\xb0\x8d\x1e\x74\x8f\x4c\xa6\x8a\x37\x3e\x5c\xf8\x14\x2e\x7c\xf2\x72\x4a\x21\xa1\x50\x3e\xb6\x6b\xaa\x0a\x54\x4f\x6e\x2d\x3d\x79\xb5\xf4\xa4\xac\x3d\xb9\x87\x1e\xc6\xad\xaf\xdb\x59\x7a\x48\xb4\x2c\x8a\xde\x28\xf3\xf0\xc8\x3c\xbc\xb2\x53\xea\xd8\xfe\x3f\xff\xb7\xb5\x12\xf8\xb1\xb4\x17\x4d\x34\x67\x70\x47\x4f\xdb\x5f\x07\x4b\x06\xee\x0b\x9f\xbc\x98\x76\x4b\x13\x3e\x8b\xb9\x62\x43\x97\x64\x89\xaf\xf4\x36\x3f\xfc\x54\xc0\x83\xff\x22\x69\xf2\x5b\xcd\xeb\x39\xf6\xe8\xd3\x07\x91\x2d\x79\x26\x49\xe2\x26\x74\x6c\x05\x4c\xb2\x81\xb5\x91\x38\x09\x3c\xf8\xaf\x7f\x66\x7f\x90\x2b\x36\x65\xda\x7d\xdc\xf9\xad\x1a\x1d\xc5\xd5\xfe\xf3\x81\xa2\x89\xe5\xa3\xc2\x15\xe8\xc5\x82\xa9\x7b\x3f\x29\xe4\xce\x04\xe8\xa1\x2e\x04\x9b\x4c\xbe\xf1\x20\xc2\x84\x00\x45\x76\x1a\x38\x9f\x76\x05\x4b\x16\x51\x92\xe2\x9f\xc9\xef\x7f\x8a\xdf\xff\x4c\x74\xb0\xe4\x3b\xad\xa9\x51\x2f\x67\x82\x33\x8b\x42\xd2\x5f\x5b\xee\x06\xeb\x6e\x6f\xc1\xe8\x21\x7c\x12\xe4\x7c\xaa\x8b\x27\xa2\xc5\x38\xff\xef\x41\x8e\x62\xfd\xf9\x4d\x6a\x16\x9e\xa9\x39\xc0\x79\x71\x80\x25\x1e\x60\x7e\x67\xec\x08\x07\x3e\x61\x6a\xa9\xff\x03\x68\xf2\xbf\x65\xde\x6d\x53\xfc\x4f\x23\xcc\xff\x81\x05\x2f\x49\xf4\xd2\xc7\xfc\x6d\xd4\x29\xfa\xad\x41\x16\x3a\x0f\x24\xb7\xd9\x36\x60\x99\xa6\x6f\xe0\xab\x3f\x7d\x48\x5d\x6e\x7b\xef\x21\x52\xbf\xde\x60\xf1\x26\xb6\x83\x39\x75\xfa\xe3\x03\x62\xed\xf3\x59\x91\xf4\x5c\xa1\xb3\xbd\x14\x6b\x4d\x51\xe7\x80\x58\x7b\x41\x24\xab\xb6\xb7\x9c\x98\xac\x5c\xf5\xec\x47\x1d\x16\xc4\x42\xe2\x9a\x93\x57\x53\xc8\x6d\xef\x00\x72\x9b\x9d\x41\x6e\x67\x58\x8c\x7e\x0a\x3e\xb0\x1a\x72\x2d\x73\x7d\xbc\x9d\x6a\x94\x7a\x59\xa2\xd4\xe9\x14\xab\x02\x9e\x52\xc8\x48\xd2\x07\x6b\xa7\x28\xea\x5b\x74\x9d\x94\x5d\x03\xdd\xf5\x78\x69\x69\x4b\xd2\x31\x27\x3f\xa7\xc0\x21\xd5\x19\x61\x3e\x68\x5d\xb2\x9a\xe3\x73\x5f\x47\x81\x2d\x28\xc8\xbe\x2b\x05\xf0\xbe\xcb\x05\xe4\xfd\x36\xe4\xf2\x2e\x21\x91\x40\xbd\x87\x09\xa4\x67\xfd\x3b\x7a\xc6\xfa\x9d\x1d\xbd\x27\x28\x57\x3e\x17\xbc\x37\x4f\xf3\x5e\x96\x9b\x8b\x19\x4b\x64\x4f\xa6\x3d\x5d\x93\x79\x89\x25\x1f\xab\x37\x6f\x39\xeb\xbd\x0f\x4f\x19\x79\xcd\xc9\x8c\xb0\xbe\x0e\x3f\xae\x7b\x60\xa7\x49\x18\x89\x89\xa6\x35\x5e\xea\xbc\xe6\xe4\x26\xa5\xe0\x6f\x3a\xd6\x9e\x7e\x5b\xb1\xef\x6a\xea\x69\xbf\x9b\x1a\xb4\x29\x2a\x8c\x07\x5b\xc6\xe3\xd0\xa8\x9f\x2a\x5f\x72\x73\xac\x36\xd0\xc3\xf8\x26\x30\xf9\xba\xb0\x5e\x4f\xe7\x5b\xd0\x67\x7d\x83\x58\xf7\x4d\x0a\x22\xd7\xda\x78\x1b\x99\x38\xb0\xb0\xaf\x2d\xda\x0f\x2d\x0a\xde\x9a\x79\xfe\x9f\x19\x68\xbc\x9a\x46\xc7\x64\x72\x5f\xa5\x27\x69\x1f\x99\x90\xcc\x02\x61\xb3\xb3\xa6\x2c\xaa\xdb\x91\x2b\xc2\x1e\xde\xc1\x8a\x5c\x6a\x86\x30\x05\xbe\xf5\x28\x98\x85\x07\x75\x26\x9e\x12\x74\xc3\x3b\x47\xdf\xbf\x67\x64\x39\x95\xb4\xac\x41\xd7\x27\xa3\x02\xa5\x14\x8e\x7c\x22\x68\x6b\xc2\xe5\xa5\xef\x5c\xaf\x18\x58\x27\xe6\x19\x44\xbe\x9c\x18\xe0\x35\x27\xdf\x7c\x92\x2c\x81\xbc\xfe\x70\x6b\x89\x88\xfc\x27\xc7\xdf\xe5\x31\xd7\x1a\x70\xb5\xaa\x62\x45\x2f\x5b\x29\x7a\x4e\x7c\xb5\x41\x57\x94\xc2\xbe\x4f\x92\x31\x1a\xc9\xfd\x3e\x08\xb8\x37\xa2\xd4\xd9\
x93\xa5\x9e\x54\x2e\x28\x64\xfd\x96\xf4\x61\x66\x0d\x65\x43\x17\xc4\x6b\x09\xce\x30\x92\xdf\x64\x2f\xec\xf2\xd1\x62\x37\x0b\xed\xb4\xb8\xd6\x1b\xd1\x3b\x59\x18\x2f\x44\xb4\x65\x52\x8a\x21\xda\x79\x7f\x35\xec\xb5\xfa\xc4\x96\x81\x0e\xd4\x96\x2b\x20\xf3\xfa\x45\x9c\xb9\xba\x41\xd1\x33\x74\x81\x91\xb1\x26\x36\xb4\x80\xaa\xc2\x05\x65\x55\xed\x22\x3b\xd5\x2e\x71\xbf\x5e\x4b\x20\xe8\x77\x4b\x19\xe8\xfe\x24\x8b\x14\x88\xa7\x3e\xbc\xe6\x4d\x5d\x86\x28\xad\x05\xb5\xac\x71\x1f\x63\xf2\x99\x91\xf7\x3e\xaa\xc7\x96\x5c\xc4\x3d\x16\x5c\xf0\x1e\xfe\x3b\xb8\x8e\xe2\x38\x9d\x99\x1f\x66\xa6\x06\x0d\x20\x9e\x94\xe9\xf5\x52\x6c\x8f\xb6\x70\x27\x86\x05\x5f\x74\xbc\xee\x2b\x1a\x1b\x16\x14\xa6\xfd\x15\x15\x52\x14\x12\x9d\xa4\xbe\xa6\x3d\xef\xf0\x1c\x2b\x11\x97\x31\xac\x06\x0a\xf8\x86\x6a\xf1\x56\xb5\xd9\x77\x1a\xa2\x65\xbb\xe7\x7a\xd4\xc4\xf6\x2f\x41\xc1\x50\x69\x28\xe8\xff\x9f\x4c\x07\x0f\xd1\x07\x01\x2d\xbc\xb6\xe7\x2f\x3a\x29\xe2\xa4\x6f\x3c\x9d\x2e\x35\xf9\xf1\x2c\x0a\x17\xfd\xbb\xa8\x90\x6a\x7c\xa8\x48\x67\x6d\x41\xc3\x4a\x3a\xdc\x44\x19\x11\xfb\x2e\x45\xef\xcf\xc8\x65\xbf\x8d\x37\x5d\x32\xdd\x0d\x46\xc3\xdb\x63\x84\x44\x29\xeb\x5d\xb7\xf3\x93\xdd\xe1\xfb\x87\x65\x7e\x6e\x13\xbe\x7f\xa6\x99\x29\x7a\x0b\x53\x58\x05\x90\x41\x23\x66\xbf\xf4\x85\x3c\x52\x70\xfe\x3f\x4e\x2e\x0e\x39\x99\xf4\xff\xbb\x68\xc5\xca\xe0\xcb\x84\x42\x56\x9b\x78\xd1\x07\xeb\xcd\xae\x05\xc8\x3f\x23\x66\xd4\xf7\x34\x55\xef\x31\x89\x4d\x37\x01\x16\x2e\xa3\x65\xb3\x62\x0a\x96\x1a\xd9\x59\xd5\xa8\x55\x90\xc1\xf2\xc3\x9f\xaa\x76\xad\xab\x0c\x7a\xde\xdc\xbc\x97\x9d\x56\x6d\x05\xab\xad\x1b\x8e\xeb\x13\x92\x1c\x6f\x2b\xbc\x64\x76\xa6\x6c\x2d\x14\xcc\x16\x2c\x7b\xdd\x1c\xa7\xf0\xd9\x37\xe6\x12\x0a\x52\x97\xbe\x00\xa1\x88\x20\xd7\x44\xb0\xdf\x07\x09\xf7\x86\x25\x11\x5c\x50\x98\x19\xf6\x6e\xd3\xa2\x30\xd7\xd7\x7e\x1a\x60\x3e\x1b\xfd\xeb\x5a\xed\x13\xec\x99\x6e\xc6\x47\x05\xbe\xeb\xdf\x09\x9b\x5a\x14\x9e\xf7\xdd\x5f\xbb\x4e\x95\xe2\x1c\xbc\x89\x63\x74\xde\xd6\x02\x8e\x75\xab\xbe\x6b\x2d\xe0\xaa\xe8\x5d\x95\x3e\x51\x2d\xc7\xc5\xaf\x05\xec\x94\x3d\x4c\x49\x06\x6c\xd7\xd7\x0b\x78\xd3\xef\x4e\xae\xd4\x0a\xd8\x2b\x45\xce\x9f\xf7\x1b\x00\x7d\xdc\x2f\x48\xa0\x21\xf6\xc6\x23\x70\xd9\x11\xf0\xaa\xdf\xc8\x22\xbf\xd3\x6f\x02\xef\x71\x7f\xb1\xd0\x46\xa6\xb1\xd5\xab\xa2\x19\x17\x14\x0e\x6e\x65\x9d\x97\xa2\xf6\x37\x66\xe4\x8d\xc1\xa6\xab\xc1\xa7\xb5\x68\xf9\xf7\x8c\x08\x7b\x17\xb3\x92\x98\x68\x8a\x46\x6c\x85\xde\xe5\x6f\x48\xb5\x9f\xf7\xe1\xaa\x0f\x3b\x7d\x50\xab\x5b\x64\x0e\xbb\x16\xe9\x84\xcb\x4b\x9e\x67\x76\x94\x3e\x08\x52\x3f\xd3\xdb\x18\x25\x17\xfa\x62\xc2\x12\x76\xc1\xc5\x03\xbd\x0d\xaf\x79\x7c\x6d\x2d\xbe\x52\x38\x5a\x8f\x8d\x97\xdc\xb2\x8d\x54\xc0\x82\x00\x23\x9e\xac\x6d\x8c\x35\x2a\x02\x1b\x4c\xb6\x16\xb1\x6a\x80\x85\x2f\x3e\x49\x6c\xff\xb4\x89\x81\x8d\x4f\x53\xef\xba\x26\x20\xbc\xf5\x49\x2d\x9e\xf2\x73\xdf\x4d\x31\x83\xae\x12\x21\x4f\xfa\xdd\x95\x9b\x3f\xf8\x60\xe1\x5a\xa2\x6b\xbf\x92\x83\xe0\x73\x9f\x58\x99\x9c\xc7\x3c\xbb\xe4\x5c\x96\x8e\x53\x71\xca\x02\x74\x9a\x12\x24\xc7\x7c\xb1\xa5\x8f\x1d\x17\x22\x15\xa6\xe9\x9c\x13\xeb\x25\x8b\x62\x1e\x28\x8a\xaa\x9e\xe9\xed\x1c\x1f\xf7\x42\x91\x4e\x74\x3d\x12\x6a\x82\xdf\x74\x3a\xc4\xdd\x84\xfc\xf2\x9f\x3b\x67\x02\xfc\x7d\xe7\x0d\x03\xff\xc0\x69\xe5\x11\xfb\x63\x45\x11\xb6\x90\x6e\xb2\x85\xa3\x7e\x6c\x02\x73\xe6\xe4\x75\x0c\xd6\xff\xb2\x80\x24\xba\x10\x12\x7b\x08\xfa\xde\xd8\x52\xac\xc1\x21\x86\x83\x7d\x89\x15\x87\x90\xc1\x89\xc2\xf2\xb3\x71\x95\x34\xce\xa9\x32\xc9\x25\xb6\xe7\x29\x56\xd1\x66\x98\xab\x71\x01\xfe\xd0\x99\x4a\xf2\x52\x50\xf0\x1f\x3a\xcf\x63\xf0\x1f\x3b\x6d\xa9\x2b\x9f\x38\xbb\xe4\xaf\x17\x7a\xed\x91
\x98\x3f\xaf\xc1\x8c\xb5\x58\x2c\xe8\x33\x91\xba\xbf\x3e\xb0\x28\x71\x7e\x45\x49\x24\x9d\x97\x82\x9c\x44\x94\x0c\xd5\x4b\x12\x7b\x2f\x9e\x8c\xcb\x42\xe8\xc6\x35\x2a\x4c\x05\x41\x76\xbb\x17\x25\x3d\x49\xf1\x8f\x18\x63\xb6\x1f\xcb\x75\xf9\xf8\x92\x3c\xa2\x4e\x42\xc4\x5f\xfc\x2b\xc8\xbf\xf8\x57\xea\xa8\x4b\x57\x5d\x2e\x08\x0e\x09\x22\xa5\x0e\x5e\xb9\x22\x5d\x10\xc5\xd1\xd0\x67\xff\x6f\x00\x00\x00\xff\xff\xc1\x46\x06\xd3\x92\xa7\x01\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xdc\xbd\x09\x7b\xdb\xba\x92\x28\xf8\x57\x64\xb6\x5b\x17\x38\x2a\x31\x94\xb7\x24\x94\x71\x35\xce\xe2\x2c\x27\xb1\x13\xc7\x59\x7d\xfc\xf2\x40\x0a\x92\x69\x4b\xa0\x02\x82\x92\x1d\x4b\xfd\x37\xe6\x07\xcd\x1f\x9b\x0f\x00\x17\x90\x5a\x92\xd3\x7d\xfb\xbd\x37\x73\xef\xf9\x1c\x8a\x2c\x14\x0a\x5b\xa1\x36\x14\xb6\x06\x29\x0f\x65\x14\x73\xc4\xf1\xbd\x93\x26\xac\x91\x48\x11\x85\xd2\xe9\xe6\x1f\x1a\x02\x71\x10\x20\xf1\xbd\x60\x32\x15\xbc\x21\x5d\x4a\x38\x48\x77\x40\x04\xc8\x45\x01\x36\x42\x25\x88\x40\x3b\x20\xa1\x40\x2d\x8a\x0f\x76\x6d\x39\x3a\x24\x80\xe3\xc5\x02\x97\xa8\xfa\x88\x59\xa8\x76\x81\x95\xa8\xe4\x32\xaa\x8d\xd8\x19\x92\x60\xf0\xdb\x15\x48\x94\x5a\x15\xec\x41\x5a\x56\xc0\x96\xb1\xfd\xdd\x3a\x53\xc4\xa0\xa8\xd5\xae\x36\x44\xd4\xaa\x76\x1f\x68\x59\x6d\xba\x8c\xf0\x5f\x40\x09\x45\x29\xd8\xb4\xd8\xc4\x30\x14\x5a\xc4\x1c\x40\x58\x12\x43\x97\x71\xfe\xf7\xd0\x17\x22\x0a\x35\x0a\x6d\x12\x53\x14\x5b\x24\x3e\x84\xb8\x24\x31\x5c\x46\xfb\xbf\x8c\xea\x18\x85\xb0\x4c\xb7\x4d\x38\x45\x91\x45\xf8\x23\x88\x4a\xc2\xe3\x65\xcc\xff\x3b\xdb\x12\xa1\x18\x56\xb6\xc6\x6e\x4e\x8c\x06\x56\x73\x1e\xc3\xa0\x6c\x4e\xb4\x8c\xfc\xff\xb0\x16\x0e\x50\x04\xeb\xda\x68\x37\x72\x56\x63\x73\x3b\x84\x10\xee\xd2\x1e\x77\x07\x48\xbd\xf7\x55\x3d\x48\x5a\x25\xee\x4c\x09\x28\x89\xdc\xad\x96\x01\x56\x94\x42\xcc\x2a\x78\x9b\x17\x84\xb2\xc9\x7b\xf5\xa2\x90\xda\x85\x51\x6a\x95\x9f\x96\xe5\xa1\xec\xc7\xfd\x65\x0c\x40\x6b\x38\x10\xb5\xd0\x0c\x6c\x34\x50\x8e\xd1\xc1\x2a\x44\x10\x2e\xa3\x42\xa1\x85\x2d\xa8\x62\x83\x72\x1a\x3c\x5c\x8d\x0f\xe2\x95\x18\x51\x8c\x17\x53\x2a\x1a\x11\xe9\xa3\x72\x38\xcd\xd0\x0c\x62\x81\xd4\x37\x46\x8e\x84\xa0\x77\x88\x63\x48\x89\xd7\x4d\x0f\x79\x37\x6d\xb5\x30\xbb\x48\x2f\x89\x44\xa2\x95\xe2\x6e\xce\xff\x17\x18\x12\x32\xaa\x60\x2a\xf1\xc8\x12\x0f\x23\x5e\x97\x1d\xf2\x66\x53\xb8\x41\x97\xb5\x5a\x58\x5e\xb0\x4b\x22\x5c\x0a\x82\xa8\x57\xc5\xe6\x37\x62\x7c\x28\xaf\x08\x83\x53\xb5\xb5\xe0\x05\x86\x6d\x82\xea\x15\xe4\x8b\xe5\x82\x5f\x2e\x30\x6c\x6a\x48\x8e\x10\xd2\x8c\x16\x86\x81\x12\xaf\x4b\x0f\x59\x97\xb6\x5a\x38\xbd\xa0\x97\x44\x5e\xd0\xcb\x9c\x82\xf4\x82\x5f\x12\x01\xe9\x02\xc3\xfa\x66\x89\x1c\x6b\xde\x53\xb2\xd5\xc9\xfb\x4a\x96\x7d\x25\x2e\xd2\x02\x2f\xbb\x90\x97\x84\x03\xfb\x7d\x7a\x15\x32\xa6\x91\x09\xa2\x56\x8f\xbc\x48\x2f\x41\x14\x5d\x2f\x7e\x13\x53\xbb\xd3\xf5\x0e\x09\xeb\xb2\x76\xbb\x40\xc4\x6a\x88\x70\xf7\xef\xb4\x75\x45\x4b\x39\x52\x6d\xad\x4c\x8b\xff\xf2\xa8\x68\x8e\xd1\xa2\xa0\x46\xa7\xc0\x9c\xae\xc4\x5c\xcc\x9e\x64\x14\x85\x4c\xb7\xe0\x17\x14\x88\x92\x02\xde\x66\x40\x73\x3a\x5a\x28\x2d\xa8\x3b\x4c\x7b\xf9\xa3\x9f\x62\x0c\x21\xf1\xba\xe1\x21\xeb\x86\xad\x16\xa6\x17\xa1\x1a\xdf\xf0\xb2\xab\x70\x9a\x2f\x69\xfe\xa5\xc5\xd4\x9c\x0a\x8b\xb1\xa7\x2b\x26\x93\x35\x8a\x6b\x3e\x85\x31\x4f\xe2\x11\x73\x47\xf1\x10\xf1\x96\xe3\x37\x9c\xd6\x18\x61\x0c\x62\x81\x4b\xe9\x71\x8c\x72\x78\xe7\x30\xe2\x92\x09\x4e\x47\xc9\x3f\x9d\x92\x6f\x5c\x29\x5e\x2d\xaf\x44\x3c\x6b\x3c\x17\x22\x16\xc8\xb9\x92\x72\x92\xf8\x0f\x1e\x0c\x23\x79\x95\x06\x6e\x18\x8f\x1f\xb0\xd1\xf8\x41\x18\x0b\xf6\x20\x18\xc5\xc1\x83\x8e\xeb\xb9\xde\x83\xab\x88\xcb\x
e4\x81\xd3\xe2\x2d\xc7\x1d\xf7\x1d\x8b\x15\x0d\x6b\x53\x04\x18\xb9\xb8\x84\x94\x4c\x74\x37\x7b\xc0\x70\x37\x6d\x36\x91\x24\xcc\x9d\xc4\x13\x84\x71\x57\x7d\x93\x2e\x05\xe9\x06\xfa\xbb\x35\x9c\x05\xd6\x49\xc9\xee\xa3\x01\xea\x78\xde\xa1\xc4\xf9\x74\x72\x27\x69\x72\x85\x4e\x75\xbd\x18\xb6\xbc\x6e\x34\x40\x9c\x10\x22\x32\x08\xf3\xc6\x89\x83\x6b\x16\x4a\x67\x8b\xc8\xbb\x09\x8b\x07\x0d\x3e\x9f\xf3\x74\x34\x52\xfc\xb1\x78\xca\x8b\x38\x79\xc5\x0e\x29\xc0\x9b\xcd\x2b\xb4\x8f\x61\xab\xd3\xcd\xdb\x96\x36\x22\xde\xe0\xee\xf6\xa1\xd7\x6c\x22\x4e\x06\x52\xf1\x32\xa1\xfe\x55\x84\x70\x1c\x0d\xd0\xd6\x04\x71\xbd\x32\xd5\x1f\xd9\xea\xa8\xe6\x65\x54\x75\xba\x39\x79\x9a\xe5\xde\x90\x11\x1a\x62\x38\x22\xab\x07\x7c\x6b\x98\x4d\xdc\x72\x74\x3f\xe4\x13\x77\x65\xeb\xf2\xee\xd1\x3d\xd1\xf3\x7c\x7e\x28\x7a\xed\x8e\xdf\x51\x7d\xb1\xc5\xdd\xed\xec\x3b\x92\xe4\x03\xe2\x8a\xd3\xba\x14\xe3\xf9\x3c\xfb\x1d\x80\x70\x03\x8c\x7b\xd2\x57\xbf\x42\x10\x6e\x88\x75\xbb\xbb\xdc\x0d\x34\x9f\x6e\x36\xb7\xaa\x65\xbb\x9c\xe8\x72\x8a\x63\x17\x43\x28\xe7\x73\x85\xad\xd7\xf1\x85\x1b\xa8\xfa\x3d\xb3\xc1\x3c\x5f\xd3\x4c\xd3\x28\x7c\xe8\x2d\x30\x5c\xaf\x65\xed\x19\x50\x67\xfd\xf2\xf0\x0e\x3f\xe4\x0b\x7d\x2d\x04\xc9\x41\x30\x3c\x5d\x26\x27\xfb\x5a\xf0\xc1\x43\xaf\xd7\x97\xbe\xe8\x8d\xa4\x9f\xc8\x05\x86\x57\xc4\x2b\x87\xe2\xd4\x46\x7d\x4f\x7d\x0e\x81\x2f\x16\xe5\xf4\xfd\x5e\xe5\x45\x39\x04\x84\xbe\xb4\xa0\xce\x0d\x96\x7c\xd9\xdc\x2f\x74\x8f\x4b\x3d\xcb\xb0\xd9\x22\x2e\xe4\x65\xf9\x52\x98\x97\x42\xbd\x2c\x78\xab\x2a\xfd\x85\x8c\xd0\x99\x35\x53\xce\x0c\x62\x35\x4f\x94\x86\xc9\x87\xd6\xb4\x2e\xe6\x49\x4b\x64\x53\x23\xc8\x5f\x89\xae\x61\xf2\x27\x66\x90\xb3\x11\xee\x5a\x9c\x5a\xcd\x06\xf3\x16\x33\xc2\xdc\xa0\x04\xcd\xc7\x5f\xd3\x73\x4c\xee\xb7\x7d\x6f\x51\x12\x74\x52\xe9\xaf\x6d\xbf\x03\x45\x9f\x29\xf8\x37\x64\x84\x4e\x2c\xfa\x3f\x2b\x36\x95\x57\x2b\xc8\x31\x48\xc2\x33\xde\xdb\x95\xed\x76\x17\x0b\x55\xf1\x85\xac\x6e\x5c\x45\xf1\x67\xd5\xe2\x17\x97\x16\xd9\xc2\x70\x0f\xee\x52\xab\xa4\x82\x7b\xbd\x51\x04\xba\xb8\xec\xea\x35\x20\x95\x74\xa2\x66\x3c\x48\x22\x55\x2f\x18\x74\x7a\x77\xd2\x2c\x8d\x96\xec\xec\xb3\x92\x43\x31\x7c\x24\x48\xd6\x30\x2b\xb6\x56\x30\x15\x1b\x77\xb3\xc9\xaa\x15\x80\xee\x66\x9c\x9a\x6a\xee\xca\x6a\x80\x55\xab\x4a\x55\x55\xe1\x52\x3d\x4a\xe8\xcd\x6b\xa2\x4b\x35\x35\x9b\xe9\xaa\xea\x20\x25\xa9\x1b\x60\x6a\x2a\xbd\xad\x56\x0a\x69\xb5\x62\xaa\x2a\x66\x2b\x2a\x56\xd2\x72\x5e\x75\xb8\xba\xea\x66\x93\xae\xaf\x1f\x28\xa1\x6e\x80\x43\x43\xc5\x74\x99\x0a\xa0\x55\x4a\xc2\xda\xea\x97\x50\x2a\x26\x9f\x91\x9a\x14\x6e\x12\x0b\xb9\x8e\xc5\x68\x6e\xae\x59\xf9\x42\xff\x07\x3f\x2b\xfc\xe8\x37\xb1\x91\x99\x82\xb4\xe7\x25\x21\x24\x91\x3d\xcf\x57\x0f\x7d\xa9\x79\xb2\xae\x60\x1d\xa3\xe2\x2d\x25\x83\xc1\xcb\xb5\xdc\x90\xb7\x37\xc8\x09\xfc\x0f\x5d\xfa\xed\xfa\xd2\x0f\x36\x95\x7e\x20\xe6\x9e\xfe\xfc\x96\xca\x2b\x77\x12\xcf\xd6\xcb\x2a\xff\xce\x57\xe1\xf9\x77\xc2\xf3\x96\x7b\x6a\xab\xed\x5d\xa1\x4e\x07\xfb\xde\xa1\x68\x36\xf9\xa1\x37\x9f\x0b\xb5\x7b\x7a\x87\xbc\x27\x5a\xdc\xcf\xa4\x4d\x5d\x19\x95\x94\xef\x60\xcd\x82\x3e\x11\xfd\x26\x64\xd1\x08\xbe\x99\xe7\xc1\x28\x8e\x05\xfc\x30\x3f\x44\x9c\xf2\x3e\x3c\x31\x3f\x46\xf1\xb0\xbb\xae\x39\xcd\xe6\xa6\xc6\xce\xe7\x9b\xbe\x6e\x11\xa2\x84\x2b\x45\xcf\x7b\xb2\x69\xac\xba\x7f\xeb\xe3\xef\xc9\xd2\x66\x57\xa0\x44\xb8\xe1\x15\x15\x4f\xe3\x3e\x3b\x92\x28\xc5\x5d\x7a\xb8\xbf\xbf\xf3\xf8\x60\x3e\xdf\x3f\xd8\xed\x3c\x3e\xa4\x3d\x64\x4b\xdc\xa0\x44\x70\xdf\x7e\xd5\x12\x17\x69\xab\xa3\xbf\x90\x1d\xbc\x28\x44\xa8\xeb\x38\xe2\xc8\x71\xf0\x46\xc5\xe6\xe2\x12\x2a\xb2\xb1\xd1\x41\x0a\xd2\x94\x88\x13\x2e\x51\x98\xb6\x5a\x10\x56\xa9\
f\x74\x2e\xab\xfc\x9b\x92\x72\xd4\x62\x87\x6f\x31\xf9\x14\xc3\x8f\xdf\xed\xa1\x27\xf1\xa6\xd4\xfb\x26\x49\xd6\x50\x2d\x27\x69\xd2\x63\xe9\xb5\xf1\x3e\xe7\xfd\xef\xe2\xf5\x09\x42\x7f\xc4\xf0\x3e\x86\x5b\xf4\x24\xb6\x72\x7d\x69\x69\x4c\xb3\xbd\x3f\x63\x82\xde\x85\x9a\x77\x7a\x2b\x33\x15\x9a\x74\x81\x4a\xaf\x55\x4d\xef\xe5\x59\x07\x3d\xc5\x5b\x6a\x99\xbd\xde\x19\x94\x2f\x62\xf2\x5a\xc0\xd7\xf8\x17\x29\xdb\xcd\x90\xea\x5c\x62\xea\x6b\xaa\x24\x4f\x9d\x9e\x90\x42\xbb\x53\x24\x33\xd6\xf5\x7d\x40\xa9\x4e\x8f\x71\xe8\x65\x29\xc3\x18\x84\x7e\xaa\x53\x86\xa5\x3a\x65\x98\x50\x7d\x22\x81\xfa\xa9\x4b\x17\xb8\x9b\x12\xc4\x88\x46\xb4\x83\x7b\x28\x4b\xeb\xde\xea\x80\x24\xb2\xd5\x81\x0e\xf6\xb3\x77\xd4\xe4\x79\x6f\x75\x30\xa4\x7a\xd4\x5e\x8b\x55\x9b\xc0\x8a\x5e\x99\xa2\xaf\xb1\x11\x5f\xed\x0c\x6a\x6a\x55\xf2\x48\xb5\x5e\x44\xff\xcd\x53\x38\xd0\x36\x97\xb0\x32\x09\xcd\x39\x85\x14\x4c\x3a\x74\x05\xa2\xc9\xa3\x3a\xb5\x0f\xd3\x0d\x94\xd1\xea\x9c\xcd\x22\x82\xa9\xbe\x61\x04\x58\x44\x7e\x40\x1a\x91\x38\x05\xba\x12\xd8\xd3\x47\x65\x4d\x7c\x57\xb3\xb9\xf5\xe0\xe2\xaf\xe4\x36\x88\x2f\x1f\x98\x33\x57\x5c\x5f\xcb\x48\x5a\x1c\x13\xc2\x75\x0e\x2e\x93\xc9\x39\x8c\xc8\xaa\x64\x94\x8f\x0f\x4d\x42\xc8\x55\xa9\x24\xa9\x6c\x8c\xe3\x44\x36\x1e\x6f\xcc\x24\x99\x5f\x6c\x1f\x21\xc7\x73\x15\x67\x5c\x97\xc8\x72\x30\x8a\xa9\xac\xa5\xb1\x64\x11\xea\xb0\xdd\x3f\x84\xce\xa2\x60\x67\xa4\x84\x38\x22\xa9\x84\x28\xfa\x45\x9a\xfb\x46\x1c\x21\xde\x3a\xf0\xfe\x10\x7f\x1c\x78\x7f\x74\xd8\xae\x7a\x46\xb2\x4d\xb1\xfe\xa1\x90\xb3\x96\xbe\xb0\x6e\x50\xd9\xa0\x68\x79\xeb\x57\x48\x38\xc4\x44\xac\x9f\x17\xe1\xdf\x63\x6d\x26\x79\x9f\xda\xbc\x85\x1b\x2e\xf3\xb5\x74\x15\x5f\x4b\x0d\x5f\x33\xb9\x35\xb5\x5d\xaf\xce\xdd\x52\x98\x21\x0a\x86\xff\x99\x3b\xcd\x21\x88\xd6\xdf\x60\x34\x30\xf3\xc9\x48\x12\x49\x44\x82\x08\x46\x9b\xc1\x9f\x14\xe0\xfd\x68\x29\x3b\x75\xc6\x90\xd7\xf7\x11\xcb\xfa\x68\x6d\xff\xf0\x95\xab\x45\x6a\xf6\xaf\x17\x2e\x86\x69\x44\xfa\x11\x6c\x47\xbf\x79\x6b\xd7\x38\x22\xcf\x04\x5c\x45\x24\x48\x61\x18\x91\xb7\x30\x89\xc8\x11\x3c\x13\xcb\xf3\xbc\xd8\x4e\x8c\x49\xd7\xd8\xe1\xcd\x1d\x00\xc5\x95\x15\xa3\x14\x51\xeb\xa2\x3e\x51\x36\x6d\xb7\xb3\xbf\x7b\xc0\x0e\xfe\x40\xac\xdd\x79\xfc\xd0\x53\x8a\x58\x96\xb5\x00\xa5\x87\xbb\xf3\xf9\x56\x3f\x45\x0c\xf7\x68\xbb\xe3\x53\xdc\x42\x53\xf5\xab\x3d\x4d\x91\x06\x2e\xa3\x75\x52\x35\x51\x45\x4b\xe2\x45\x26\xc0\xa4\xf5\xac\x15\xbb\x9d\x43\xda\xd3\x74\xf8\x22\x97\x5f\xac\xcb\x20\x1e\x1f\xd2\xf9\x7c\xe7\x31\x21\x84\x36\x9b\x59\xa5\x39\xf4\xce\xc1\xc3\x47\x7b\x6c\xbf\x6e\x4c\xad\x60\xdc\xf7\x1e\x3f\x3c\x28\x60\xca\xdc\x17\x9e\x05\xf3\xf0\xe1\xc3\x03\x76\x50\xb7\x96\x57\xd0\x74\xbc\xdd\x83\x47\x05\xcc\xc1\x4a\x34\x9d\x5d\x6f\xef\xa0\xa4\xe7\xe1\x6a\x44\xfb\x07\xbb\x16\xd1\x8f\x56\x03\x3d\xda\xed\x1c\x3c\x2a\x80\x1e\xaf\xac\x6e\xc7\x7b\xfc\x78\x7f\xa7\x00\x2a\xd3\x6e\x54\x50\xed\xec\xee\x3f\x7a\x68\x41\x75\x56\xe3\x3a\xd8\x39\xd8\x2f\xbb\xa9\xb3\xb3\x1a\xd7\xa3\x47\xfb\xa6\x33\x6b\xc2\xa2\xcd\xf0\x74\xb4\xb0\x66\x78\xd7\x12\xa5\x26\x29\xe1\x62\x01\x33\x34\x8a\xac\x3f\x69\x84\xfa\xe8\x7b\x9e\xaa\x31\x4a\xd1\x1e\x86\x24\x45\x4e\xdb\xc1\xd6\xcb\x1d\xfb\xa5\xfe\xad\x3e\x6e\x58\x2a\x3b\xf6\x52\xb9\x8b\x7e\xff\x32\xb7\x5c\xa6\x10\xd9\x2d\xbe\x3a\xb5\x9e\x5a\x33\xc8\x03\xe9\xd2\x5c\x56\xdb\xb2\xae\x9d\x60\x44\x5f\x45\x85\x18\xa1\xd8\x2d\xae\xd3\x60\xd9\x7d\x2e\x33\x34\x8b\x40\xe8\xbb\x7c\x41\x9a\x6b\x5c\x6e\x23\xb2\x9d\xc2\xcd\x06\x95\x81\xff\x81\x14\x53\x6f\x99\x3b\x17\x56\xec\x5d\xb5\x7c\x9a\x95\x2e\xb5\xff\x98\xf4\x97\x51\x96\xaf\x27\x45\xce\xb9\xb3\xdc\xad\xfe\xaa\xbe\xf6\xad\xbe\x86\x4a\x3d\x06\x67\x2a\x0d\x9c\xab\x10\x
86\x11\x56\xef\x3c\xe3\xa9\xcc\xa1\xa7\xd1\x9a\xfc\xf4\xaa\xdc\x37\x43\x48\x7d\x32\xdc\x44\xbf\x2a\xdf\x31\xe5\x5b\xa6\xfc\x4a\x98\x76\x06\xa3\xe6\xcb\xe5\x2f\xda\x56\x7e\x4f\x23\x34\x40\x51\x04\x1c\xbc\xfc\xff\x58\x49\xa8\xe6\xe8\xf4\x33\x81\xe1\x43\xb4\x41\x4b\x56\x42\x72\x45\x3e\x7e\x1e\xfd\xad\xeb\xdc\x8a\xdb\x04\xad\xdb\xd2\x74\x78\x94\x39\xec\xad\xa7\x52\x28\xf5\xc5\x18\xf6\xad\x68\xf9\x0a\xac\x83\x3e\x8f\xea\xa0\x8a\xa6\xeb\x15\x6b\x86\x70\xa4\xa8\xef\x40\xe8\x1f\x43\xdf\xef\x40\xe0\x7b\xaa\x11\x3a\x41\xc2\xa2\x22\xd0\x6c\x4b\xa4\x31\xab\x2d\xfe\x18\xeb\x2c\x2d\x6a\x07\x5b\x60\x78\xba\x0a\xf1\x0c\x5d\x47\x7a\x5b\x5d\xc2\xf1\x9d\xc1\x38\x05\x2d\xf8\x18\x24\x54\x21\x79\xbb\xac\xee\x9a\x4d\x48\x27\xa7\xb1\x91\x28\xad\xb4\xcc\x4d\x2d\x33\xad\xd4\x0f\x98\x41\xa5\x95\xd3\x57\xd1\x1a\x15\xa0\xe0\x12\xed\x4e\xfd\x4a\x31\x60\xe6\x76\xa0\xd3\x68\xfd\xf5\x36\x3a\xab\x64\xea\x6e\xcf\xe7\x4a\x69\x28\xde\x30\xf5\x86\xb9\x34\xcb\x46\x99\x25\xee\x34\x69\x3b\xed\x34\x8e\x25\xb2\xec\x2e\x1c\xa6\x93\x76\xb2\xa5\x6c\x8f\x4c\x8b\xd0\x45\xb6\xc7\x3c\x9d\x26\x75\xfb\x40\xad\x3c\x8f\x0a\x5b\x08\x69\x96\x66\x32\x26\x4a\x57\x89\x48\xaa\xf3\x81\xa6\xfa\x0e\xd5\xd4\x65\xdd\x3a\x95\x35\x92\x62\x88\x8a\x74\x96\x9a\x3a\x8a\x73\x02\xc3\x1a\x59\xea\x7b\x9e\xa8\x33\xa3\xb0\x68\x41\x41\x94\xc2\x47\x35\x51\x18\xbe\x47\xe4\x29\x9c\x47\xab\xef\xb0\xa9\xdf\xfe\x9a\x55\xa2\x00\x92\x18\x92\x18\x17\x97\x6b\xd2\x82\x15\x4b\x9d\x24\x53\xea\x24\x99\xd2\x65\xdd\x22\x1f\xd6\x77\xd5\x94\x14\xd7\x97\xd4\x14\x9d\x46\x99\xed\xe8\x0e\x9d\x9b\x11\x08\x31\xc4\xf5\x90\x36\x5d\x77\x96\x7c\x54\x7d\xad\x6d\x6e\x16\x9a\xb0\x44\x14\x63\x2d\xbd\x7d\x59\xd5\x3e\x49\x0a\x30\x73\x19\x8f\x1e\x82\xfc\x38\x6f\x79\xbb\x67\x35\xc5\x68\x9e\xe5\x53\x35\x50\xba\xfa\x06\xa0\xb3\x68\x8d\xa1\x68\x27\xb3\x43\x1d\xad\xd6\xab\x22\x06\xc3\x14\xce\x22\x73\xd3\xc9\xf1\x1a\x2c\x8f\x21\xd4\x48\x4e\xd6\xd5\xb2\x0f\x43\x63\xed\x12\xab\xaa\x39\xd1\xec\xfa\x38\x42\x37\x12\x9b\x43\xbc\xb7\x4a\x6a\xd6\x5c\xf3\x4d\xa4\x74\xfb\xfd\x05\x7c\xae\xf4\x4f\xc5\xbe\xd0\x58\x5a\xf4\x9c\xcc\x50\xa4\x15\xdc\x7a\x9a\xe1\x80\x21\x96\x65\xbf\x80\x0c\xca\x54\x2f\xe1\x38\x42\x4c\x7b\xdd\xb8\x15\x6c\xaf\x39\xc5\x33\x93\x8a\x59\xb3\x07\x9d\xbe\x07\xde\x44\x9a\x67\x6d\x30\x7a\x18\xab\xdd\x1d\xfa\x1c\xe5\xa6\xbb\x37\x91\xbe\xb3\x29\xbb\xd2\xf8\x59\x44\x0a\x43\x56\x9a\x38\x70\x26\x8c\x61\x8b\xf1\x7e\x72\x24\x1d\x78\x6b\x7e\xa6\x13\xc5\x9d\xfa\xd6\x9b\x44\x52\x21\x6d\x90\x41\xc4\x87\x4c\x4c\x44\xc4\xa5\xb6\x7a\xe9\x97\x79\x02\xe2\x44\x9b\xcd\x7e\xe6\x66\x33\xca\x79\x2c\xb5\x71\x37\x71\xe0\x48\x9b\xd3\x6e\xd1\x53\x70\x86\x8c\x33\x41\x65\x2c\x3e\x9e\xbd\x71\xe0\x99\xd0\x5f\x6e\xa4\x29\xa4\x33\x2d\x14\xf0\x01\x43\x1f\x8b\xe4\x85\x18\xc3\xeb\xac\x21\x3a\xd7\x8a\xa9\xee\x59\x84\xab\x54\x38\xf0\x33\x5e\x83\xeb\x58\xdf\x27\x04\x1f\x57\xee\x8c\x84\xa3\x2a\xef\x97\xbe\xb9\xb8\x56\xe9\x3e\xaa\x17\x7f\x6e\x50\xca\x4e\x19\x7c\x8c\x74\x9a\x12\xa3\x97\xbd\x5c\x7f\xdb\x62\x99\x93\x1b\x19\x5d\x1a\xc3\xdb\x95\xc6\x03\xc6\xc3\xb8\xcf\x3e\x9e\xbd\x7a\x1a\x8f\x27\x31\x67\x26\xbd\x3d\x7c\x52\xa8\x4f\x31\x7c\xdb\xb0\xbf\x6b\xef\xc4\xa9\x39\xd5\xaf\xcd\x84\x3f\xa2\x2c\xb6\x96\xfc\x87\x03\x3b\x3a\x33\xe2\xd6\x7f\x38\xb0\xab\x9f\x88\x03\x9e\x79\x45\x1c\x7d\xa6\x09\x9e\x44\xe4\x3d\xbc\x8f\x56\x4e\x39\xdb\x26\x95\x12\x89\x04\x46\xf9\x35\x62\xdb\xf8\x9e\xda\xd7\x88\x69\xf9\x94\xcf\xe7\x14\x52\xb5\xf1\x9a\x1d\x24\x75\x29\x84\x9a\xf1\x2b\x66\xaf\x75\xbb\xb0\x5c\x33\x5a\xdd\xd4\x45\x42\xc5\x44\xf5\xf2\x51\xbf\x04\x09\xdd\xcc\x04\x65\xe4\xd2\x77\x51\xf5\x16\x25\xb9\xc9\x04\x75\x8b\xde\x47\xe6\xfe\x0f\x99\x29\xb0\x7f\x46\xeb\
xcd\x80\xef\x14\x17\x5c\xc9\xa2\xa6\x11\x5c\xa7\x60\x62\x03\xb4\xf9\x4e\x73\x8c\x4e\x67\x01\x5f\x6b\x8a\x38\x5b\x49\x8e\x11\x10\xb4\x1d\x4f\x66\x06\x31\x8a\xcb\x6b\x03\xc4\xaa\xcb\x34\x18\xc6\xbe\xfe\xb4\x93\x5b\x14\x73\xcb\x5e\xa7\x76\x19\x00\x77\x83\x56\x47\x4b\x74\x6e\xf0\xbc\xd5\xc9\x6f\x05\xf0\xab\xa5\xb8\x4b\xcf\x5a\xf5\xa2\x22\x2f\x56\xde\xc9\x81\x81\x0f\x54\xeb\x1e\x2e\x40\x0c\xd6\x0b\x87\xc5\xbe\x28\x2a\x77\xdb\xe5\x36\x96\x50\xdb\x58\xfa\xda\x77\x5c\xdf\xfc\x74\x5e\xad\x7c\xfb\x33\x0a\x08\x5b\x71\x13\x6f\x79\xfb\x9d\x2c\xb7\x39\x03\x9e\x56\x24\xc6\x05\x06\x39\x58\x33\xac\x5b\x33\x24\x06\x5a\xbc\x73\xb7\x17\x18\xd8\x60\xfd\xf0\xcb\x41\x6e\x5d\x49\x07\x24\x5d\xe3\xd6\xa9\xae\x83\xd8\xb2\xcd\x66\x96\xd9\xd8\xb2\xcc\xe6\x66\x58\xd5\xdf\xb4\x62\x82\x5d\x74\x43\x82\x28\x41\x99\x15\x36\xfe\x95\x15\x36\xb6\xad\xb0\x4a\xee\x31\x16\x74\x3a\x20\xcb\xda\xc9\xb7\x08\x03\xfa\x1a\x92\x7b\xfa\xd5\xe7\x03\x08\x9f\xfa\xe8\x45\x48\xee\xc3\xa7\xfe\x6d\x0a\xe1\xb9\x76\xae\x9e\xf8\xb7\xe9\x02\xbb\xe1\x53\xf5\xe2\x45\xe8\x86\xe7\xea\xdd\x8b\xd0\x0d\x4e\x16\xb0\x66\xe6\x7e\x55\x5f\xcb\xd9\xfb\xff\x72\xf7\x26\xda\x4d\x23\xdd\xc2\xe8\xab\x18\x1d\x5f\xba\xaa\xb3\x6d\xec\x24\x4c\xa2\x75\xbc\x20\x21\x24\x84\x10\xc8\x40\x80\xfe\x38\x59\x25\xa9\x94\x28\x91\x25\x53\x2a\xd9\x09\xe0\x77\xb9\xcf\x72\x9f\xec\xae\xda\x55\x1a\x2c\x4b\x4e\xba\xbf\xe1\x3f\xeb\xff\xd6\xd7\x44\x56\x0d\xaa\x61\xd7\x9e\x6a\x0f\x38\x5b\x51\xd5\x43\xd7\x00\xf8\x8b\xd7\x67\x5f\x68\xae\x76\xcb\x41\xd9\x25\x59\xa0\x8a\xbc\x2d\xd0\x70\x6b\x80\x76\x68\x42\x3c\xfb\xa8\x76\xb7\x2b\xd5\x44\xa9\x3e\x5e\x5b\xac\x05\x6a\x64\x46\x8f\x2b\x6b\xa7\x6a\x46\x78\x00\x02\xbb\x38\x59\x3a\x5b\x66\x68\xc5\x09\xd1\x89\x03\x51\xa8\x6b\x0d\x78\x52\xc1\x6e\x5a\x0e\x43\x35\xdf\x35\x86\xde\x9f\xeb\xa4\x06\x40\xf6\x3d\xe7\x37\xeb\x37\x2d\xf2\xa1\xcd\xdd\x25\x27\x43\x0a\x93\x84\x0c\x29\x05\xa9\xba\xaf\x24\x8f\x18\xe4\x79\x25\xf6\x3d\x2d\xe0\xed\x87\xb0\xef\xc1\xe7\xcc\x90\x3d\xaf\x15\x5a\xc5\x9c\x42\x12\xac\x54\x16\x7a\x05\x38\x07\xc1\x4a\x19\xa3\x33\x23\x89\x5a\xaa\x6e\xa6\x25\xda\x24\xc0\x64\x7d\xf8\x6e\x46\xfc\x46\xf9\xf6\x75\x46\x90\x48\x22\x27\x8b\x56\x84\xb4\xb5\xee\x71\x46\xde\x4b\x92\xe9\x34\x76\xdf\x74\x4a\x37\x37\x58\xc5\xd6\xe4\x23\xb9\xeb\xdb\x5c\xf5\x39\x23\x6e\x88\x23\x75\x43\x8c\xb4\xee\x86\x88\xec\xef\x1e\x11\xb6\x4e\x30\xbd\xc7\x37\x1c\x54\xda\xb6\x4e\x8a\x89\xbb\xc7\xdc\x5e\xdc\xbd\x9e\xf7\x9d\x99\x59\x55\x88\xd1\x70\x56\xff\x99\x53\x88\x16\x46\xb8\xac\x69\xcf\xbb\xff\xd4\x78\x1d\xcb\xea\x32\x07\x92\xbc\x33\xf2\x67\xfc\x4d\xb1\x68\x41\x50\xcc\xb8\x2e\x74\x2c\x56\x4c\xab\x15\x97\xec\x07\xdd\x50\x4f\x56\xea\x3f\x59\xf9\xab\xec\xe3\x96\xb8\x41\xde\x03\xa5\x5a\x59\x0e\xbc\x7d\xc3\x76\xe6\x20\xcc\x26\xf9\x81\xf3\x56\x34\x65\x48\xe9\x98\x6c\x40\xd6\x3f\xe2\xe2\x49\x98\xfc\x2b\xf0\x56\x38\x39\x7a\xcc\xa4\x51\xa5\x90\x38\x71\x7e\x7a\x63\xfb\x3c\x23\xf9\xa3\x35\xb7\xc0\x7b\x6d\xb3\x00\xbc\x33\xdb\x02\x0b\xbc\x81\xed\x07\x0a\x35\x5a\x3f\x2d\xf0\x1e\xdb\x03\x85\x34\xc7\x54\x55\x8a\x93\xbe\xf7\x5a\xd5\x3b\xcf\x88\x7a\x3e\xa3\xaa\xb6\x7a\x1a\xa8\x06\xfa\xad\xfb\x9e\xaa\x66\xf7\x09\x73\xbe\x9c\x69\xb8\xb6\xb4\xeb\xf3\x39\x7e\xe8\x31\x02\x7d\x12\x80\x5b\x7c\x02\x7f\xea\x4f\x4f\x49\x94\x97\xa8\x81\xea\x97\x7a\xac\x6e\x31\x50\xdd\x0d\xd5\x9a\x23\x0a\x53\x45\x46\xf6\x12\xd8\x0a\xc9\x5b\x41\xe1\x28\xa3\xd0\x0d\xfe\x4e\xfa\x4b\xc5\xa9\x8e\x0c\x08\x0b\x8a\xb9\xc0\x4d\x0a\xcc\x71\xb0\x8a\x31\x8e\x31\xfe\x24\x85\xcb\xc0\xdc\xd6\x5e\x04\x4e\x1a\xc3\x24\x68\x57\x9e\x6e\x56\x95\xa7\xb3\xa0\x59\x28\x34\x41\x67\xe0\x36\x58\x9d\x0e\xfa\x26\x30\x46\x35\xd7\x81\x31\x91\x79\x19\xb4\xe6
\x99\x3e\x6e\x2a\xea\x98\x0b\xa4\x39\xbc\x5e\x39\xd3\xae\xd6\x23\x61\xe8\xe0\x42\x0e\xb8\x5a\xc5\x6f\xe4\x12\x39\x53\x12\x39\xeb\xb3\x82\x07\xeb\xa9\x4f\x7a\xfd\xee\xc3\x87\xfa\x81\x19\xa6\x4c\xff\xf2\x15\x57\x1e\x06\x44\x35\xf5\x50\xbf\x93\x5b\x27\x9a\xd1\xbc\x55\x94\x6b\xde\xf4\x43\xcc\xab\xb2\xbe\x5c\x50\xea\x54\x93\xb0\xf4\x39\xe8\xdb\xec\xad\xc5\x15\x69\xc9\x80\x92\xb3\x09\x69\x52\x3a\x16\x6b\xdf\x4a\xb4\xe6\xf7\xf0\xe6\xc7\x37\x0a\x21\x0f\x33\x8b\x98\xf1\x67\x35\x55\x90\x5e\x9f\xad\x80\x64\x8a\x11\x7a\xb1\xbc\x42\x9a\x37\xc3\x9c\x36\x45\x69\xb2\x30\x82\xd8\x49\xaa\xba\xac\xc3\x10\x92\x3e\x2b\xf2\xbd\x6c\x05\x24\xe9\xfb\x78\xe9\xd5\xfe\xdd\x39\xec\x05\x4d\x89\x86\xeb\x5c\xb1\x99\x6f\xf5\xe6\x11\x2d\x2a\x81\x21\x87\xec\x21\x87\x1c\x06\xe4\x98\x28\x8a\xf6\xc7\xa0\xb6\xe5\xfa\x61\x71\x01\x34\x78\xcc\xc8\x5e\x80\xc9\xc2\xd4\x22\xe8\x29\xb3\xbe\x5f\x9d\xf0\xaf\x5f\x49\xbe\x1c\xa1\x5a\x0e\x51\x2e\x47\xb8\x34\xc0\x60\x31\x3d\xce\x61\x08\x61\x9f\x41\xd8\x77\x21\xec\x7b\xf9\xc7\x42\xb5\x2c\x01\x9d\xaf\x1e\x4d\x69\xc1\x7a\xa8\xde\xb9\xe4\x2a\x80\x45\xb0\x46\x73\xb2\xe6\xe5\x53\xa3\xfb\x1b\xcb\x77\x81\xcb\xa7\x04\xfa\x86\x74\xc8\x77\xc1\x64\x8c\xea\x1a\x7f\x3e\x27\x5e\x4e\xc6\xf3\x71\x8c\xd2\xc4\xc6\xd5\x90\x45\x12\x1d\xa6\x60\xc0\xab\xe6\xc3\x3e\x0c\x8b\xb9\x99\x75\xd0\x53\x3c\x0f\x1a\x35\xce\x7b\x9a\x37\x7b\x51\x99\x2e\x86\x1f\xa0\x85\x7e\x7d\x51\xdb\x56\xcf\x85\x34\xa7\x70\x12\x34\x2a\x33\x04\x31\x02\x8f\xa4\xb5\x50\x92\xe7\xfa\xad\xad\xf3\xfd\x95\xea\x8d\xcf\xad\xb8\xd1\xb5\x31\x78\xaa\x65\x90\xe4\x51\xe0\x64\x12\x76\xee\x60\x26\x8f\x02\xf2\xd3\x7d\xa6\xc4\x9e\x89\x4d\x98\x93\xc1\xe7\x80\xd4\xd5\x66\x17\x01\x60\x36\x97\x67\x65\x50\x5e\xbd\x35\xa3\x89\x49\x48\xd6\x95\x85\x42\x4c\xf1\xe5\xde\x8d\x2d\xc0\xdb\xb7\x63\xf0\x36\x30\x3d\xee\x33\x5b\x82\xef\xda\x0f\x86\x73\xa3\xa0\x9e\x53\x78\xdf\x8a\x78\xa7\x64\x27\x00\xeb\xcd\xeb\x13\x4b\x91\x23\xb8\x0c\x34\x5f\xfc\xce\x90\xbd\x8b\x80\xa0\xfa\xcc\xe2\x42\x24\xc2\x82\x1b\x45\x13\x15\x11\x3c\x0b\x56\xdc\x7e\x11\x61\xac\x22\xb7\x03\xe7\x2a\x86\xb7\xc1\x4a\x43\xeb\xed\x00\x66\xe4\xcc\xfc\xb3\xc5\x21\xa6\xd0\x45\x46\xe4\x84\x2f\x15\x4c\x64\x91\xfa\xf8\x74\x85\xbc\xfa\x36\x40\x7e\xf7\x93\x00\x01\xd7\x38\x14\xd2\x44\x97\xcc\xca\xb2\x4c\xaf\xec\x17\xde\xe8\xaa\x5c\xdc\xf1\x14\x7e\xc9\x27\xe1\x98\x27\x99\xec\xf0\x1b\x8f\x73\x9f\xfb\x75\xff\xff\xf7\x5c\xce\x12\x71\xdd\xd1\x8b\xf6\xa2\xe2\x76\x55\x93\x96\xc6\x01\x5c\xe1\x15\xcc\xc7\xbe\x17\xd0\x35\xab\x63\xad\xe9\x1f\x6f\xe1\x5d\xa0\xde\x3f\xa3\xb5\x0b\x6f\xeb\x34\xe6\xb9\x85\x8b\xe0\xe9\x24\x89\x53\xde\x09\x44\x32\xee\xb0\x49\x88\xb7\x28\x7d\x56\xf7\x96\x3e\x60\x51\x90\x88\x31\xf7\x3b\x99\x88\x4c\x1d\x74\xda\xd2\x24\xf6\x47\xf3\x41\x2c\xa3\xab\xc4\xa5\x76\x51\x67\x51\x4b\xad\xb5\xb3\x4c\xfb\x17\xe4\x73\x39\x55\x80\xf2\x5e\x71\xbf\x67\x09\x79\x1b\xea\x2d\xda\xbd\x7f\xcf\xab\x3b\xdc\x36\x1d\x1e\x18\xb0\x6c\xd8\xcc\xbc\x7e\xbc\x66\x3d\x5a\xd2\xbe\x6a\xeb\x59\xb2\x8a\xd1\xa1\x14\x3e\xad\x90\x1a\x3f\xeb\x7b\x45\x3c\x1c\x5f\xff\x16\x03\x78\x4b\x4e\x02\x9d\xff\x1c\xa7\xb0\x1b\x12\x0f\x83\xdf\xe8\x9f\xe3\x80\x68\x01\x80\xc2\x8d\xa4\xb4\x9a\x6d\x94\xc2\xf7\xe5\x81\x55\xaf\xbf\x45\x71\xfd\xfd\x60\x58\x5a\x78\xa1\xe6\x5d\xf3\x51\x79\x4a\x45\x73\xa5\x8d\xea\xcf\x57\x41\x7b\xca\xf5\xef\x41\xb3\x29\x1f\x0a\xcd\x7a\x48\x1f\x03\x67\x16\xc3\x87\xc0\xf9\x14\x13\x6b\xc2\x45\x1a\xa6\xf2\x8d\x82\x8d\xd7\x37\x13\x16\xfb\x2f\xa3\xc8\x82\x8f\x01\x85\xfd\x15\xa7\xf4\xbc\x10\xc3\xdf\xb4\xd6\x3a\x21\x02\x7e\x7a\xd2\xae\x31\x51\xb5\xc0\x30\x67\xe4\x4f\x0d\x48\x68\x7b\xfa\x6d\x29\xbe\x76\x9e\x6b\xb1\xcb\x21\xe3\x98\xb4\x72\x46\xf6\x24\x2
8\x31\x26\xa6\x54\x1d\x85\x73\xad\xbd\xfc\x12\xdc\xa1\xa4\x8e\xf3\xbb\xce\xd8\x5d\x39\x64\xf7\xca\x2e\x62\xed\x7f\x09\x60\x1a\x40\xdc\xdf\x35\x2a\x52\xb1\xdc\xb4\xd8\xb5\x1b\xa4\xe3\xec\x02\x09\xf9\x31\x12\xf2\x23\x24\xe4\xbb\x90\x38\xa2\x7f\x8e\xb9\x50\xee\x95\x6f\x71\x75\xae\x45\xd1\x3f\xb9\x33\xdb\x62\xb6\x3a\xdb\x22\xcb\x8b\x4d\x4e\x48\x98\x65\x8a\xf2\x2f\xe5\x5f\x94\x0b\xf9\x17\x13\xfc\xe1\x65\xa9\x4c\xc6\x08\x30\x18\x7b\x82\x8f\x6e\x24\xd1\xe9\x73\x31\xad\xc0\xb7\x5a\x8a\x46\x4e\x47\x47\x24\x86\x85\x34\x8d\x9c\x6a\xf3\x74\xe9\x2a\xc1\x64\x73\x0e\xdc\x75\xce\x04\xb1\x82\xc4\xcb\x52\x8b\x42\xb6\xbc\xcc\xed\xd7\xf6\xe7\x44\x6f\x5b\x68\xc7\x7d\x36\xb2\x2c\x5b\xf4\xdd\x10\x70\x17\x5f\x05\x86\x15\x17\x7d\xf7\x8a\x8e\xd4\xbf\xf6\x91\x42\xce\x57\x79\xa4\x9a\x39\x55\xeb\x81\x64\x67\x2a\x89\x74\x29\x70\xb7\xd9\x0f\xb5\x54\x1f\x94\xe6\x01\x8d\x23\x78\xa7\xd6\xad\x32\x88\x6e\x00\x93\x90\x28\xea\xa7\x06\xf1\xd7\x3f\xb8\xd1\x38\x51\xd5\xcf\x10\x3e\x27\x75\x6b\x2d\x53\x89\xed\x2f\x56\xaa\x49\xdd\xe7\x44\x98\x12\x54\xb8\xba\xc6\x73\xc2\x73\x1b\xd9\x2f\xec\xf1\xdc\xc6\xa4\x87\x9e\xa4\x7d\x77\x74\x41\x04\xd4\x4f\x2e\x35\x37\x41\x26\x8b\x9f\x85\x99\x51\x14\x6c\x58\xb4\x34\x65\x38\x24\x12\x4e\x12\xad\xcf\x39\x4a\x40\xf6\x3d\x75\xcc\x84\xab\x1d\xb6\x8a\x75\x61\x66\x5d\x10\xf2\x7a\xee\x6d\x0f\x1d\xc4\x8d\x4d\xc9\x9c\x42\xe2\xae\x42\xab\xf1\x1f\xce\xa0\x64\x3a\x17\xf1\x6c\x47\x28\x0c\xd3\x1b\x6a\x5f\x65\x35\xfb\xd0\x5d\x6d\xa3\x54\xed\x4d\xd6\x7b\x93\xe6\x92\x5c\xf4\x59\xb5\x5f\x0c\x19\xea\x29\x1e\x52\xe2\x0a\x07\xad\x28\xe7\xbd\x24\xb7\x24\xc4\xf9\xc3\x8e\xd6\x22\xba\xcd\xf7\xf5\x83\x3f\x44\x6e\x51\x78\x88\x57\x44\x8d\x1e\x8c\x6a\x7c\xbc\xef\x56\x47\x58\xbc\xea\xbb\x05\x06\xe6\x15\xd1\xd9\xf8\xf2\xe5\xe6\x81\xaa\x28\xe7\x9a\xf2\x2a\x06\xc6\xf4\x2d\x19\x2f\x21\xff\x8c\xfc\x99\xf5\xf1\x42\xac\xcf\xbe\x15\xf0\x8a\xf1\x5b\xdc\xfc\x8b\xb9\xa9\x85\x0b\x9e\xc3\x16\x9a\xb2\xbe\xbe\x18\xf3\x74\xf3\x79\xdd\xcd\xb1\xda\xcf\xc3\x87\xc5\x63\xde\x69\x82\x9d\x86\x4e\x62\xb2\xa3\xba\x28\x65\xeb\xcf\x30\x60\xd5\x8f\xe1\x66\x24\x68\x25\xed\x49\x94\x00\xf1\x21\xcb\x1f\x3c\x18\xf2\x8d\x3f\xe2\xd1\x8c\x04\x2e\x88\xde\x26\x30\x25\x52\xb8\x2e\xc4\x6b\x43\xf3\x1b\x73\xd7\xe7\x4b\x55\xf8\x3b\xce\xcb\xd9\x70\x33\x8b\x42\xbb\x47\x21\x75\xdb\xd9\x12\xd7\xd5\xc6\x16\xc8\x2d\x34\x6d\x79\x61\x7d\xb1\x4c\x0c\x75\xe2\x61\xd9\x77\xb5\x01\x95\x0b\x7c\x4d\x31\xcd\x89\x0b\xbc\x87\x30\xa8\x4f\x89\xdf\xfa\x79\x9d\x1b\x26\x46\x13\x22\x55\x73\xea\x3a\x1f\xa0\xdb\x4e\xdc\x8c\x8c\x9a\x8f\x64\x5f\xe8\x2c\xc8\x8d\xd1\x0c\x4c\xa5\x07\x17\x44\xa7\x01\x7f\xf8\xf0\x82\x70\xc3\x7e\xa0\xd2\x6c\xc5\xa2\x9c\x4b\xc0\xeb\x60\xb3\x2e\x97\x6e\x93\x2c\xad\xb9\x19\x5a\x68\x14\xf5\x20\xbb\xe8\xc9\xc6\xb5\x5b\x93\xef\x92\x1d\x14\x44\xc6\x2e\xcc\xc8\x67\x09\x43\x90\x14\x7e\x98\x5c\xd7\x99\x83\x26\xe4\x20\x1c\x9d\xc9\x2d\x03\x49\x1f\xad\xff\x1a\x50\x3c\xbe\x33\xf2\x2e\x81\x19\x89\x5c\x10\x5a\x7b\xdb\xdc\x5f\x86\xfd\x69\x95\x7c\xec\xec\x0b\xf5\xf5\xcc\x89\x1f\x65\x8a\xe4\x3c\x92\x60\x92\xf6\xe4\xc9\x98\xbb\x2e\xe1\x86\x61\x94\x12\xbe\x48\xd2\x7f\xac\x75\xc4\x80\xc9\x03\x6f\x25\xb2\xa8\xa5\xfd\xc4\x68\xf8\x68\xe3\x77\x92\xad\xc9\x35\x12\xf7\x04\x7d\x14\x53\x7b\x30\xa7\x70\xb1\x0a\x59\xe5\xc8\x21\x97\xc2\x1f\xf0\x3e\xeb\xbb\xbf\x7e\xd5\x11\x42\x7e\x8c\x19\x2c\x5e\x60\xa3\x51\x91\x7a\xce\xdd\x31\x32\xc5\x82\xc6\x8e\x07\xc2\xe1\x20\x9d\x23\x22\x15\xd6\x67\x8a\xc4\xf3\x28\xe5\x1a\x2b\xed\xa3\xc8\x58\x74\x6e\xae\x3e\x41\x2a\x66\x75\x4e\x61\xe2\x36\x29\x16\xb7\x49\x4c\xfb\x57\x49\x18\x23\x79\x80\x59\x1b\x4c\x58\xb9\xd6\x5b\xfd\x15\x8a\xba\xeb\xdd\x1f\xc5\xf6\xc4\x25\xb7\xe4\x
c2\xd5\xbb\xa0\xf7\xd6\x60\xd0\xdb\x16\x0c\xba\xd8\x19\x2d\x54\xd4\x0d\x30\x95\x4f\x67\xad\xbf\xfe\xf8\xf7\x2e\x27\x33\x32\x33\x34\xea\x77\x32\xec\xa1\x8a\xe1\xe6\x9e\x63\x1e\xe4\x43\x1e\xda\xb7\xe4\x56\x63\xfa\x19\xb9\x74\x0b\x1f\xe5\x6b\xd7\x39\x85\x97\x6e\xe3\xdd\x51\x1e\x27\xac\x60\x7b\x9e\x96\x01\xbe\x78\x95\x92\xf2\x0a\x25\xd5\x1c\xc0\x85\x2d\x15\x07\x70\x5f\xaa\x5a\x8b\x01\x72\x4b\x3c\x3d\xd4\x13\xc2\xe1\x27\xfb\xaa\x3a\x53\xcb\xcf\xfb\xec\x2b\x78\xd2\x3e\x22\xbc\xef\x21\x38\xa0\x6d\x98\x92\x7e\xdc\x6b\x7b\x67\xbe\xc8\x26\x61\xd9\x8b\x55\x5d\x66\x45\x8f\x39\xa7\x94\x51\x35\x19\x59\xeb\xaf\x92\x1e\x0e\x9b\xbb\x91\x9e\x5e\xc9\x02\x6d\xd6\x6a\xb0\x75\x55\x43\x7b\xdb\x56\xeb\x3d\xae\xf7\xb4\x55\xef\x69\xa3\xde\xd3\x7e\xbd\x46\x61\x7d\xce\x9c\x7c\x32\x6a\xaa\x0c\xd3\x74\x9e\x10\x06\x3f\xdd\x6b\x9b\x61\x58\x90\xaf\x74\xb4\x63\xcf\xc8\x94\x63\xbc\x35\xfd\x82\xf5\xdd\x6b\x1b\x11\xf7\x70\xa0\x33\x03\x5d\xbb\x70\xe3\xea\xea\x50\xe2\xf9\x46\x51\xe1\x15\x2a\x24\xd5\x0a\xcd\x21\xc8\x28\xb8\xea\x33\xde\xae\xbe\x73\xaa\xce\x76\x29\x22\x1c\x47\x86\x2f\xd7\x2c\x1d\xbb\xc6\x20\xe2\x75\x1b\x20\xa3\xc3\x1f\x78\xc7\xfa\x3a\xe2\xca\x5d\xe5\xa9\xe6\xbd\xb4\x05\x78\x7b\xaa\xfe\x7b\x5b\x82\x37\xb4\xb9\x6a\xb4\xe5\x3a\x5f\x05\xec\xb9\xce\xa3\xfe\xff\x3c\x82\x43\xd7\xf9\x2e\xc8\xf0\xd1\x80\xc2\xf9\x3d\xe1\xbd\x12\x19\x6f\x03\x2d\xf8\x58\xbf\x3b\xe2\xb6\xde\xb9\x1f\x36\x22\xd8\x83\x0c\x24\x06\x30\xa8\x6e\x51\x1d\x60\xb4\x7f\x35\xae\xbb\x27\x41\x4d\xcd\x42\x51\xd7\x3b\xb6\x2d\x6b\x0e\xf9\x6e\xf0\xbe\xfb\x43\xad\x63\xe1\x72\xad\xb0\x6e\x7e\x80\x8e\xcd\x01\xca\x85\xa4\xe2\x00\xdd\x1b\xb4\xca\x63\xc1\x5e\x2c\x8d\x2f\xbb\x7b\x78\x39\xb0\xb4\xe7\xde\x76\xc7\x73\x05\x4a\x19\xa5\x66\x2e\x94\x36\x42\x6e\x3e\x44\xf4\x2a\x5f\x71\x8c\xdc\xdd\xe5\xe3\xd1\x34\x7e\xe3\xaa\x6e\xba\x6b\xc6\x49\xee\x8d\x8d\x48\x31\x33\xa2\x42\x46\xe7\x18\x42\xa9\x05\x52\xe7\x14\x4e\x56\xcb\xea\x79\xf2\xcb\xdc\x84\xe9\xb3\xdb\xa2\xc7\x2d\x02\x27\x3d\x47\x91\x9c\xed\xa1\x4c\xce\xce\x21\x54\x7c\xcd\x2d\x04\xea\xd7\x2e\xb8\x4e\xb6\x66\xfd\xd7\xa3\x5c\x67\x96\x3a\xc2\x25\x6e\x91\xcd\x6e\x59\x9f\x88\x93\x57\xc8\x76\xd7\x88\x5b\xc5\x22\x0d\x70\xa3\x23\xbc\x0e\xf2\x1b\x92\x64\x46\xb5\xa8\x4e\xb1\x13\xa1\xf9\x38\x22\x0f\x6f\xd7\x3e\x35\x19\x3d\x31\x36\x23\x32\x24\xad\x1b\x7e\x39\x87\x40\x52\x93\xf8\x13\x6d\x33\xdb\xb2\xdb\x94\x98\x78\x27\x81\x0b\xad\x3b\xd5\xb0\x96\x69\x3f\x63\x6b\x8e\x8c\x5a\xa0\xa0\xec\x6b\x70\xc7\x47\x75\xa9\x84\x6e\x40\x5a\x11\x95\xe2\x31\x10\x55\x21\x9b\x54\xf1\x4a\x3a\x24\x0c\xe9\xb6\xa8\x13\x9e\x43\xe2\x26\xc0\x96\x63\x5e\x7a\xf8\x76\x09\x52\xbe\x70\x12\xa9\x59\x60\x30\x2b\xfa\x62\x69\x5b\x7c\x35\x82\x3d\x3b\x02\xf6\xdc\xf6\xfb\xee\x22\x86\x50\x0b\x33\x75\x66\xe4\x4d\x00\x12\x18\x85\xae\x33\x23\xb1\x0b\x12\x92\xa5\x9e\x16\x83\x40\xc8\x3e\xbb\x18\x05\xb6\x87\xb1\x20\xf0\x87\x97\xd8\x1e\xb0\x73\xbb\xab\x3e\x34\x55\x90\x8b\xb4\x19\xcb\x66\xe4\xdc\x6c\xe0\x1b\x0e\x71\xa6\x4e\xc7\x6e\x80\xb2\x21\xb5\xcb\xb2\xfd\xbc\xec\x87\x29\x83\xb2\x8c\x67\x58\x76\x10\x10\x5e\xa5\xd6\x95\xc3\x2c\xf4\x91\x13\xea\xc8\xa5\xe4\x84\x48\xf8\x89\x29\x62\xd1\xea\xb6\x8c\x53\xf6\x74\x65\x8b\xe3\x86\x16\xc3\x41\x0d\xd6\xdd\x93\x3a\xac\x6f\xe2\x42\x8e\x9d\x19\xc9\xd0\x5c\x09\x12\xb5\x94\x63\x84\xfc\x71\xdf\x85\x4b\x67\xdc\xf7\xe0\xc2\x29\xe7\x23\xf4\x7c\x2e\x29\x8c\x9d\x94\xcc\xc8\x89\x0b\xdd\xbe\x7b\x85\xb3\xbe\x70\xfc\x51\x85\xb1\xc1\xf1\x8d\x29\x5c\x7c\xa3\xd4\xbe\xa8\x6f\x8b\x5a\x70\x8c\x88\x41\xe1\xa2\x40\xc2\x17\xce\x0d\x79\xe9\x82\x8b\x66\x89\x4c\x6d\xec\xa5\x73\x51\xcd\xba\xad\xdb\x3e\xb7\xa7\xce\x05\xce\xa4\x1c\x98\xcc\x07\x56\xe0\xba\xa9\x73\x43\xce\
xcb\xce\x42\xd5\xd9\x74\xa9\x33\xf7\xd6\x9e\xd6\xba\xe2\xb5\xae\x9e\xd5\x71\xc6\xeb\xfa\x3a\x3e\xc7\x75\x9c\x54\x15\xa0\x25\xf8\xa1\xe9\xd7\x04\x44\x9f\x6d\xd3\xd1\x8c\xec\x17\x3f\xec\x19\xf9\x54\xfc\x00\x2f\xd1\x08\xf8\x43\x40\x1e\x0c\x2b\xa8\xf5\x12\x31\xd1\xc4\xd9\x70\x1c\x87\x4c\x9c\x43\x12\xa8\x91\x29\x6a\xfa\xf0\xe1\xa4\xef\x8e\x10\xdf\x7c\x56\x52\xc8\xbe\x20\x13\xa4\xa6\x94\xda\xd3\x64\x79\x20\x13\xf5\x8d\xcb\x02\xc2\x3f\x04\xe4\x52\x4d\x9b\x25\x90\x65\xb0\xc5\xc9\xa5\xd1\xd2\xcc\x29\x1c\xb9\xce\x2c\x86\x1d\xb7\xaa\x6a\xc6\x90\x80\xdb\xec\xf6\x30\x38\xe3\xfc\xda\x82\x23\x97\xc2\xfb\x36\xc9\x33\x06\xde\x9e\x60\x38\x8f\x0f\xb8\x68\x6b\x92\x47\x07\x6c\xb5\x38\x19\x98\x04\xc2\x7c\x54\x54\xcd\x7b\xaa\x4d\xd6\x9b\xda\xbc\x98\xa7\x9e\xa1\x54\x33\xdc\xe5\x44\x7b\x00\x30\x55\x4e\x29\xec\xb8\xa4\x30\x02\x7b\xb7\x92\x4d\x32\xb9\xa5\xaa\x8e\x2f\x18\xec\xc2\x35\xa6\x1a\x5f\xc5\x4a\xb3\xec\x40\x9b\xe6\xcf\x29\x6c\xbb\xce\x8c\x7c\xd5\x5a\x38\xa3\x29\xde\xdb\xb6\xbe\x51\xf4\xbf\x79\xeb\x3a\x37\x31\x9c\xb6\xd2\xce\x0f\x92\xc4\x8f\x50\xc2\xfe\xe1\xde\xdf\x41\x4f\xac\xc5\x75\x97\x10\x7d\xc3\xcf\xfb\xee\x7b\x74\xd2\x2f\x6b\x72\xed\x11\x05\xc2\x11\xb9\xef\xdd\x6e\xeb\x26\x57\x09\xd3\x0f\x8d\x41\x66\xe4\xd4\x85\x2f\x28\xd3\x3d\xe1\x9b\x26\x74\xc7\x41\xeb\x84\x62\x46\x66\x64\xd7\x88\x54\x7d\xf6\x69\x4e\xe1\x93\xdb\x7e\x7d\x71\x11\xc2\xfa\xa6\xfe\x46\xd1\x0c\x9e\x0c\x70\xff\xbe\xae\x6e\x37\xe4\x1b\x7a\x5c\x78\xbb\xb3\xba\xee\x93\x41\xd9\xbd\xaa\xfe\xaa\x5d\xad\xbd\x38\xfe\xe4\xff\x96\xbc\xf2\x14\x3e\xba\xed\xb7\xcd\x83\x3f\xe2\xd1\x2d\xf9\xe8\x42\xfc\xdf\xff\x3d\x84\x23\x22\xd4\x36\x0c\x1f\xc6\xa3\x23\x22\x41\x50\x5b\x52\x5b\xc9\x32\x1f\x56\x28\x8c\x3e\x6a\x89\x12\x75\xcc\xb0\xbf\xe2\x5b\x47\x64\x46\x3e\xb8\x10\xf7\xba\x9c\x48\x0a\x2f\x71\x0b\x35\x58\xbd\x59\xd1\xff\xbe\xea\xdf\x1a\x60\xba\x0b\xbd\x89\x5f\xee\xb1\xe7\x05\xec\x0e\xf9\x86\x66\xca\xd2\xfb\x41\xae\xab\x50\xf7\x77\x71\x4f\x47\x74\x91\x3a\x33\xf2\x5d\xc0\x00\x53\x2d\xa5\x8e\x60\xc0\xd5\xab\xb3\x00\x8e\x5c\x90\x29\x85\x2c\x5d\x81\x4e\x24\x10\x85\x61\x5b\x6f\xcd\x26\x59\x7a\x49\xf4\x45\xa7\x98\x53\x0a\x7f\x7e\xcb\x7d\x73\xd5\xae\xb0\xf4\x9e\x11\x4e\xbc\xb4\x7d\x57\xb4\x0d\xc3\x87\xc3\x63\x63\xc4\xa0\x4d\x96\x21\x59\xee\x3b\xae\xdf\x3e\x17\x59\x43\x74\xa6\x90\x7d\x34\x12\x3d\x24\x96\xb9\x3f\x49\x2d\x98\x91\x2c\x85\x37\x19\x10\xe1\x08\xda\x77\xaf\xf4\x65\x56\xe9\x05\xc4\x53\x22\x30\xe7\xa5\x7a\x9d\x7b\x0f\xe1\x4b\x76\xa6\x5f\xea\xec\x1e\x18\x42\xe7\xc8\x55\xef\x4f\xcd\xfb\x64\x3c\xe6\xb1\xcc\xdf\x1e\x9b\xdb\x38\xdf\xd2\x29\xb9\xdf\xba\x3a\x4d\xf7\x11\x5e\x3c\x79\x26\xa3\x8e\x70\x66\x84\xa5\x60\xb1\xc9\x24\x0a\x75\x9c\xa8\x47\x57\x69\x82\xd9\x1a\x5f\xd7\xd4\x7f\xa7\x01\xb9\x25\x5e\x8a\x2b\xb2\xad\x55\xb4\x61\xeb\x7a\xb3\x63\xc5\x48\x1c\x03\x3b\xb5\x31\xa3\xfb\xd4\x16\x90\xda\x71\x3f\x05\x76\x66\x63\x56\x77\xd7\xb3\x31\xb5\x8a\x7b\x68\x63\xb2\x77\x14\x6b\xe7\x14\x82\x56\xf0\x48\x42\x82\x10\xbc\xc6\xb5\x07\x1b\x05\x37\x75\x1a\x25\x01\x29\xc2\x31\xa1\x73\x48\x53\xe3\x94\x33\xb3\xe0\xc9\x60\xf3\x19\x7f\x8c\x8b\xe2\x5b\x80\x41\x0b\xf0\xc7\xa5\x05\x1b\x4f\xcc\xf3\xd8\xd2\x98\x5d\x6d\x89\x85\x07\xe5\x1b\x85\x28\x5d\x79\xb8\x9e\x2e\xf9\xaa\xe1\xcb\x8a\xdb\xd9\xaf\x5f\x4f\xe7\x6a\xd8\x4f\x7b\xf7\xb1\x4e\xfd\x4f\xa4\xad\x33\xcb\xe7\xa7\x2d\xd4\x2f\x63\x24\xa6\xa3\xa1\x3d\x28\xe2\xfd\xde\x65\x43\xbb\x31\xac\x0d\xf3\xf1\xf3\x35\x59\x1b\xe9\xf3\x41\xfe\xaa\x18\xec\x70\xbd\x78\x57\x0c\x78\xf8\x78\x98\xbf\x2b\x68\xc3\xf0\x59\xf1\xae\xa0\x0f\xeb\xc3\xf5\xfc\x5d\x41\x23\xd6\x37\x37\xf2\x77\x05\x9d\x58\x7f\x5a\xbc\xab\x04\x43\x18\x6c\xae\xc9\xfa\xca\x6c\x6c\x6c\xae\xa1
\x35\xc3\x34\xbd\x9b\x4b\xe9\xb6\xd6\x61\x6a\xf5\xd6\x66\xc4\xc7\xb3\x42\xd7\x14\xfe\x1c\xa7\x6d\x64\x76\x69\x69\x37\xea\x10\xa0\x77\x63\xfd\xb9\xbd\xfe\xac\x0e\x0b\x75\x9a\xbb\x31\xa8\x13\xdd\x61\x3d\x7c\xc5\x20\x5f\xda\x7a\x34\x8a\x7a\xe4\x89\x41\x3d\xca\xc4\x70\x79\x11\x97\x56\x10\xe9\xeb\x65\x7a\xe7\x85\xc0\x8c\x8c\xf5\xea\x40\xe6\x78\xcc\x98\x4b\x0e\xd7\xff\x70\xb2\x5f\xbf\x8e\xd1\xbb\xab\xbc\xe1\xfc\xc9\x3e\xd9\x12\x58\x62\x0b\x70\x87\x76\x3c\x37\x4c\x5c\xc2\x48\xb6\x36\xa4\x20\x1d\xd9\x43\x86\xf5\x22\x6d\xbf\x5a\x9a\x91\xa9\xf9\x1e\x9e\x4f\x61\xee\x7d\x26\xa9\xb1\x00\x62\xcd\x2a\x05\x96\x28\xf1\xc9\x54\x9a\xa4\x0d\x95\x88\x87\xbb\xbd\x4e\x1f\x6d\xfc\x52\x12\xdf\x6d\xcb\x99\x02\xee\x88\x17\x6d\x67\x5e\x3a\xa1\x5a\x03\x60\x8c\x48\xba\x36\x5c\xf2\x09\xe8\xa6\xa0\x2b\x10\xe9\xcc\x52\xf5\x90\x30\xb2\xf1\xbb\xec\xad\xd3\x86\x28\xd0\x45\xed\x89\xaa\x5a\x08\x8d\x8b\x80\xc1\x7b\x33\x12\xa5\x88\xe1\x6b\x10\x62\x4a\x86\x45\xc9\x93\x5a\xc9\x7a\x51\xf2\xb4\x56\xb2\x51\x94\x3c\xab\x95\x6c\x16\x25\xcf\x6b\x25\x8f\x8b\x92\x12\xb0\x4c\xd1\x13\x55\x54\x83\x30\x24\xdf\x37\x2b\xc8\xf7\x31\x9a\x50\xfd\x31\x18\xc5\xf6\xe0\x8f\x63\x64\xda\x46\x02\x79\xb6\xeb\x15\xad\xea\x67\xf6\x96\xdc\xa8\x25\x28\xc1\x54\xc7\xde\x58\xc1\xba\x5c\xa7\x80\x09\x27\x0d\x90\xbd\x72\xf3\xa7\x83\x92\xd9\x3e\x4e\x57\x39\xe3\x28\x32\xf4\x7b\xbc\xf6\x84\x6f\xfe\x2e\x30\x58\x93\x5c\x53\xa2\xde\xeb\xd6\x8f\xde\x90\x63\xf5\xd1\x4f\xc5\xa7\xbe\x17\x4f\x5f\x8a\xa7\xaf\xe5\xe7\xaf\xda\xf0\x3d\x92\x55\x43\xaf\x88\xab\xb8\xbf\x97\x7a\x22\x6a\x49\x5e\xeb\xc7\x9e\xa4\x8f\x9e\xf0\x4d\x04\xf2\xad\xb4\xf5\x6e\xd8\x55\x90\xb7\x26\x21\x73\x66\xe4\x4a\xb5\x4c\x42\xc2\x91\xd3\x50\x0f\x3d\x35\xb9\x8c\xa2\x29\xc2\x55\x5a\x98\xe6\x5e\x90\x0c\xca\xfb\x3a\xf1\x82\x97\x95\x65\xc5\x7b\x53\x82\x69\xc5\x29\x1d\x71\x74\x7a\xd8\x5b\xb1\xa7\xb7\x64\x2b\xc5\x7b\xad\xdb\x14\xed\x37\x5f\xa6\x58\x4c\x61\x80\x36\xd1\x4d\x2d\xeb\x89\x99\x8a\x43\x91\x13\x90\x4d\xbb\xde\xb9\xe9\x15\xca\xcd\xc0\x9f\xb8\x19\xe6\xe9\x8b\x79\x1a\x14\xfa\xab\x8d\xbf\xde\xcd\xa0\xd2\x7c\xfd\xbe\xcd\x07\x0b\xcd\x86\x65\xb3\xbd\x14\x86\x43\x1d\x88\x60\x11\x7d\x60\xd1\x7a\xa5\x64\xb0\x50\x32\xa8\x94\xd4\xba\xab\x94\x6c\x2c\x94\x6c\x54\x4a\x36\x17\x4a\x36\x2b\x25\x8f\x17\x4a\x1e\x57\x4a\x9e\x2c\x94\x3c\xa9\x94\x3c\x5d\x28\x79\x5a\x29\x79\xb6\x50\xf2\xac\x52\xf2\x7c\xa1\xe4\xb9\x2e\xa9\x21\x19\x3d\x21\xe3\xad\x38\xa7\x70\xde\x0e\xef\xc8\x54\xa7\xac\xe4\xf7\x7e\xb2\xd0\x16\xc0\xae\xec\xbc\x44\x50\xf0\xa6\x9a\x80\xc5\xc0\xbe\xa3\xb6\xf6\x56\x3d\xfe\xd0\x7a\x96\x13\x14\x92\x3e\xa7\x2b\x63\x2e\x1e\xdb\x67\x68\x4b\xc0\x4e\xd5\x43\x4c\x61\x6a\xdf\x92\xf3\x14\xd0\x67\x17\xd0\xc2\x29\x48\xe1\xe9\x3a\x7f\x8c\x10\xce\x29\xa4\xaa\x22\xb2\xfe\x96\x05\x9c\x11\x8c\xf6\x44\xa9\xe2\xbf\xcf\x38\x39\x49\x17\x5b\x50\xc5\x8f\x5f\x4b\xc5\x8d\xeb\x52\xd5\x0b\xde\x9b\xcc\x29\x1c\xb5\x62\x20\x44\x1c\xbd\x2f\x19\x41\xfd\xcf\x4e\x7a\x87\x43\x8e\x31\x20\xad\xc4\x78\x79\x9f\xb6\x2b\x60\x3c\x9d\x7b\xe2\x14\xdd\x55\x94\x2c\xe1\xa0\xa0\xe0\x39\x28\x3f\x24\xce\x51\x46\x22\x46\x64\x4a\x98\xb6\x77\x28\x7e\x7b\xa5\x4e\xdf\xac\x5c\x96\xaf\x1c\x2f\x56\xce\xc4\x5f\xad\x2f\xd3\x8c\x1c\xa5\xda\x6f\xa2\xba\x56\x9e\x5e\x20\xbd\xda\xc5\x1a\xb1\xca\x1a\xdd\xc4\x0e\x6b\x0d\x50\x5b\xdd\xc7\x4c\x8d\x84\xab\xae\xa5\xea\x92\x81\x7b\xa5\xe4\x52\x8c\x25\xf0\xd1\xf6\xc0\xfd\x64\x63\xc4\xe3\xab\x55\xf0\xe0\x06\x36\x07\xf7\x42\x75\x31\x56\xad\x3f\x6b\xb1\xf6\xa3\x70\x30\x00\x44\x98\xbe\xfe\x9e\xb1\x08\x63\x3f\x24\x09\xc2\xc7\x83\x81\x89\xe2\x10\xa6\x47\xfc\x82\xdf\x58\x90\x98\x20\x0e\x53\x16\x65\xbc\x8c\x35\x51\x8d\xb7\x7a\xa5\x2f\x70\x77\x84\x53\x0
d\xd3\x7a\x57\x10\xda\x3c\x2f\xdd\x5f\x0a\x40\x9b\x27\xb9\xfb\xfb\xd1\x67\x5b\xe2\xc3\x7e\x30\xe6\xd8\xef\x52\xa7\x3d\x06\x07\x06\xed\xd8\x31\x41\x3b\x94\xcc\x9c\xaf\x46\x21\x55\x17\x2f\x4a\xf1\x3b\x7f\x55\x0b\xf2\xb1\x14\xd2\xa3\x94\xfe\xcf\x12\xf2\x51\xe0\x48\x6f\xca\xa0\x1b\x70\xd6\x48\xc3\x5a\x74\x0b\x16\x88\x6f\xd5\xac\x97\xe7\x09\x48\x28\xad\xdb\xdf\xa5\x5a\xe3\xbb\x9d\x3a\x44\x24\x4e\x12\xd6\x83\x45\xc7\xe4\x3a\x26\x22\x21\xdb\x4c\xf2\x7e\x9c\xcc\x88\xbe\x8c\xa5\xf0\x36\xad\x6a\xc4\x4d\xa4\xf5\x2d\x35\xdb\x44\x68\x8d\xf8\x69\x7a\x57\xfc\xc9\xdc\x91\xc7\xc4\xb5\x19\x99\x10\x43\x23\x13\x22\x69\x64\x5c\xad\x46\x13\x49\xa6\xc4\x78\xc6\xf4\x19\x70\x34\xd7\x63\x98\xd9\x05\x3d\x52\x98\xf1\x4c\xc9\xcc\x5f\x6e\xfe\x4a\xf3\xd7\x20\x8f\xef\x42\x89\xf7\x37\xe4\x4a\x9d\x5d\xcb\x82\x07\x43\x03\xe9\x4a\x6c\xff\x91\x3a\xea\xb8\x59\x96\x3a\x6f\xea\xdf\x33\x5b\x27\x4b\x37\x88\xee\xca\xfe\x2e\xd4\xb1\xc3\x97\x73\xd8\x6d\x52\xef\x1c\x2a\x84\x12\x53\x88\x18\x32\xd2\x18\x7c\x06\x5d\xc5\x74\x16\x82\xf2\xe6\xf1\x05\x5e\x65\x96\x57\x24\x8a\xf3\xfc\x82\x77\x99\x98\x83\x05\x15\x7b\x0a\x01\x5a\x5b\x2c\xfe\x4d\x76\x5c\xde\x09\x63\xcc\x3d\x34\x61\xa9\xb4\x34\x42\x44\x6d\x72\x33\x3a\x64\x8e\x50\xe8\x10\xad\xde\x8e\xb5\x49\xf7\x29\x9a\x74\xbb\x87\x68\xdb\xc9\xce\xf2\x0f\x1f\x65\x24\x20\xa7\x29\xac\xf4\x1c\x3a\x21\x3f\x52\x50\x8b\x23\xd4\xda\x60\xbc\xec\x6c\x11\x17\x71\xbc\xb9\x1e\xe3\xe4\xa7\x8c\xc8\xbe\xfb\x19\x1f\x38\x3e\x44\x8c\x64\xf8\x30\x23\xbb\x29\xa8\x47\x10\xea\x37\x42\xde\xa7\x56\x22\x70\x8c\xbf\x90\x33\x57\xac\xdb\xd7\x76\xa2\x2a\x5b\x25\xa5\x5b\xf2\x55\xb1\x1a\xc3\xf5\xdf\x05\xf0\x9a\xfd\x42\xc0\x14\x7a\x67\xce\x70\xfd\x77\x35\xbe\x61\x6f\x48\xd7\x94\x78\x96\xf5\x59\x42\xd5\x0f\x01\x9e\x92\x1a\xb5\xce\x7c\x1d\x18\x55\xe2\x23\x73\x50\x36\x64\x30\x5c\x57\xa2\x57\x29\x0f\x30\x23\x0f\x28\x62\xb0\x36\x23\x9f\xd4\x4c\xd9\x27\x2d\x12\x20\x81\xa8\x71\x4e\x7c\xed\xe9\xef\xa2\x8e\xb1\xf8\x1a\x22\xe6\xef\xcd\xd4\x7d\x29\x00\xdc\x12\xdf\xa9\x75\x5e\x9c\xae\x15\x42\xd2\xa6\x1d\x3b\xc3\xc7\x20\x1c\x8c\x15\x6b\x02\xb6\x34\x44\xf5\xd8\xc8\xeb\xa1\x3c\xd1\x5e\x6f\x3d\xaf\x87\xe2\xc7\x8a\x8a\xc3\x05\xa6\x53\x02\x6e\xc5\x46\xce\x7d\xaa\xf9\x28\x80\x78\x6d\x1e\x97\xd8\xca\xb2\xcd\x70\x75\x9b\x81\x1d\x3b\xeb\x6a\x7e\xeb\xab\x46\x63\x2a\xad\x5a\x02\x5c\x81\x21\x08\xe7\x69\x73\xa5\x32\xd2\xc9\x8a\x5a\xe8\xdc\xb2\x42\x02\xfb\x9e\x2a\x50\x8a\x75\xa6\x7c\x45\x82\xef\xa8\xbb\x51\xa9\xfb\xa1\xfd\x04\xdc\x92\xc3\x34\x8f\x14\x56\xc8\x3f\x1c\x24\x1d\x49\x1b\x3b\x8a\x71\x19\xd1\x41\x71\xbf\x01\x75\xf5\x1c\x2d\x04\xa4\x95\x70\x7b\xcb\x83\x7d\xd3\xd2\xf2\x7b\x63\xcb\xea\xd0\xbf\xd4\x5a\x66\xb9\x45\x47\x83\xe2\x27\x0f\x23\xcb\x17\x2c\x31\x76\x5b\xbc\x4e\x74\xa6\x80\x79\xc5\x23\x38\x1e\x1d\xaa\x33\xfc\x03\xb8\xfa\x73\x45\xa9\x7d\x48\xf0\xf1\x07\x05\x7c\x31\xd7\x02\xe5\x38\x00\x8d\xe1\xf1\xc0\xbf\x68\xf7\xce\x38\x21\x19\xfc\x64\x89\x62\xe7\x2e\x99\xf6\x2d\xac\x49\x34\x95\x1a\xb8\x13\xeb\x6a\xe2\x7a\x15\x86\xd0\x1b\xaa\x5f\xe5\x7b\xa9\x49\x68\x0d\xe2\x75\x17\xdf\x4b\xeb\x85\x9a\x00\x94\x97\x5f\xcb\x39\xad\xfa\xf4\xf1\x26\xf3\xe2\x8a\x97\xee\x05\x46\xe6\x2e\x8c\xa0\x5f\x94\x88\x82\x08\x25\xc8\x63\xb9\x5a\x90\x2f\x19\x51\x2f\xf1\x01\x07\x3b\xc4\x71\xc7\x8e\x62\xed\x91\x65\xef\x21\xe3\x2f\xd6\xb4\x77\x25\x6b\xbd\x0e\xea\x77\x47\x42\x7b\x51\x70\xbc\x08\xd5\xda\x04\xcf\x59\x4a\x68\x94\xf5\xd9\xf7\x5a\x88\x36\xdc\xba\x5b\xb5\x25\x61\x25\xcd\x35\x28\xda\x5a\x29\x28\x7c\xb0\x58\xee\x4c\x5f\xa4\xfa\x29\xfa\xd1\xc9\xbd\xaf\x65\x3e\xf9\x05\x82\x9b\x17\x23\x87\x4e\xe7\xc6\x9d\xbd\xb5\x07\x9d\x62\xbb\x62\xc5\x7e\x1e\x82\x5e\x3b\xa1\x17\xe7\x02\xe1\x82\x
8e\xb0\x91\x96\xb0\x04\x02\xde\xc2\x67\x08\x85\x04\xc3\x66\x79\x8e\x57\x5e\x18\xeb\x9d\x0d\x6d\x4f\x4b\x81\x4a\x02\x40\x30\x55\x62\x5f\xa2\xc4\x3e\xf5\x2e\x01\x04\xe0\x79\x21\xfd\x6a\xd7\x71\xb5\x36\x01\xba\x07\xb9\xe8\xa9\xdf\x2e\x51\xfd\x31\x18\x0d\x6c\x24\x65\xc6\x30\x3d\x71\x6e\x89\x80\x10\x82\xb6\xdb\xb3\x78\x34\x23\x6f\x52\x98\x91\x14\x9e\x0c\x00\x23\x67\xdb\x33\xb2\xaf\xdf\xac\x6f\xea\x37\xf3\x92\xb9\xd4\xf3\xb8\xb2\x93\xbe\xab\x46\x9d\x60\x34\xfb\x22\xdd\x13\x0e\x2f\xd0\xdf\xac\x0c\x7b\xc9\x05\x26\x3f\xfd\x72\xc1\xf7\xaf\xbe\xd8\x8a\x1d\x46\xbe\xa0\x88\x6f\x31\x23\xf1\xe8\x63\x6a\xbf\x4a\xc1\xd5\x00\xb7\x34\xae\x40\x8f\x2b\x28\xa2\x69\x45\xed\xba\xa0\x3a\x67\x3a\xc1\x0f\xe4\x1c\x28\x6d\xe2\x2e\xaf\x4c\xac\x42\x11\x39\xbb\x02\x64\xe4\x1c\x08\xe0\x51\xeb\x85\xd7\x91\x2d\xfa\xec\x08\x2d\xe3\xfb\x2e\x05\xcf\x16\x18\xfd\x4b\xe8\xc0\x61\xe0\xbe\xb6\x85\x8e\xfe\x25\xfa\x4c\x8d\x36\x8b\x9c\x1f\x02\x58\xe4\x9c\x0a\xf0\xa2\xa6\x43\x7f\x4b\x58\x04\xc3\xc1\x10\x37\xf8\xd7\x2f\xfc\xf9\xe4\xb9\xd6\xcb\xe5\x52\x32\x46\xb0\xc2\x92\xcd\x0d\x8c\xf0\x6a\xea\x6d\x3e\xc6\x5f\x23\xb9\x36\xb4\x25\x5e\xf3\x65\x91\x8e\x00\x5b\xd1\xa6\x09\x3a\xea\x89\x72\xcd\x63\x05\x44\x51\xbb\x86\x53\x75\xfb\x04\x3f\x3f\x9a\x11\x2f\x02\xec\x13\x9d\x51\x0c\x28\x79\x51\x0e\x8b\x61\xb4\xd2\xe6\x91\xa3\xf3\x06\xd7\xb6\x1a\x4d\x91\xe4\xd1\x1d\xed\x43\x02\x26\x28\xec\x0b\x51\x45\x75\x6a\xe8\xcc\x44\x9a\xd2\xc9\x18\x24\xfd\xa3\x68\x10\x57\xa3\x4d\x69\xff\x06\x1e\x15\xd1\x47\x83\x68\x75\x94\xc7\x6a\x82\x0a\x53\x88\x17\x8d\x51\xab\xe8\xa3\xa3\xe6\x2b\xf1\x25\x71\x66\x24\x89\xc0\xd3\x19\x2a\xc3\x80\x24\xa5\xf5\x49\x3e\xab\x1b\x12\x44\xc0\x10\x10\xfa\xec\xa8\x47\x92\x35\x86\x31\xdf\x81\xf5\x3d\x9a\xa7\x9c\xe8\xbb\x90\x34\xc7\xfa\x42\xdc\xab\x6b\x79\x95\x3a\x53\x12\xaa\xa5\xd7\x01\x44\xd4\xa8\x5e\x14\x2e\x55\xcd\xeb\xca\xd4\x32\xbd\xc8\xcc\x46\xb0\x90\x60\x74\x2f\xfc\xb0\x96\xc2\x5e\x54\x82\xad\x70\x8c\x6a\x50\x6f\x5d\x2c\x72\x86\xe2\x99\x5e\xe6\x04\xb4\x13\x78\xab\xb9\x40\x87\x3b\x3f\xd9\x3b\x3c\x65\x14\xd8\x17\x3b\x06\x36\x54\xd2\x4f\x26\x29\xb8\xcc\xbc\x77\xb9\x7a\x25\x25\x05\x6f\xdb\x16\xe0\xc6\xfa\xfd\xbc\x9e\xaf\x41\x83\xe4\xb3\x22\x52\x5a\xc5\xaa\x14\xcf\x85\x28\xf9\x79\x7d\x96\xd6\x07\x3a\xaf\x1c\x1d\xe1\x8a\xf1\xbe\xb7\x0d\xbc\xef\x32\x0c\x9a\x24\x23\x6d\x4e\x47\x21\x56\x93\x53\xf5\x51\x45\xba\x5c\x3f\x46\x4e\x56\x44\xf0\xac\xde\xe2\xf9\xb3\xa6\x06\xec\x5d\xde\x60\xbd\xd2\x20\x20\x6e\x59\xe3\x8b\xea\x97\xab\xa7\x21\x1c\x12\x01\x83\x6a\x48\x94\xd6\x9a\xd8\xe7\x70\x50\x2c\x80\x6a\xa3\xcd\xa1\xe6\x14\x76\x85\xb6\xc4\xb8\x82\x2b\x0a\x69\xe4\x2c\x84\xae\xfb\x22\x55\x85\xbf\x13\x87\x2d\x35\x71\xd8\xd2\x54\x31\x0f\x07\x0d\xe1\x9e\xcc\x3f\xfb\x21\x0c\xa0\x21\x81\x49\x25\x7a\x78\x16\x36\x59\xe9\xee\x65\x24\x5e\xd3\xb9\xbb\x54\x4f\x69\x04\x7e\x40\x31\xa2\xf8\x16\x7e\x3b\x0f\x4f\x65\x62\x2a\x45\x51\x11\x53\xe9\x40\x50\xd8\x49\xc9\x54\x12\xeb\x4c\x24\xf1\x45\xc7\xcf\x04\x1a\x35\x74\x74\xbe\x70\x0c\xf4\xed\x47\x2b\xcd\xb7\x99\x6f\x8f\x13\x70\x3f\xdb\x31\xca\xc0\xd3\xc8\xc4\x29\xea\xde\xd9\x8c\x60\x84\x53\x14\x84\x15\x85\xd1\xe9\x6c\xed\x69\xa4\x65\xe9\x5a\x73\xd9\xe8\x3e\xac\x97\x3e\x62\x84\x39\xb8\x9f\x5c\x3d\xcb\x3e\x3b\x43\x71\x3b\x5b\xe6\xb8\x6e\x49\x1c\xc1\x51\x8a\xa2\x40\x8d\xf3\x92\xfd\xb4\xef\x7e\x7e\x11\x3b\x9c\x91\x6a\xbc\x3e\xc5\xce\xe9\x32\xb4\xf5\x24\x15\xba\x2a\xe1\x67\x6a\xcf\x88\x1f\x41\x06\xb2\x9f\xa2\xaa\x13\x7f\x2a\x2a\xe9\x1e\x56\xf8\x64\x93\x3f\x06\xcd\xc3\x71\x88\xee\x21\x0e\x11\x07\xcc\xfe\x4f\x0c\x95\x9d\x95\x43\x65\x67\x15\x7e\xbc\x6d\xa8\x9e\x13\x45\x7a\xa8\x24\xd3\x83\x0b\x52\xf0\xd0\xe4\x49\x7d\x15\xd7\xdc\x3e\
x49\x51\x09\xd5\xf4\x5d\xb6\xf8\xdd\x6c\xf1\xbb\x1b\x76\xbd\x49\x37\x82\x28\xaa\xb4\xe9\x46\xb0\x9b\x96\xc3\xc1\xd6\x7a\xc1\x55\xcd\xda\x8a\x6f\x2e\x74\xc7\x4e\xf5\x37\x63\xcd\xbe\x9c\x56\xd8\xc8\xa5\x6a\xdd\x08\xa6\xb5\x5a\x4f\x16\x6b\x1d\x2f\x74\x76\x5c\x54\x7b\xba\x5c\xad\xe8\xac\xac\xf5\xcc\xd6\x7e\xaf\x4b\xfb\x7d\x48\x64\x7f\xaa\xc4\x36\xfc\x73\xd5\xaa\x3e\x3b\xc7\x35\xc0\xf9\xe3\xea\x94\x52\x02\xca\x09\x0b\x6c\xfe\x39\xc9\x57\x47\x6b\xe0\x85\x76\x25\xd3\x4b\xa9\x7f\x9b\x92\xb8\x28\x89\x22\x68\x50\xf1\x0b\x30\x1a\xd8\x65\xa8\xf2\x9c\xa4\xef\xa9\x3d\xe2\xe8\xc6\xeb\x1e\xda\xc2\x49\xfa\x4c\xab\xfa\x8b\x5b\xa4\xc4\x21\x5e\x15\x9e\x34\xd4\xa8\x09\x2b\x51\xc9\x53\xa7\xd7\xcb\xaf\x24\xcc\x01\x5e\xfc\x50\x71\x0f\xe1\xe1\x0a\x79\x53\xaa\x6d\xa0\x96\xb4\xde\x58\x5b\x7f\x1c\xad\x2b\xa2\x55\x5e\x4a\x55\x7f\x10\xad\x16\x9b\x91\x83\x14\xd0\xaf\x44\xf4\xd9\x66\x11\x48\xb8\x66\x43\xdd\xb5\x6f\x11\x61\x9d\x53\x60\x9b\xf6\x0c\x9f\x37\x29\xb8\xef\xec\x2f\x9c\x58\x5b\x49\x16\xf9\x9d\x38\x91\x9d\x34\x73\xc7\xa1\x44\xdd\xa5\xc2\xa9\x90\xe7\x16\xee\x84\x29\x96\xdf\x72\xd9\xc1\x40\xfc\x7d\x2b\xf7\xf1\xc9\xaa\x11\x8b\x73\xdb\xf4\x77\xb6\x97\x54\xec\x98\x73\x03\xf5\x8f\x19\xec\x22\xcf\x96\xa4\x3a\x68\x21\xbc\xc5\x13\x78\x6a\xcc\xb9\x5f\xa1\x39\x37\xbe\xa8\xf8\x1c\x3c\x37\x87\x7c\xe3\x81\xa3\x76\x45\xa1\x90\x7e\x77\xf4\x39\xb1\x4b\x6b\x7d\xb9\x56\xcd\x91\x6c\xad\x79\x95\x53\x5d\x19\x93\x92\x83\x8b\x53\xc2\xaa\x66\xc0\xda\x39\x81\x99\x71\xde\x92\x77\xae\xf1\x24\x71\xaf\x14\xcb\x74\x4c\x71\xe0\xdb\x69\x83\x57\x43\x43\xd2\x4b\x24\xa5\x65\xda\xcb\xbe\x37\x55\xcb\x7e\x43\x3e\xa7\x26\xe6\x86\x87\xb1\x57\x05\x82\x85\xb7\x63\xe3\x1c\x74\x36\xcc\x46\xaf\x41\x01\xb7\xe4\x4c\x2d\x19\xce\x5e\x8f\xf1\x03\xae\xe5\x92\xa9\xff\xc6\x03\x23\x68\x8f\x54\xb3\xcf\x89\x92\x6a\x1b\x46\x88\x6c\x81\x34\xe9\xf7\x15\x4f\xa8\xad\x15\x97\xc6\x3c\x23\xef\x53\xe4\xd3\x6b\x23\x15\x7d\xf7\xdd\x1c\x16\x2d\xd4\xcf\x5c\x5a\x31\x58\xd1\x9c\x73\x03\x78\x7a\xcb\xe0\xb9\xa7\xc1\x90\xc5\x08\x64\x7e\x98\x4e\x22\x76\xdb\x61\x41\xa0\x63\x17\x61\x2e\xef\x74\x25\x30\x42\x0b\x88\x17\x40\xea\x2d\x7b\x38\xec\x69\x20\x2d\xc1\xf3\x6b\x0e\x9e\xe8\x2b\xe3\xa3\x1e\xfc\x8a\x2e\x87\xc5\x28\x3b\x88\x57\x38\x84\xaa\x1a\x5b\xcb\x4e\x5a\x4b\x7d\xa8\x3d\x51\x0b\x3d\xd6\x48\xba\x38\x99\x6e\xb2\xe0\xc6\x37\x30\x98\x78\x46\xbe\xa4\x45\xc5\xfe\x74\xc9\x6d\x48\x75\x15\xe2\x9a\x6f\x42\xb2\xe0\x88\x37\x1c\xda\x59\xe9\x31\xa3\x97\x2a\xc1\x18\xe2\x4b\x2e\x2e\x78\x5f\xa3\x60\x61\x61\x7d\xbe\xeb\xf5\x49\x9a\xfc\xe7\x72\xf7\x85\x62\xbe\x68\xb3\x15\xad\x0e\x25\xb6\xfd\xfa\xdd\xeb\x93\xd7\x8b\xd1\xc4\x26\x51\xc5\xcd\x40\xdf\xdd\x19\x1f\x83\x59\xf4\xd7\x6e\xd2\xfa\xae\xd7\x72\x99\x76\x11\x81\x80\x49\xa4\xaf\x34\x6e\xff\x4a\xb7\x0d\xc1\xa7\x96\x6e\xe9\xce\x12\x82\x17\x75\xe8\xb2\xdd\xba\x02\x17\x3a\x94\x97\xfb\x41\x7b\x57\x5c\x47\x8d\x06\xc8\x2f\x99\xce\x8c\x90\xfb\xbc\x5e\xda\xfb\x82\xc4\x14\xd8\x8d\x1d\x83\x7b\xa2\x2f\x86\x5f\x46\xad\xe6\x87\xcb\x19\xc2\x59\x96\x5f\xd4\xd7\x20\xd2\x4d\x6a\x6a\x52\x2f\xa9\x6f\xf3\x17\x93\x72\x65\x4e\xe1\x38\x72\xce\xc8\x9f\x43\x58\x87\xc1\x37\x0a\xaf\xdb\x34\x02\x0d\xe9\xf3\x6b\xc0\x76\x63\xcf\xc8\xcb\xa8\x59\x5b\x7c\xce\xe1\x3a\x52\xd3\x3d\x8e\xe8\x1c\xe2\x9a\xbf\xed\xd2\x79\x3c\xb7\x8d\x33\x9d\x86\x6d\x77\x0f\xb1\xde\x8d\x3e\xe6\xb7\xe4\x16\xd9\x38\xb8\x5e\x9e\x7a\x4e\x22\xf6\x14\x71\x37\xdc\x79\xdf\xf5\xe8\xb2\x1f\xad\x66\x35\xbd\x45\xd2\x91\x37\xae\x7c\x4d\x53\xbe\x5b\x32\x8b\xd0\x4a\x60\x2a\xc9\x4b\x25\xcd\x78\xa3\x92\x66\xf1\x2a\xcd\xb2\xa8\xfd\x39\x29\x49\xde\x66\x71\xd8\x17\x4e\x6b\x88\x9c\x0a\x43\xc6\xc5\xd3\xce\x6d\x37\x0c\x92\xbe\x47\x51\x3d
\x21\xdc\x5a\xa7\x80\x7e\x6e\x61\xee\xe7\xe6\x39\x6c\xc9\xcf\x2d\xa1\xa0\x4e\x89\xdd\x80\x04\xc2\x39\x46\xc4\x6c\x3e\xea\x55\x9f\x3c\x34\x68\x88\x56\x18\x5c\xb5\xef\xfe\x12\x0a\x1d\xde\x89\x42\x37\x97\x77\x0d\x5d\xa6\x97\x71\x6c\x95\x16\x56\xb0\xad\xbb\x6f\xa3\xe9\x89\x6e\x63\x57\xc9\x81\x2a\x63\x19\xe1\x86\x28\xe6\xd8\xef\x98\x69\xa2\x20\x15\x51\xe0\x39\x51\x40\x51\xbc\xaa\xdc\xcf\x1a\xfc\xe8\xd4\xe8\x86\x08\x7e\x67\x29\x48\xc8\xe0\x35\xab\xa0\xcf\x55\x2d\xca\x3d\x5a\xe4\x6d\x32\xed\xe9\xf6\x43\xac\x34\x29\xfa\x68\xc7\xc0\x4e\x6c\x09\xee\x99\xcd\xc1\xfd\xaa\x31\xc5\xa9\xb1\xf5\x48\x44\x78\x11\xc6\x2c\x5a\x95\x7a\x77\x62\x4c\x2b\xbe\xaf\x4a\xa2\x8c\xf6\x29\xdf\xd5\x67\x3e\x1a\x9f\xfd\x56\x15\xea\x1b\x5b\x80\xc9\xe7\x0b\xbb\x66\x1c\xcc\xf7\x05\x4f\xd3\x76\xfb\x14\xad\x82\x58\x6d\x91\x22\x38\xf3\x6f\x9b\xec\x51\x52\x2e\x65\x54\x33\x3b\xc9\x2d\x52\xfc\x30\x65\x6e\xf4\x4f\x25\x44\xbe\xd2\xe6\x38\x13\x8e\xd6\x1f\xdb\x82\x9c\x25\x64\x57\xc9\x28\x79\x32\x96\xdc\xf4\xe4\x40\xe8\xcc\x2d\x7a\x72\x65\xc6\x16\x97\x93\xef\x3a\x1d\xf2\xae\xb8\xc3\xf8\xe2\x27\x3b\xb0\x25\xb0\xf7\x76\x06\xec\xd0\xe6\xc0\x9e\xd9\x0c\x5c\xd4\x9f\x7d\xd1\x4b\x7a\x60\x96\xf4\x22\xf9\xc4\x45\x1a\x26\x71\xb9\xa8\x6e\x16\x46\xfe\x36\x5a\xf7\x2c\xbc\x3a\x4d\xb9\xa8\xbc\x12\x2c\xf6\x2e\xab\x69\x69\xa6\xe1\x62\x3f\xd3\x4a\xc7\x2e\xc7\xb9\x6a\x1b\x97\xad\x28\xb7\xbe\x91\xa1\x9a\xe2\x41\x6e\x5e\x13\x07\xe1\x85\x05\xa7\x62\xa1\xfd\x5e\x1c\x24\xb8\x28\xba\x52\x94\xa5\x18\x11\xee\x4a\xf5\xf9\x43\xf7\x09\x7b\xd1\x3d\xfc\x71\x72\xee\x40\xbb\xa0\xbf\x0f\x20\x86\xad\x68\x91\x2e\x0b\x45\x97\xb5\x35\xf8\x61\xb4\xea\x0a\x4c\x0b\x00\x7b\x11\x14\x0c\xf6\x15\x83\xaf\x8a\xc1\xce\x11\x84\xfb\xd1\xfe\xe9\x62\xfe\x8f\xb9\x41\x7b\x14\xce\xa3\xf6\xe8\x7c\x4f\x75\x04\xa3\xcd\xe5\xab\xca\x27\x06\x5b\x4c\xc9\x67\x17\xae\xd5\x21\x77\x37\x0c\x6b\xcf\x41\x52\x1d\x54\xaa\x8e\x15\x36\x90\x29\xf3\x32\xdb\x03\xef\x93\x7d\xca\x09\x92\x27\x56\xf2\x76\xc8\x70\x43\xe2\x4c\xc9\xeb\x08\x5e\x22\xea\x38\x37\xbd\x2a\xcc\xa0\xc9\x46\xdd\xdf\xd4\x74\xf7\x51\x75\x07\xde\xb9\xbe\x06\xd2\x48\xef\x80\x03\x5b\xb6\x49\xcd\x1b\x7e\xb2\xb7\x34\x73\x58\x48\x00\x5f\xb5\x8f\x6a\xee\xa2\x5a\x53\x20\xa0\x27\xb3\x73\x4b\xae\x22\x30\x01\xc5\x93\xb9\x1a\x62\x57\x0f\x2d\x5c\x1e\xda\x27\xfb\x03\x27\x8a\x3c\x75\xed\xb0\x1c\xd5\xa7\xea\xa8\x06\x76\xd8\x80\x40\xbd\x4f\xf6\x2b\x4e\x42\x9a\x0f\x0f\x77\xf3\x18\x73\xd4\x86\x5a\x8e\xdb\x52\xc5\x8b\xe2\x65\xd0\xdc\xd1\x77\x4e\x02\xba\x38\xcf\x5d\x4e\x7e\xa8\xb7\x8d\x2e\xee\x79\xbb\xd7\x8b\xcc\xfb\x66\xb3\x13\x7c\x5c\xb2\x61\xc3\x05\x71\xef\x30\x21\xd5\xb2\x16\xaf\x7b\x59\x89\x1c\xb1\x6c\x7c\x5c\x0e\xe6\x2a\x69\xa4\x9f\x33\x72\x68\x44\x8e\x82\x70\x06\x1a\x26\x35\xc7\xe1\x6e\xa8\xcf\x64\x39\x4c\x06\x4d\x30\x89\xd7\x73\x05\x14\x6a\x32\xe8\x6a\x10\xd4\x9d\x78\xe7\xba\x13\x03\x82\xee\xf2\x3e\x9f\xdb\x6e\x33\xd0\x3d\xb6\x5d\x75\xac\xf3\xe1\x78\x9f\x5b\x3a\xf8\x6c\x3a\x60\x0b\x9c\x5a\xaa\x81\xcd\xb4\x35\x60\x96\x2e\xb7\xee\xda\x69\x33\x74\xad\xdb\xa9\x73\x43\x2e\x8b\x2e\xbe\xe6\x73\x68\xe8\xe4\xab\xe9\xa4\x18\x42\x1d\x24\xdc\xe7\x4b\x8c\x4e\xdd\x3b\xdf\x9b\x2d\x55\x79\x5e\xaf\x12\xd5\xab\xac\xd7\x79\x58\x2f\xac\x54\x69\x91\xd2\x92\x45\xd6\xed\x24\x5a\x9d\x3b\x71\x66\x73\xf8\x61\x4b\xc8\x74\x42\xac\x0c\x5e\x6a\x8a\xf3\x39\x6a\xf6\xcf\x73\x1f\x3e\x24\x96\xa5\x79\x2f\xad\x0d\x74\xf3\x6c\x06\x7d\x06\x9f\x23\x54\x8f\x50\x7b\x67\x0e\x47\xad\x28\xf9\x46\xe6\x37\x87\x23\x1d\xea\xd6\xce\xd3\x21\x98\x40\xc4\x3b\x8b\x1f\x97\xe2\xb6\xd2\xd4\xe7\x0d\xa9\xbf\xe8\xdc\x63\x86\x6d\x28\xf2\x2c\xcd\xe7\xf0\xbe\x0d\x85\xeb\xa8\xaf\x8e\x65\x12\xd3\x49\x35\x2f\xa9\x03\x03\x3e\x9
0\x79\x54\xc0\xd8\x51\x8f\x0c\xa4\xb3\x13\xe1\x15\x73\x7e\x37\x57\x49\x73\x28\x5e\x48\xf4\x5f\x8f\x55\x9d\x78\xe1\xf2\x4d\x1b\x5f\xe8\x68\xbf\x70\x14\xe9\xf0\x28\xd5\x0c\x02\x14\xde\xb5\xae\x51\x4b\x7c\xf6\x07\x71\xe9\x40\x6f\x32\x49\x2d\xe6\x08\x10\xfd\x2c\x8f\xd0\x48\xcb\x35\xc3\xab\x8d\x17\x79\xcc\x32\xde\x67\x0f\x1f\x2e\x46\x71\x2b\xea\xc4\x8e\x9c\xcf\x49\x4c\xa6\xe4\x24\x82\x9d\x25\x07\xc9\x3d\x4d\xa3\xcb\x89\xea\x1d\x7f\xf8\x50\x47\x29\xeb\xb3\x51\xdc\x77\xed\x98\xce\x95\x2c\x29\x29\xa8\x75\x20\x98\x33\xfb\x88\xf6\xbb\xa3\x28\xb1\x31\xf0\xc0\xfb\x08\xa2\xc4\x84\xde\x7d\x68\x69\x54\xa9\x76\xff\x31\x64\x52\xcb\xd6\x67\xd1\x8a\x74\x0f\x39\xdb\x74\x61\x4b\x38\x57\x80\x7b\x63\x73\x38\xb2\x3d\x38\xb6\x19\x9c\xd8\x19\xec\x6a\x20\xde\x8e\xfe\x42\x1e\x34\x49\x8a\x38\xbd\x22\x30\x4a\x66\xcc\xe5\xbf\x42\xcf\x71\x12\x81\xe8\xbf\x04\xd1\xcf\x40\xf4\x7f\x80\xe8\xcf\x20\xbf\x47\xc2\xb4\x84\xd1\x42\xa6\xf6\x22\x4f\x7b\x7b\xfe\xf1\xb8\xff\x12\xb7\x32\x43\x03\xfa\x1f\x68\x3f\x3f\x7b\x51\x95\x99\xdf\xe2\x82\x7f\xa6\xe0\xe9\x3d\x2a\xb9\x49\x3d\xdc\xe6\xd4\x8e\x33\xb2\x1d\xa1\x27\xcd\x5e\x02\x3b\x19\xec\x86\x44\x5f\xbc\x31\x98\x84\xc4\x0a\x58\x94\x72\x4b\xeb\xd4\xe1\x34\x5a\x11\x3d\xa3\x31\xbd\x7c\xf1\x8d\x77\x89\xa2\x27\x78\x1c\x57\xde\x24\xcf\xc8\xa9\x1a\x0d\x91\x8e\x68\xbc\xbe\xfc\x51\x89\xea\x89\x93\x54\xcb\xa2\x16\x45\x40\xdc\x9f\x01\xc1\x6b\xd3\xcf\x94\xa0\x65\x0f\xfd\x86\x16\x26\xb9\x23\xf9\x6e\x65\xfe\xde\x8a\x44\x73\x8d\x2b\xad\x1d\x15\x3e\x1b\x94\xa0\x31\x00\x66\xd6\x2b\xa9\xc2\x05\x89\xc1\xa3\xa3\x7c\x64\x45\x22\x97\x62\x1b\xbe\x29\xec\x57\x44\xed\x9c\xc3\xc1\x8a\xfd\x68\x4c\x9f\xe2\x1a\xbc\x9a\xa7\x42\x44\x59\x75\xae\xc4\xbb\x19\xd9\x82\x33\xf5\xcd\xef\xe2\x8e\x7f\x76\x05\xc9\x03\xaa\x53\x38\x28\x22\x33\x53\xfc\xa1\x03\x30\xe3\x25\xcb\x76\x04\x4b\x21\x98\x73\x20\x99\x60\x22\x50\xaa\xdb\x14\x81\x9c\xa9\x02\xb1\x4a\x38\x69\xf3\xbb\x8c\x1d\x6d\x5e\x18\x7f\x01\x23\xe6\xa9\xf1\x28\x89\x2d\x8c\x2f\xd4\x88\x6a\x09\x06\x93\x7f\x76\x9f\xf4\xb9\xd2\x78\x18\xb3\xae\x3a\x09\xf1\x0c\x66\x2f\x50\xf5\x8e\x46\xd5\x75\xd8\x32\x11\x5b\x05\x85\x58\xef\x20\x91\x08\x53\xe5\x16\x2a\x69\x70\xa6\xc4\x99\xe3\x93\xa3\xbd\xf7\x6f\x2c\xb8\x91\x28\xa2\x23\x18\xef\x96\x8b\xa1\x66\x86\xef\xae\xf0\x2d\xf7\x43\x7d\x6d\xbd\x2d\x16\x53\xa6\xc6\xa5\xb1\x68\x0c\x99\x23\x57\x9e\x29\x01\xe8\x34\x94\xe5\xa7\x2a\x5c\xe1\xb7\x9f\xdf\x94\x09\x0a\xee\x95\x7d\xab\xf6\xf1\x4b\x40\xa6\x01\x85\x71\x40\x76\xa8\x49\xef\x18\xb6\x0c\xdc\x80\x4d\xcc\x67\x16\x85\x5b\xb2\x2d\x20\xac\x82\x8e\x7a\xce\x7d\x2a\x34\x54\x20\x3c\x36\x0c\xda\x25\x67\x0a\xb8\xaf\x25\x3c\x18\xaa\x7f\xf3\xff\x63\x4e\x26\xf3\x95\xca\x97\xab\xe0\xa9\x97\x7a\x79\x74\x57\x78\xb0\xf1\xa5\x16\x0c\x29\xfc\x88\x1c\x22\x13\x07\x15\x5e\x5b\xf0\x91\xc3\x81\x9a\xc4\x16\x6c\x25\x68\x1e\xa1\x1e\xaf\x12\x38\x30\x8f\xaf\x38\x84\xfa\xe9\x03\x57\xbd\xa9\xa7\xef\x1c\x7e\x98\xe2\x53\x0e\xa7\xe6\x51\x47\x31\x6c\x98\x96\xe6\x55\xe6\xf4\x1b\x6d\xb2\x8d\x40\xec\xd7\xc4\x32\xa1\xf9\x8b\x44\x89\x72\x37\x72\x5e\x09\x38\x88\x4c\xa4\xc4\x4f\xda\x40\x61\x63\x0e\x5f\xf5\xd3\xfa\x1c\xbe\xeb\xa7\x27\x73\x78\x15\xdd\x27\x1b\xd4\x56\x8b\x1d\xf2\x18\xb8\xb3\xc7\x74\x14\xd9\x19\x79\x15\x69\x3b\xa4\x30\xd1\x49\x70\xad\x89\x48\xfc\x0c\x1b\x59\x90\x24\x18\x4b\x4d\x49\xa6\x23\x45\xb5\x6d\x45\xe1\xbd\xa6\x56\x17\xb5\xb8\xfe\x79\xcb\xc4\xc9\x63\x1f\x39\x8e\x6e\x97\xff\xae\xb6\x0e\x6a\xa1\x9a\x6e\x74\xeb\xd1\xd0\x1e\x40\xd8\x10\x97\x89\x57\xac\x2b\x6a\x1e\xd0\x4b\xa9\x2d\x3a\xb7\x89\x89\xc3\xa4\x3f\xbf\xf0\x61\xbf\xe6\x11\x63\x3e\x0c\x99\xf3\x96\x93\x7c\xce\x97\x52\x4e\xec\x47\x8f\xa2\xc4\x63\xd1\x65\x92\x4a\xfb\xf9\x
e0\xf9\xc6\x23\xab\xaa\x71\x88\xe0\x50\xbb\x72\x8f\x9d\x9f\xfa\xa6\x6d\x07\xef\xcd\x64\xa9\x8d\x76\x13\x70\x4f\xec\x61\x05\x7e\xba\xcb\xb8\x6d\xba\xfc\xca\x5f\x7e\x15\x2d\xbf\x4a\x97\x5f\xb9\xcb\xaf\x82\xe5\x57\xe1\xf2\xab\x06\x9c\xdb\x40\x2e\xd9\xf2\xab\x6c\xf9\x55\x43\x42\xd7\x06\x56\x4b\xac\xe0\xbe\x94\xb8\xe9\x83\xbb\x69\x87\xe0\x3e\xb5\x03\x70\x9f\xdb\x1e\x78\xa1\xcd\xc1\x8b\xec\x0c\xbc\xc4\x96\xe0\x65\x76\x0a\xde\xcc\x66\x78\x57\x09\xde\x1b\x3b\x01\xef\x93\x1d\x81\xf7\xd9\x8e\xc1\xfb\x6a\x4f\xc1\x3b\xb7\xc7\xe0\x75\xed\x2e\xb8\x1f\x6d\x77\x5e\xff\xdf\xe2\x65\x69\xbe\x73\x0f\x86\xe0\xee\xe3\x8d\x1c\x99\x91\xcb\x04\xe3\xf5\xa9\xc7\xae\x7a\xf4\x28\x25\x9c\x92\x90\x92\x8b\x84\x6a\xa6\x96\x64\x94\x30\x4a\xbc\xa4\xfc\x2f\xa6\xc4\xa3\x44\x52\xf2\xd3\x9b\xda\xc9\x5c\xb3\xb0\xaf\x84\xf3\x36\x56\x20\xf2\xb1\xf5\x24\x97\x62\x82\x58\x23\x96\x6d\xad\x5d\x15\x09\x7d\xe0\xc3\x2a\x9b\x58\x23\x3e\x49\xfb\x88\x48\x38\x22\xa5\xe8\xb4\x1f\xb5\x33\x5d\x9e\x22\x3d\x47\xae\x89\x3c\xf4\x26\x72\xf6\x23\x62\x79\x11\x4b\xd3\xf7\x6c\xcc\x2d\x0a\x5f\xa2\xdc\xf1\xca\xbd\x52\x22\x5c\xec\x3b\x6f\x62\x62\xf9\xe1\xd4\xa2\x20\xf4\x8f\x74\xc2\x62\x8b\x82\xf4\x9d\xfd\x18\xb8\xef\xcc\x48\xec\xc3\x0e\x20\x1a\x16\xe6\x49\xfa\xc4\x7a\x97\x30\x3f\x8c\x2f\xfa\xfd\xbe\x45\xbf\xe9\x90\x35\x99\xef\xc4\x02\x98\xdf\x12\xf9\x25\x39\x9d\x4c\xb8\xd8\x62\x29\x27\x74\x0e\x9e\xff\x17\xee\xbd\xf4\xa5\x57\xae\x21\x58\xbc\x28\xe0\x7e\x1d\x57\xbc\x67\xc5\x4d\x57\xa2\x27\xe5\x66\x52\x26\x6a\x5a\xa1\x7f\xcf\x60\x49\x81\x6e\x18\x5a\x14\x5c\xdf\xc9\x04\xa4\x7e\xfb\xc2\xbb\x3e\xc4\x60\x7a\x10\x68\x53\x16\xf9\x0e\x13\xe0\xfb\x0d\x8a\xd0\x78\x64\x05\xac\x37\x0e\xe3\x2c\xb5\x6c\xf5\x38\x89\xb2\xd4\xaa\xe4\xcd\xf4\xd5\x12\x9f\x21\x26\x7a\x13\x11\xcb\x95\x71\xc7\x95\x71\x2f\xc9\x64\x14\xc6\xbc\x17\xc6\x41\xd2\x71\x13\xe1\x73\xd1\x1b\x74\xc6\xa2\x37\xec\x8c\xdd\xde\x10\x49\x7c\xe4\x83\x35\x66\xe2\x22\x8c\x7b\x11\x0f\xa4\x05\x56\x6f\x43\xf0\xb1\xda\x21\xbd\x83\x01\x76\xae\xba\x0d\x18\xaa\xc3\xbf\x51\xd8\x31\x66\x7b\x53\x1f\x01\x46\x86\x32\x52\xc0\xd2\xd5\x2b\x90\x45\x16\x85\xb1\x7e\x66\x16\x85\x4b\x5f\xdb\xf4\xb5\x2e\xc6\x31\xcf\x83\xce\xb5\x56\xf9\x1e\x98\x2a\xe4\x8c\xfc\x99\x63\x66\x0b\xf0\x29\x55\x8f\xdf\x14\xa7\x70\xe1\xaf\x4c\x86\x13\x57\x93\x34\x98\x40\x1e\xda\x1f\x19\x8c\x75\x88\x73\xe9\x93\x0c\xf3\x78\x8c\xf4\x81\x62\xfd\xee\x88\xa0\x6f\x46\x91\x46\x2d\x76\x4c\x4c\xff\x89\x24\x18\x4e\xd3\x93\xd0\x95\x44\xae\x59\x1d\x8b\x02\xaa\x90\x49\x4b\x1d\xac\x21\x9a\x6a\x08\x8a\x19\x3d\xaa\x5f\xad\x7d\xad\x2b\x49\x3e\x10\x9d\x0d\x2a\xc3\x8f\x55\x7a\xd2\x1f\x58\x33\xbd\x2d\xbc\xd7\xef\x30\x5a\x7a\xe3\x59\x3b\x23\x79\x9c\xa5\x7e\x3a\x89\x42\x49\x1e\xfd\x23\x5d\x7b\x74\x81\x89\x62\xcd\x1e\x33\x71\xc1\xa5\x62\x03\xf5\xc6\x4a\xdf\xa2\x70\x63\x9e\x2f\x2d\x0a\xd7\xe6\x59\xf1\x85\x2f\xfd\xf6\x6b\xf0\x18\xdd\x2f\xfa\x6c\x40\xe9\xa8\x02\xba\xd7\xf2\x3e\xb0\x9b\x8b\x10\x8d\xc0\x99\x9f\x11\x05\xe1\xeb\x96\x86\x53\x50\x88\x07\x6f\x20\x14\xc8\xda\xf9\x07\xef\xfc\x92\x45\xe1\x8c\x91\x62\xa4\x6d\xdf\x53\x07\xb1\xfd\x73\x73\x0a\xc7\x7a\x55\x22\x85\x15\x5e\x2f\xac\x0a\x87\x42\xa0\x69\x4f\x8a\x20\xaa\x49\x11\x14\xd7\x3f\x3a\xcc\x73\xdd\x62\xb6\x33\xfb\x10\xe3\x68\x18\x81\xb3\x92\x06\x05\x75\x2a\x02\x0e\xc9\x0e\x68\x2e\x9f\xc2\xd5\xca\xc3\x51\x4f\xbf\x18\x17\x5e\x49\xfe\x0b\xc5\x42\x5d\xf9\xda\x3b\xa0\xcf\x69\x9e\x26\x6a\xcb\xec\x38\x73\xf1\xf4\xef\xf9\x8e\x60\x70\xe8\xb7\xb9\xc1\x41\xec\x1c\x11\xc2\x9d\x19\x79\xed\x9b\x00\xa6\x12\x3e\x4a\x52\xc9\xf1\x42\xab\xb1\xc9\x5b\x71\xc1\xab\x02\x17\xc4\x14\x03\x94\x07\x98\xaa\xe2\x52\x89\x20\xe8\x80\xec\x56\x58\xb4\x63\x5f\xef\x9c\xc2\
x73\x93\x24\x0d\x35\x83\x8b\x32\x43\xe8\x59\x1a\xe6\x58\x14\x5e\xc4\xbd\x50\xf2\x71\xda\x43\x87\xef\x4e\x14\xa6\xb2\xa7\xa3\xd8\xab\xd7\x25\x00\x4e\x14\x02\x75\x7b\x9b\x25\x08\xc6\x05\x48\xcc\x7a\xc3\x01\x96\xae\x77\xfc\x5e\x10\xf1\x9b\xce\x52\xc7\x79\xb3\xef\x4a\xa4\x84\xc1\x1f\x5f\xd1\x9e\xf0\xad\x3a\x09\x99\xdf\x28\x2b\x64\xe4\x67\xd7\x7e\x86\x79\xc8\x50\x58\x7a\xe9\xeb\xb8\x27\xb6\x82\x34\x8b\x02\xc1\x60\x99\x4f\xd0\x60\x11\xdf\xd8\x3f\x98\xb6\xe1\xff\x84\xdf\xd8\xc9\xd4\xda\x7c\xec\xbb\x7e\xa5\xca\x8c\x8c\xef\x77\x0c\x3a\x92\xdf\x48\x7c\xd5\x7e\xe6\xf8\x2d\xef\xa5\x11\x4b\x2f\x1b\x0e\x42\xa1\x01\x50\xc4\x7e\x51\x73\x58\xdc\x65\x8c\xb4\xa8\xe7\x58\x6b\x07\x21\xf9\x94\x2d\x9b\xb0\x97\xde\xc2\x21\xaa\x94\x07\x26\x8b\x55\xa0\x38\x23\xbd\xed\x95\xb4\x79\xf7\x3e\xdf\xdb\x0c\x63\xaa\x36\x4f\x2a\x0a\xe3\xeb\xa5\xf9\xbc\x0b\xe3\x6b\x7d\xb0\x09\xa6\xa1\x82\x0b\x22\xd0\x7b\x2c\xc7\x66\x5b\x45\x2f\x78\x28\x3a\x05\x44\x0c\xb1\x1b\x9c\xd9\x29\xd3\xf0\xca\xde\xd2\x72\x17\x0d\xcb\x84\x15\x76\xd1\x23\x43\xa3\x8f\x73\xff\x0e\x57\x15\xd9\x77\x47\xed\x36\xff\xda\xe2\xb6\x5c\x9c\xb8\xba\x38\x1a\xbe\xab\xec\x40\xbe\x18\x62\x61\x0d\x11\x04\xc6\x19\x6e\xa3\x61\x18\xb2\x94\x8b\x5e\xca\x23\xee\x29\x86\x21\x8c\x43\x19\xb2\xa8\x28\xed\x8d\x93\x1f\xbd\x3b\xaa\xcc\xb8\x7b\x1d\xca\x3b\x6a\x99\xcd\xf2\x92\x48\xc9\x68\xd6\x7f\x3d\x76\xbd\x81\x5f\x60\x7f\xe9\x13\xb1\xf6\x9b\x63\xfd\xb6\x16\xaf\xfd\x66\xfd\x86\x1b\x72\x17\x7e\xd7\x68\xfd\x1d\x23\x87\x44\xab\x73\x61\xea\x13\x6b\x07\x01\xb0\xe3\xde\x76\xe4\x65\x98\x76\x22\xe6\xf2\xa8\xf2\x15\x6b\x2d\xe7\x57\xe7\x20\xa9\xdd\xb0\x44\xea\x33\x29\xf7\x92\xd8\x67\xe2\x76\x79\x45\x55\x1f\xef\x13\xd9\xc1\x05\x37\xa7\xe1\x9b\x92\xae\xd9\xaf\x5f\x18\x75\x39\x43\xaf\x2b\x67\x35\x12\x78\x5e\x22\x01\xdf\xd7\x3e\x50\x90\x39\x98\x00\x0d\x32\x67\x88\x51\xef\xaf\x14\xa9\x5f\xb3\x3a\x78\xb2\x2c\x7b\xe1\x67\x6a\x41\xe6\xd4\x06\x3f\x8e\xf4\x50\x07\xc5\x9a\xcf\x2e\x43\xc9\x7b\xe9\x84\x79\xdc\x02\x2b\x4e\x66\x82\x4d\x2a\xf3\xc8\xf4\xd8\x17\x40\x6a\xa7\x8e\x08\xc7\x6e\x6f\xc3\x00\xbc\x27\x41\xc0\x11\xf1\x30\x61\x1c\xb0\xd1\x8c\x74\x8b\x6a\x25\x92\x35\x43\xc8\x8f\xc8\x8c\x1c\xfa\x98\x8d\x1e\x19\x1c\x7d\x44\xf4\x79\x38\xf1\xdb\x35\xef\x98\xf2\x55\x1d\x91\x34\x81\x54\x9b\x42\xfa\x55\x9b\x19\x7d\x72\xb4\xb3\x51\x6c\x32\x55\xc5\xd5\x1b\x8f\xea\x14\x26\x51\xef\xb1\x19\xd0\x91\x6c\x23\x45\x01\x39\xf7\xf1\x40\x06\x78\x57\x72\x89\x17\x16\x7b\x98\x87\x7e\x8e\xfa\x84\xaa\xf5\xfb\x1e\xc4\x0e\x62\x2c\xa6\x6a\xbe\x58\xec\x62\x46\x4e\x7c\x8c\xee\x84\x13\x18\xa8\x0e\x4a\xc9\xc4\x7a\x9f\xe8\x1d\xd4\x00\x94\x76\x82\x24\x8b\x7d\xb4\xc8\x66\xe2\x0e\xd9\xee\x63\x60\x64\xbb\x23\x25\x5e\x10\xcb\xbb\xe4\xde\x35\x9e\xe4\x1d\x23\xae\xc4\x93\x4c\xb1\x75\xef\x0d\xa3\xa2\x61\x1f\xde\xf9\xa5\x59\xa6\x61\xfd\xa0\x68\xfc\x8d\xa2\x9e\xe7\xcc\x70\x86\xb7\x13\x45\xfe\xb7\xfd\x76\xe1\x34\x27\xc3\x6a\x65\x63\x36\x45\x8a\x58\xa2\x9b\xf7\x25\xe0\x48\x05\x8e\x0a\x26\xb5\x06\xba\xe7\x25\xb1\x14\x49\x54\xfc\x54\x03\x70\x93\x9b\xb2\xed\x8e\xe6\x1f\x7d\x33\x33\x2c\x43\x9a\xbe\xd8\x41\x2f\x9f\xe6\x91\x5f\xa6\x39\xa4\x14\x5e\x31\x7d\xeb\x21\x21\xce\xa8\xc1\xf4\xd5\xf3\xb1\xd4\x8b\x1f\x7a\xa8\x31\xba\xbb\xae\xcf\x53\x4f\x84\x13\xe4\x37\xca\xf3\x13\x1b\x4c\xa2\xc1\xf9\xad\xbf\xda\x81\xb1\x7d\xd5\x4c\xc6\x9f\xea\xf7\x55\x15\x24\x5b\x15\x26\x99\x97\x2c\xaf\x41\xa9\xcc\xbb\x56\x50\x14\xfb\x16\x58\x52\xb0\x38\x9d\x30\x81\xfa\x5b\x73\xfe\x83\x24\xd6\xa8\xf8\x92\x8b\xb0\x7c\xed\x65\x22\x45\x24\x3c\x49\xc2\x58\x2b\x7f\x75\x81\xc1\xae\x88\x2b\x62\x6e\x16\x3f\x1f\x8a\x46\xb7\x78\xfd\x83\x83\xd1\xb3\x3e\xf5\xef\x19\xd8\xf7\x87\x86\xd9\xc2\x76\x8b\xc2
\xae\xef\xfc\xc6\xe3\xa9\x53\xd5\x56\xfe\x06\x07\x1a\x10\x43\x55\xe3\x93\xef\x3c\x83\xaf\xbe\x33\xdc\x80\xef\x0a\x86\x0f\xa5\xe6\x26\x6f\x24\x4c\x25\xfa\x8b\xc3\xab\x7b\xc8\xe5\xc3\x42\x2e\xff\xd8\x74\x12\x74\x98\x25\x63\xa8\xfc\x41\xd5\x08\x12\xb0\xae\xf9\xed\x56\xe2\x73\x0b\x30\xd4\x3a\x9e\x4e\xe3\xa7\x97\x16\x4e\x75\x7e\x50\x75\xac\x63\x41\xe9\xfa\xb6\xef\x17\xae\x6f\x4c\xe8\x4c\xca\x6f\x8c\x82\x65\xcc\x22\x75\x28\xbf\xe8\x79\xea\x8f\x53\x88\xa7\xad\xe1\xb6\xbc\x2f\x5a\x58\xbd\xd2\x81\x65\x42\x4c\x16\xca\xf6\xc1\x73\xf6\x15\x0e\x87\xc4\x39\x63\xfa\x20\x48\x13\x4a\x38\xf4\x31\xaa\x94\xa7\xd0\x74\xe8\x7c\xf7\x15\x55\x09\x9c\x0b\xe2\xe1\x92\x99\x9c\x1f\xfb\x0c\x90\xfd\x37\x39\x56\x2c\xcb\xf6\xfa\xdd\x91\x75\xc9\xd2\x9e\xcf\xe2\x0b\x2e\x2c\x1b\x7f\xa4\x99\xe7\xf1\xb4\xaa\xa2\x28\x31\xab\x48\x66\x9d\x38\xe9\x5d\x64\x52\x72\x91\xb6\xb0\xc8\x47\x26\x37\x89\xa7\xbe\x57\xa3\x2e\x5e\x12\x75\xac\x35\x51\x28\x2f\xc2\xb8\x37\x0b\x7d\x79\x69\x81\x1c\x59\x1b\x83\xc1\xe4\xc6\xb2\xad\x75\xfc\xdb\xc0\xa4\x37\x7e\x5e\x9d\x59\x1e\xcb\x5e\x2a\x05\x97\xde\x65\x53\x3b\xf5\x55\x44\x22\x3d\x73\x57\xb6\x88\x81\x0e\xfc\xe6\x1c\x9b\x78\x1c\x82\x44\x14\x78\x01\xb7\x11\xe5\x71\x8f\xd4\x3c\x60\xd5\x99\xfe\xea\x57\x7c\x85\x1b\xb7\x27\x4f\x26\xf6\xc9\xa7\x26\xe7\xd8\x03\xc7\xc9\xf2\x46\x1f\x99\xaa\xf7\x22\x76\x0e\x09\x83\xb0\xe2\x7b\x67\x4c\x33\xba\x23\xe9\xda\x33\x72\xea\xc3\x03\x64\xa3\xfb\xac\xa0\x34\xd2\x9d\x53\x90\x1e\x69\x49\xef\xfb\xc9\xa7\x23\xd5\xfb\x90\xda\x58\x33\xf6\xc8\x07\xd6\x80\x06\x2b\x8b\xd4\x73\x65\x5c\x2e\xd4\x32\x67\x36\x11\xe1\x98\x89\x5b\x4b\x9d\x74\x12\x50\x48\x1a\x58\x2e\xc5\xd5\xc9\x51\x7d\x27\xbc\x24\xea\xb1\x4c\x26\x9d\xda\xd7\x14\xf1\x58\x6f\xda\xbe\xc6\xad\x1b\xdf\xc5\x2b\x6e\x33\xf2\x86\x19\xec\xd5\x28\x26\xb8\x3c\x8a\xb4\xf0\xd3\x4b\x96\xc4\x05\xe3\xbf\x52\x99\x49\xc9\xcb\xc0\x8c\xbc\x29\xfb\x52\xf0\xa1\xd8\xec\x05\x5e\xbb\x5c\x8c\x2d\xa4\x2c\x1d\x03\x54\xd0\xe1\xfd\x8b\xbe\xd5\xcc\xec\x22\x01\x40\x76\x74\x6c\x60\xdb\x65\x29\x47\x0c\x8d\xb8\xf8\x03\x23\xbb\x3e\x2d\xfb\xde\xf5\xcb\xd1\x99\xfc\x12\x62\x7a\x4f\x85\xa7\x9c\x36\xe8\x2a\x73\x05\x0f\xa5\xa3\x22\x40\x9c\xb5\xa4\xab\x54\x43\xf6\x45\x32\xf1\x93\x99\x3e\xfc\xa8\x59\x04\x44\x4a\x62\x8a\xde\xfe\x95\x41\x0a\x43\x38\xf8\x74\x15\x8b\x51\xb2\x11\xc1\x93\x8e\x1f\xba\x9d\xb1\xbb\xde\x19\x8b\x46\x61\xdc\xe3\x9a\x88\xad\x64\x23\x8e\xd4\x97\x15\x8b\x20\x1b\xc0\x7c\x52\x01\x34\xb5\x47\x46\x35\x6a\x46\x9a\x4d\xef\x49\xe2\xd8\xd4\x79\x06\xde\xd4\xd9\x1c\x40\x32\x55\x44\x2b\x9c\x3a\x1b\xcf\x20\x98\xde\x33\xab\x77\x6e\xf2\x53\xa4\xf5\xbe\xd0\x89\x37\x0b\x2c\xa2\xe4\x7f\x5a\x4d\xee\xed\x4e\x97\x92\x7b\xe7\x54\x83\x7d\xc5\x1b\x75\xf7\x1a\x95\xa2\x6e\x84\x5a\x51\x4f\x6a\xab\xd2\x7d\xf4\x39\x18\x07\xa0\x69\x03\x62\xa5\x56\x32\x1a\x4c\x73\x15\xcd\x7b\x89\x11\x63\x33\xb4\xda\x15\x23\xa6\xb0\x8f\xde\x61\xa9\xef\x9d\xc7\x01\xec\x64\x7f\xa5\x47\x8e\xbd\x35\x91\x96\x3b\xf0\x73\x3d\x2b\x61\x0b\x6e\x96\x6d\xb8\xd9\x9b\x16\xab\x9a\x7a\x24\x2c\x30\x71\xb8\xf0\x3e\x29\xde\x27\xe5\x7b\x31\xc2\x89\x32\x83\xcd\xc5\x14\xb1\x79\x86\x1a\xc9\x7c\x35\xf2\x66\x6c\x5a\x41\xec\x32\xef\xc1\xf5\x0a\xc4\xfe\xdd\x27\x0c\x5d\x35\x4a\xe4\x6e\xfc\x70\xf5\xf2\x66\x53\x8d\xdc\x59\x05\xb9\xb3\xd5\xc8\x9d\x4d\xe9\x48\x7d\x61\x48\x6d\x66\x90\x7b\xe8\xa1\x72\x29\xc0\xef\x2a\x6e\x05\x99\x43\x2f\x4b\x2d\x70\xb9\xda\x53\x0a\x91\x47\x02\x6c\xf4\x2f\x24\x03\x02\xb1\x40\x4c\x9b\x49\xc1\x9c\x42\xda\xce\xf5\x18\xf8\xf5\x24\x64\xce\x03\x14\xac\x25\xc4\x7d\x6f\x97\x2a\x31\xfb\x55\x90\x1b\x74\x32\x49\x24\x55\xcc\x4b\xb6\x82\x75\xc1\x20\x54\x0d\x22\x2d\xba\xf9\xf0\x29\x58\xaf\x63\x54\xf1\x68\xae\x5
f\x4b\x66\x68\x2b\x23\x20\xf3\xaa\xfe\x62\x62\x54\x0a\xc9\x12\xfe\x2e\x27\xe4\xb7\x73\x42\xac\x81\x13\xaa\xf3\x3f\xee\x14\x45\x5f\xc0\x65\xa8\x28\x5c\x81\x2b\x19\xe2\xfe\x94\xe8\x9d\x12\x0f\x3b\xd7\xfc\xb6\x13\x24\xa2\x98\x74\xae\x5f\x30\x2a\xf6\x7f\x51\x77\xff\x14\x9d\x0b\xbd\x45\xbd\x72\xf9\xcd\xca\x5b\xc3\x5e\x10\xee\x90\xcc\x89\xa9\xc2\x78\xb1\x93\x29\xfc\x57\x5d\x65\xc5\x67\x78\xc9\x78\x12\x71\xc9\x7b\x63\x1e\x67\x1d\x6b\x8d\x90\xac\xcf\xd6\x7f\xfd\xca\xfa\xee\x16\x7d\xf8\x50\x1d\x3d\x2b\xbd\x4c\x66\x8a\xd6\x51\x0a\x9e\x47\x18\x9e\x1b\x0a\x89\x7e\x1c\xd2\x06\xa6\xa4\xa0\x80\xaa\xd7\x52\xfd\x21\xa7\x0a\xc7\x69\x1d\x60\x4e\x97\x91\x79\x98\x53\x88\xa6\xce\xcf\xae\xfd\x64\x0e\xfe\x1d\xc4\xb7\x91\xc8\x96\x92\xa1\xdd\x54\xbe\x4a\xda\x3b\x63\x64\xea\x11\xd1\xf7\x8e\x97\xf4\x3f\x71\x4e\xa6\xfb\xee\x38\xbf\x18\x9c\x9a\x74\x53\xdd\x06\x42\xa8\x91\x7e\xac\xd0\x7e\x99\xbe\xb8\x3d\xd5\xa4\xbc\x23\x61\x69\xdc\xf7\x8e\xe7\x85\x90\xf7\x51\xea\x68\x01\x62\x51\x2d\xbc\x2c\x49\x2b\x56\x51\x6d\xad\x06\x30\x01\x96\x64\xee\x5e\xec\xf3\x1b\x4c\xc5\x33\xa4\xf9\x5a\x54\x2e\x0d\x04\x8f\x98\x5e\xc0\x16\xe1\x77\x71\x8b\xd5\x92\x4d\x35\x95\xd1\x8a\x0d\xd1\xdb\x5c\xb1\xc4\x25\x88\x1e\x19\xcb\x3a\xbb\x63\xad\x69\x03\x2e\xb4\xad\x11\x15\x2c\x38\x9e\xb6\x9b\x02\xb9\x6f\x7f\xfd\x12\x7d\x77\x6b\xa4\x00\x5b\x3a\x82\x2a\xc4\x88\xd9\xb4\x76\x31\x40\xa6\xa6\xe1\x68\xc2\x78\xfd\x97\x08\xb9\x34\x84\xdc\x2b\xc8\xb6\xbc\x7f\x7b\xa9\xdb\xae\xda\x0a\x5c\xa1\xce\xf2\x79\xc3\x93\x85\xa7\xaa\x5b\x9e\xaa\xae\x3e\x55\xff\xc4\x2e\x95\x9c\x41\x3d\xdd\xaa\x62\x03\x38\xd2\xb7\x68\x8a\xc4\xf0\xd2\x5b\x62\x0a\xaa\xc6\x9f\x53\x3a\x1a\x7b\xc4\xa3\xb6\x61\x07\xd4\x2f\xa6\x7f\x25\x53\x3a\x9a\x7a\x5a\xd6\xe6\xd0\x16\x29\x51\xc3\x70\x46\x29\xb5\x8f\xdd\xb9\x81\x99\x2a\xb8\x18\x27\x79\x0b\x2c\x37\x4a\xbc\xeb\x52\x5d\x6b\xf0\xfd\x70\x30\xf8\x7f\x4a\xa5\x54\x0b\x8a\xe9\x2c\xfc\xea\x89\xf0\xe2\x52\x96\x68\xc7\x9f\x2a\xb1\x54\x6a\x7c\x63\xcf\x48\x77\x8a\xf6\xf7\xee\x0f\x5a\xde\x95\x03\x03\x6f\x4e\xe1\x72\x05\x0d\xde\xd5\x81\xbe\x9f\xeb\x8b\xf5\x73\x7d\xb3\x7e\xab\x7d\x68\x4f\x00\x3d\x83\x5e\xeb\x20\x73\xdb\x78\x3d\xe1\x25\xab\x95\xc9\x1e\x13\x7e\xa7\x4a\x7e\xeb\x85\xbd\x4b\xce\xfc\x2a\x33\xdf\xad\x02\x58\x47\x01\x99\x64\x6e\xda\xa9\xd4\xc5\x17\x79\x83\x1b\xf2\xd6\x87\x01\x78\x1a\x85\x9c\x30\x4c\xaa\x68\x4e\xe2\x8e\xb1\x42\x54\x64\x02\xeb\x0d\x1b\xeb\x89\x3e\xbb\x18\x59\x68\x2f\xdb\x21\x9a\x23\xa0\x96\xad\x5f\xe4\xe2\x5e\xe6\x2f\x64\x6d\x9c\x91\xf1\x54\xad\xee\x0d\x30\x8a\xb6\x95\x3e\xe4\x02\xa3\x6f\x81\xe8\x9f\xc0\x11\xcb\xdf\x97\xf7\x68\x20\xfa\xc7\xf0\x99\xe5\x94\x6b\x69\x29\x0c\x70\xe8\x89\x79\xa3\xf2\xab\x32\xff\x6a\x3a\x05\x0e\x6a\xbc\xb8\xc7\x79\xb1\xc8\x8b\xe3\x29\xfc\xf4\xbe\xd8\x0f\x06\x08\x8d\xd5\xcf\xec\xb4\xb1\x6f\xb9\xcc\x5c\x5e\x80\x2c\x99\x9a\x6c\x68\x02\xa2\xef\x31\x86\x4a\x00\x7a\x10\xcf\x0d\x45\x8c\x47\xab\xee\xf1\x37\xaa\x02\xf5\x56\x12\x45\x6c\x92\xf2\x0e\x8b\x22\xa3\x0b\xb7\xe8\x37\x7b\xc5\xbd\xfc\x42\x73\x6d\xa6\xb8\xd8\x38\x9f\xa0\xe7\xc3\x2d\xf9\xec\x43\x02\xa1\xe2\x93\xa4\x41\xb3\x17\x53\x54\xc0\x5d\x0e\x2d\x0a\x93\xe9\x82\x65\xd3\xc5\xb4\xb4\x6c\x8a\x13\x99\x2b\xe5\xf3\x1e\x75\x43\xc4\x27\xa9\xb6\x9b\x98\xa2\xf2\xee\x52\x8e\xa3\x9d\x44\x51\xd4\x9b\x7b\x4a\xb3\x77\x29\xc5\x05\xf3\xc3\xe4\x9f\xd2\x88\x0b\x44\x78\x31\x62\x3a\xf9\x9f\xd4\x82\xcf\x29\x5c\x4f\x8d\xe9\xeb\xcb\xa9\x09\xd1\x78\x8c\x0f\xc3\xc1\x1c\x5e\xe3\xd3\xb3\x39\x5c\xe1\xc3\xe6\x1c\xb6\xa6\xad\xf7\xa1\x15\xf9\x78\xf0\x87\x83\x11\xbb\x75\xb4\x6e\x25\xaa\xe4\x97\x29\x92\x64\x1a\x9b\x79\x4e\x0c\x89\x23\x20\x74\x24\x04\x0e\x07\xd7\xc1\xbb\x2a\x06\x19\x85\xd4\x31\x41\x
79\x05\x26\xed\x7d\x11\x3b\x1e\x08\x27\x01\xe9\x84\xc0\x9d\x00\x32\xc7\x05\xe6\xa4\x4a\x98\xde\x9b\xae\xf4\xa5\xea\x04\x64\x6b\x8a\x4e\x16\x6f\x24\xe6\xe0\x41\x97\xf0\x1d\xb8\x25\x1f\x52\x0c\xf1\xa7\x03\x12\x1e\x4e\xdb\x0c\xdb\x4c\xfe\xb1\x5a\x22\xc9\xe1\xe6\xe6\x80\xd2\xa5\x54\x53\x35\x8f\xfa\xcd\x7a\x12\xaf\x5a\x18\x85\x27\x35\xff\xc0\x41\x3d\x75\x57\xdd\x48\x6e\x1d\xb3\x67\xb4\xa1\xfe\x4b\x8c\x2f\xce\x9d\x29\xd9\x9b\xc2\x70\x08\x18\x9f\x56\x02\xc9\x9c\x1b\x45\xfe\x0e\xa7\x26\xba\x6e\x11\x79\xf7\x69\x4f\xfb\x7c\x8d\x32\x5b\x5f\x5e\x3e\xb5\xb3\xde\x90\xe6\x41\x78\x21\x73\xca\xa8\xbc\x0a\x36\x1d\x12\x57\xbb\xd2\x1a\x85\x22\x8c\xaf\xe9\xeb\xa9\xe3\x38\xf1\x68\x60\xc7\xd8\x27\x3e\x99\x3e\x33\x0a\x0b\x43\x8b\xab\x5a\x04\xb9\x38\xec\x0c\x24\x45\x1b\xca\x23\x92\xef\xd1\x49\x2b\xc3\x85\x11\x3a\xfe\x78\x3a\x12\xf6\x8c\x9c\x4c\x15\x33\xe9\x62\xca\x38\x98\x91\xf7\x09\x08\x7d\x2a\x53\xfd\xce\xa8\xd8\x3e\x23\x3c\x6f\xcc\xe1\x68\x7a\xa7\x45\xe7\xaf\x5f\xc6\xbf\xcc\x38\x7f\x2c\x44\x3a\x9d\x53\xd8\x59\xe8\x42\x62\x42\x1d\x63\x01\x70\x41\xae\x3d\xb5\x29\xd7\x9e\x3a\x04\x23\x0b\xaf\xb7\xc7\x49\x2c\x2f\x95\x9c\x02\x59\xfb\x35\x48\x11\x41\x37\x2e\x82\xba\x0a\xba\x32\xb0\xf0\x83\xb8\xdf\x7d\xf8\x70\x88\xd6\xde\x19\x9a\x69\x70\x3a\x12\xb6\x65\xcd\x35\xbf\x88\x03\x0e\xc1\xea\x70\x85\x25\x51\x97\x84\xaf\xbe\x83\xd5\x19\x27\x59\xca\x13\x74\x25\x41\x95\x10\x16\xdc\x82\xd5\x31\x62\x30\xc4\xcd\x01\xc9\xd8\x2d\x76\xda\x16\x8c\xcc\xb2\x8a\xfb\x57\x17\x6d\xa9\x0e\x89\x19\x1b\x8e\x4e\xd1\x64\x8c\x5f\x87\xed\x45\x9f\xfd\xfa\xb5\x8e\x81\x65\xdd\xb2\xbd\x79\xe8\xb8\x5c\xce\x38\x8f\xad\x39\xa1\x39\x0b\x7d\x48\x30\x4f\x81\xa2\xd1\x47\x53\x68\xcd\x55\x16\x02\x3a\x6e\x97\x83\x6d\xd4\x5c\xf9\x4c\xf2\x8e\xcb\xbc\x6b\x6b\x8d\xb0\x3e\x53\xff\xb8\x6b\x31\x6d\x14\x1a\x55\xd5\x40\x24\xb1\xb4\xd6\x92\x35\x12\xae\x11\x6f\xcd\x44\x4e\x4b\x7d\xb0\xca\xb5\x04\x17\x3d\xb4\xf3\x24\xc2\x8a\x00\x7f\x9e\x96\x38\xf8\x4a\x81\xd2\x01\xc6\x06\xe7\xb4\x22\x59\xbc\x5f\x49\x96\x16\x38\xb4\x9d\xfc\xba\x7e\x67\x9a\xe7\xd5\x32\x9d\xbc\x9b\xb6\x7a\x59\x9e\xa3\x44\x80\x01\xa6\xd4\xcf\x93\x29\x48\xd8\xa1\x6d\x37\x45\x57\x59\x2a\xc3\xe0\xb6\xb8\xa1\xa9\x2b\x6e\x2b\xd6\x57\x9c\x5f\x57\x38\x41\x1c\xd9\x4b\x0f\xce\x4c\xf4\x69\x6f\x3a\xfa\xd3\x3a\xce\x94\x80\x70\x80\x62\xc2\x49\xa6\x24\x82\x33\xc5\x54\x59\x27\x97\x99\x05\xd6\x8e\x08\x2d\xb0\x8e\x99\xb4\xbe\xd9\x7f\xde\xa3\x16\xba\x34\x68\xc3\xd3\xfa\x06\xe1\x78\x59\x18\x9b\x8b\xd2\x72\x63\x32\x89\xfb\x72\x35\xa5\x15\x6b\x87\xf7\xf5\xe5\x3b\x43\xfc\x30\x98\xc3\xf6\xd4\x04\xac\x7c\xab\xf9\x09\x57\xf5\x77\xaa\x9f\x27\x16\x85\x1f\xed\x9b\xf5\xb3\x6b\x3f\xa9\x66\x82\xc6\x38\x1c\x2b\xab\x3f\xae\x57\x3f\xd0\x1f\x4a\xa5\x48\xe2\x0b\x8b\xae\xc8\x1d\xbd\x14\xb3\xc3\xa4\x1f\x8b\xfb\xdd\xd1\x69\x62\xbb\x9c\x6a\xa7\x35\x98\x91\x90\x17\x09\xfb\xda\xad\x17\xf2\x14\x43\x95\xec\x42\xc4\xf2\x8a\xb8\x75\x5e\x12\x4f\xb9\x90\x9d\x54\x8a\x10\x47\xf6\x69\xaa\x2f\x69\x3f\x1a\x7e\x76\xc6\x81\x09\x4a\xe1\xeb\xb4\xcd\xdd\xb3\x0e\x68\x32\x1c\xf3\x49\xe8\x5d\x57\x81\xeb\x60\x9a\xd7\x48\x33\xf7\x8a\x7b\xb2\xc2\xc2\xc8\x91\xf5\x3a\xf6\x2d\xdb\x3a\xce\x55\x75\x8b\x50\x70\x99\x64\xa2\x49\xe9\x99\x4d\x7a\xda\xb4\xdc\x5c\x4b\x14\x10\xad\xce\xe7\x2d\xf9\xa1\x8e\xc3\x00\x86\xed\x57\x4e\x2c\xbe\x88\x78\xcf\x68\xb8\x77\xcc\x77\x77\xcc\x15\xbf\x02\x34\x37\xca\x84\xa5\x17\x7a\x46\x76\xb1\x3f\xb5\x40\x14\x99\xbc\x0a\x1e\x0d\x03\x22\x73\x6c\x2a\xfa\xec\xaa\x1a\xa7\xd3\x1a\xa0\xf1\x50\x9e\x76\x40\x3b\x20\x60\xb5\x1f\x77\x56\x23\x94\xc2\x6b\x8f\xac\x6b\xae\x73\x1a\xf2\x59\x7d\xaa\x39\xd7\x58\xb9\x0b\x52\x42\xe9\x3d\xd6\xa5\x77\xe7\xc2\xa8\x9e\xca\xa5\x59\
x12\x92\x92\x68\xf9\x0b\xa5\xe2\xc5\x5e\xde\x47\x25\x89\x48\xfe\x77\x77\x72\xf8\x2f\xde\xc9\xe1\xdf\xdf\xc9\xef\xf7\xdb\xc9\xef\x2b\x77\xf2\xef\xef\xdd\xf0\x9f\xde\x3b\x75\x42\x5b\xa0\x49\x6f\x1f\x89\xdb\xd1\x93\xf6\x82\x6c\x53\xbf\x28\x74\x63\x59\xf6\x8c\x4c\x12\x78\x06\x02\xd9\x9e\xb9\x41\x52\x78\xf9\xac\x38\xae\x2b\xf5\x4f\xa8\xa4\x66\xfc\xf9\x43\xfd\x73\x5b\x50\x4e\x64\x35\xe2\x39\x85\xef\xd3\xa6\xf0\xef\xc6\xad\xa7\xbc\xdb\x6f\x30\x20\x1c\x77\xdc\x8b\x5e\xc0\x7c\xee\x2f\x1b\x14\xae\xa3\x5d\x71\x8b\x01\xa6\xe4\x37\x72\x95\xf5\xe5\x62\x79\xa3\xe9\xa5\xae\xb2\x78\xe3\xfa\x22\x5e\x8c\x1b\x9d\xfc\xcd\x21\x9f\x55\x0d\x6d\xcb\x79\x37\x5e\x09\x88\x92\x11\x79\x35\x6d\x4d\xbe\xda\x66\x22\xac\x23\x06\x39\x3a\x62\x10\x46\xde\x74\xd1\x6a\xf3\x88\x10\x79\x87\xdd\x79\x6e\xc2\x87\x26\xe4\xb2\xef\x2e\xf3\x69\xab\x6d\xc8\xff\x9e\x85\x78\xc5\x1e\x5c\x18\x7b\x70\x61\xec\xc1\x5f\xfa\xc8\x14\x14\x76\xde\xe8\x10\x50\xb3\xf3\x56\x32\x80\xb6\x43\x5e\xf4\xaa\xb8\xbf\x1d\xb2\x68\xb7\x43\xfe\x3e\x45\xa7\x0c\x9e\xef\xc7\xc7\x69\xfb\xf1\xaa\x18\x4e\xb2\x81\xfe\x5c\xf9\xa5\x57\x53\x25\x84\x68\x33\xc4\x0f\xd3\xfb\xf8\x70\x2d\x25\x44\x59\x5a\x44\xb3\x70\xb2\xef\x8e\xfe\x9c\x91\xd3\x69\xce\x95\x1e\x54\xb4\x32\x2f\xf3\x80\xa2\xfa\x3e\xc9\xee\xa0\x33\xdd\xbe\xc0\x64\x20\x05\x82\xf9\x38\x45\x67\x0a\x54\x23\x35\xf7\xf3\x3e\x29\x63\x93\x56\x6e\xba\xcc\xb9\xaf\x25\xff\xc5\x75\xac\x49\xe2\x15\x7f\xb3\xd6\x59\x69\x03\x4f\xfc\xb7\x37\x63\x22\x46\xbe\xa6\xe4\x36\x70\x0f\xe6\x14\xf6\xa7\xed\x71\xf7\x1a\xbc\xe0\xaa\xc3\x19\x94\x81\x80\x32\x1f\xb6\x3c\x98\x91\x0f\x7a\xee\xf5\x6c\x37\xc6\x1b\x6e\x29\xfd\x95\xaf\x46\xf0\x66\xea\x58\x63\xd9\x7b\x6c\xc1\x97\x65\x4b\x82\xe5\x89\x1d\x91\x37\x53\x30\xbe\x25\x23\xab\xb3\x70\xdf\x6a\xd1\x9a\x0d\xe8\xed\xb4\xd9\x8a\xaa\xc2\x90\x15\xbb\x72\x60\x12\xf1\x75\x34\xcd\xc6\x0d\xc7\xcd\x29\x37\x2b\x37\xe8\x4e\x2b\x86\x38\x5a\x13\xfb\x3d\xab\xe8\x3a\x87\x88\xea\x61\x91\xa0\x6a\x51\xb0\x0c\x6a\xae\x17\xb2\xa0\xb1\x4d\x5b\x58\x35\x25\xe8\x05\x9c\xfb\x28\xd0\xd5\xb4\x5e\x73\x62\x8e\x53\xdc\x35\xac\xbd\xe8\x3a\x2b\xd5\xaa\xe6\x42\x1a\xf1\xe8\x99\x5b\xe1\x56\x3e\x08\x4d\x0f\x5f\x96\x30\x09\xb2\x7b\x9f\x43\xaa\xf0\x53\xc7\x5a\x7b\x93\x8b\x83\xa2\x5b\x73\xce\x40\x96\x00\xbd\xa3\xb9\x65\x5b\xa7\x98\x65\xb1\xd1\x88\x67\x1c\xf5\xd6\x3b\xcb\x17\xf9\x0a\xe5\x77\x97\x66\x1e\x2f\x32\x0e\x0b\x6d\x0d\x50\x60\x53\xb1\x70\x17\x86\xea\xd3\x82\x2a\x70\x5c\xb6\xe7\x73\xc8\xba\x46\x87\xc2\xba\x77\xf8\x48\xb0\x3e\xf3\xf3\xa8\x6c\x5e\x25\xa2\x76\x93\x1c\xae\x4d\x85\x34\x7f\x91\x44\xd9\x58\x27\x1b\x30\x56\xea\x15\xfb\x80\x8e\xb5\x26\xab\xc0\xbb\x8c\x36\x44\x85\x87\x51\xaf\xbe\xf8\x4a\xa8\xff\x8c\x4a\x56\x7d\xb7\x94\x2d\x5b\xa0\x74\x16\x60\xa8\xd8\xfa\x9c\x25\x5a\x3a\xc8\xf7\x1d\xf9\xbf\x67\xb4\x95\x81\xe5\xa8\x42\x47\x7f\xf3\x9a\x0f\xc8\xdd\xab\xab\xa1\xe0\x3f\xb2\xb8\x39\xc0\x2d\xdf\x99\xdd\x75\x8e\x93\x1c\x1a\xe7\x14\xbc\xee\x1d\xda\x92\x8a\x88\x59\x39\x70\x01\x61\x5d\xd8\xf1\xc1\x48\x8f\xa0\x04\x93\x5e\x74\xd1\xdb\x44\xcb\xc6\x27\x26\xe2\xca\xb9\x07\x33\x8f\xc2\xcc\x23\x59\x97\x42\x4c\xa1\x68\xb6\x6d\x52\x61\x94\x2d\x37\x16\x5a\xee\x2d\xb4\x94\x95\x96\xaf\xd1\xf2\xbb\xf1\x73\x87\x0b\x8d\x44\xc3\xa2\xe8\x8d\x32\x8d\x87\xa6\xf1\xd2\x4e\xa9\x63\xfb\xff\xfd\xbf\xd6\x92\x43\xce\xc2\x5e\xd4\xd1\x9c\xc1\x1d\x1d\x7d\x2f\xde\x5b\x30\x3c\x98\x79\x84\x77\xdb\x85\x07\x8f\x45\x5c\x71\x9d\x0b\xa2\xc3\x37\x7a\x97\x7f\x44\x22\xe0\xd1\xff\x90\x24\xfe\xa5\xc6\xf5\x12\x6b\x74\xe9\xa3\xb0\x2f\x79\x2a\x49\xec\xc4\x74\x64\xf9\x4c\xb2\x9e\xb5\x16\xdb\x31\x3c\xfa\x9f\x7f\xa4\xbf\x93\x2b\x36\x65\xfa\x42\xc3\xfe\xa5\x0a\x6d\xc5\xc4\xfe\xe3\xd1\xa5
\x1c\x47\x45\x53\xe1\x08\xb4\x2e\xd2\x79\xbc\x14\x72\x67\x02\x74\x57\x17\x82\x8d\xc7\xe7\xdc\x0f\x31\xe8\x44\x1e\x64\x09\xc2\x6e\x9b\x4f\x6e\xee\x8c\x2b\xfe\x11\xff\xfa\x87\xf8\xf5\x8f\x58\xfb\xe4\x06\x5d\xed\x79\xc9\x6f\x24\x13\x9c\x59\x14\xdc\xee\xca\x1c\x4d\x98\x01\x7f\x03\x86\x8f\x61\x5f\x90\xb0\xab\x33\x7e\xe2\x4d\x7e\xf6\xef\x41\x8e\x62\xf5\xf9\x8d\xcb\xf3\x1b\x74\xcd\x01\xce\xf2\x03\x2c\xf1\x00\xf3\x7b\x63\x47\x38\xf1\x08\x53\x4b\xfd\x2f\x40\x93\xff\x96\x71\x37\x0d\xf1\x5f\x8d\x30\xff\x03\x0b\x5e\x90\xe8\x85\xc9\xfc\x6d\xd4\x99\x76\x1b\x9d\x5f\x74\xac\x54\xde\x67\x9b\x80\xb9\xc5\xce\xc1\x53\x7f\xba\x90\x38\xbc\xef\xbe\x83\x50\xfd\xda\xc3\x8c\x63\x6c\x0b\xe3\x40\x75\x47\x87\xc4\x7a\xcf\x67\x79\xc2\x00\x85\xce\x8e\x13\x4c\x90\x46\xed\x43\x62\xbd\xf6\x43\x59\x96\xfd\xe0\xc4\x04\x97\xab\xc6\xf0\x6a\xb9\xd9\xcd\x05\xac\x5b\xe2\x75\x21\xeb\xbb\x87\x90\xf5\xd9\x19\x64\xfd\x54\xcd\xfb\xcb\x14\x3c\x60\x15\xe4\x5a\xc4\x93\x79\x33\xd5\x28\x75\x52\xa0\xd4\x97\x53\x4c\x65\x79\x4a\x61\x4a\xdc\x2e\x58\x5b\x79\x2e\xea\xbc\xea\x45\x51\xf5\x5a\x57\x3d\x5e\x58\xda\x82\x74\xdc\x92\xfd\x29\x70\x48\x74\xf0\x22\xd9\x45\x29\x51\x8d\xf1\xca\xd3\xde\x79\x73\x0a\x51\xd7\x91\x02\xfc\xae\xc3\x05\x4c\x1b\x91\xcb\x97\x98\x84\x02\xd5\x1c\x26\x5e\x43\xb7\x7b\x4f\x8b\xe5\x71\x6b\x45\xf7\x19\x8a\x91\x2f\x05\xef\xdc\x26\x59\x27\xcd\xcc\xc3\x8c\xc5\xb2\x23\x93\x8e\x4e\x25\xbe\xc0\x92\x8f\x2c\x0a\x6c\xc3\x5e\x6d\x15\x7a\xc6\xc8\x01\x27\x33\xd2\xd5\x73\xad\x1a\xf3\x6d\x25\x71\x10\x8a\xb1\xa6\x35\x6e\x62\x1f\x70\xf2\x32\xa1\xe0\x6d\xda\xd6\x6b\xfd\xb5\x7c\xdf\x31\xd9\xca\x0a\xe6\xb8\x49\x2f\x61\x2c\x0b\x53\x1e\x05\x46\xdb\x54\xda\xf8\x9b\x63\xb5\x86\x96\xdf\x7b\xbe\x09\x3b\x87\x36\x03\xad\x5f\x41\x5f\x82\x35\x62\x3d\x34\x41\xb3\xb4\xcb\xb3\x16\x8c\x27\x1a\xa5\x5f\x3e\x56\x52\xf8\x8a\x71\xfe\xef\xf4\x67\x5f\x0e\xd5\x64\xb2\x1d\x2c\xd3\x93\xcb\x2e\x32\x21\xa9\x05\xa2\xcf\xce\xea\xa2\xa7\x2e\x47\xae\x08\x6b\xb8\x87\x4b\x62\xa8\xe9\xc2\xe4\xa5\xd7\xbd\x60\xa4\x27\x54\x91\xb8\x4a\xae\xfd\x4b\x4e\xe0\x8b\xe1\xd6\x65\x05\xba\x3e\x19\x8d\x27\xa5\x70\xe4\x11\x41\x1b\xa3\x92\x2f\xcc\x73\xb5\x1e\x60\x95\x98\x67\x10\xf9\x62\xfc\x89\x03\x4e\x3e\x7b\x24\x5e\x00\x79\x3d\x71\x6b\x81\x88\xfc\x2b\xfb\xdf\xe6\x11\xd7\x0a\x6f\xb5\xaa\x62\x49\x0d\x5b\xea\x75\xde\x7b\x6a\x83\xae\x28\x85\x77\x1e\x89\x47\x78\x37\x3d\xee\x82\x80\x07\x43\x4a\xed\x6b\x59\xa8\x45\xe5\x9c\xc2\x6d\xb7\x21\xe2\x9d\x59\x43\x59\x53\xfd\xf0\x4a\x50\x3e\x0c\x18\x61\x82\x70\xb6\xd9\xce\xb1\x9b\xb9\x36\x26\x5d\x69\x25\xea\x9e\xcc\x8d\x75\x28\x5e\x78\x52\x8a\xae\xf3\xd3\xee\xb2\x3b\x72\x39\xc5\x86\x8e\x0e\xd5\x96\x2b\x20\x9b\x75\x73\xff\x7f\xf5\x82\xa2\xc5\xee\x1c\x3d\x96\x8d\xcf\x6e\x0e\x55\xb9\x69\xd0\xb2\x96\x45\xb6\x6a\x59\x6e\xba\xd5\x7c\x1b\xd7\x2b\xa4\x0c\x34\x4b\x93\x79\x24\xcf\x33\x0f\x0e\x78\x5d\x97\x21\x8a\xcb\x81\x4a\xa4\x43\xe9\x93\x1d\x46\xb6\x3d\xd4\x86\x2d\x98\xf0\xb8\xcc\xbf\xe0\x1d\xfc\xb7\x37\x09\xa3\x28\x99\x99\x1f\x66\xa4\x06\x0d\x20\x9e\x94\xc9\x64\xc1\xe7\x4a\x5f\x2c\xc7\x86\x05\x9f\xb7\x7c\xee\x1b\xde\x2d\xcc\x29\xbc\xec\x2e\xa9\x90\xc2\x80\xe8\x44\x0e\x15\x65\x79\x8b\x45\x5f\x81\xb8\xcc\x3d\xea\xb5\x02\xbe\x81\x5a\xbc\x65\xe5\xf5\xbd\xba\x68\x8c\x3a\x81\xbd\xc6\x7d\xef\x12\x14\x0c\x15\xf7\x02\xc7\xff\x9b\xe9\xe0\x27\xbc\xfa\xc7\x0b\xdd\xbe\xeb\xcd\x5b\x29\xe2\xeb\xae\x31\xac\xba\xd2\xe4\xc7\xb5\x28\x6c\xdd\x4b\x85\x54\xe1\x43\x45\x32\x6b\x72\xe6\x56\xd2\xe1\x3a\xca\x88\x58\x77\x21\xaa\xc2\x8c\x5c\x75\x9b\x78\xd3\x85\x9b\xba\xde\x70\x70\xb7\xef\x96\x28\x64\xbd\xbd\x66\x7e\xb2\x3d\xac\xc2\xa7\x22\x82\xbd\x09\xab\xf0\x41\xdf\xc3\xd3\x3b\x98\xc2\xd2\xb1\x0
f\x6a\xb1\x14\x0a\x1b\xd5\x23\x64\xaa\xff\xd3\xe4\xe2\x13\x27\xaf\xbb\xff\x2e\x5a\xb1\xd4\xf9\x22\xa1\x90\xe5\x26\x6e\x75\xc1\xda\xdb\xb6\x00\xf9\x67\xc4\x8c\xfa\x9d\xa6\xea\x1d\x26\xb1\x68\x0f\x4b\x0f\x69\x51\xac\x98\x82\x85\x42\x76\x56\x16\x6a\x15\xa4\xbf\xd8\xf8\x53\x59\xae\x75\x95\x7e\xc7\xbd\x35\xdf\x65\xa7\x65\x59\xce\x6a\xeb\x82\xe3\xea\x80\x24\xc7\xd7\x0a\x2f\x99\x9d\x29\x4a\x73\x05\xb3\x05\x8b\xc6\x2e\x7b\x09\xec\x78\xe6\x76\x84\x82\xd4\xe9\x61\x40\x28\x22\xc8\x35\x11\x3c\xee\x82\x84\x07\x83\x82\x08\xce\x29\x1c\x1a\xf6\x6e\xdd\xa2\x70\xae\x9f\xbd\xc4\x57\x20\x74\xa2\x7f\x4d\xd4\x3e\xc1\x67\x53\xcd\xd8\xb1\xc0\x91\xfe\x1d\xb3\xa9\x45\x61\xa7\xeb\xfc\xbc\xb1\xcb\x1c\x00\xe0\x8e\x6d\xa3\xf3\xb6\xe6\xf0\x5e\x97\xea\xb7\xd6\x1c\xde\xe5\xb5\x8b\x20\xbb\x58\x72\x9c\xff\x9a\xc3\x59\x51\xa3\x48\x20\x84\x35\xf2\x5f\x73\xd8\x2e\x6a\x98\xd4\x26\x58\xae\x9f\xe7\xf0\xb6\xdb\x1e\xe5\xab\x11\xf4\x97\x72\xf7\xef\x74\x6b\x20\xff\xbe\x9b\x13\x49\xc3\x0e\x18\x13\xc5\x45\xcb\xc4\xb3\x6e\x2d\xf5\xe5\x76\xb7\x96\x99\xe1\x7d\xb7\x0e\xef\xef\xba\xf3\xb9\xbe\x86\x1a\x59\x9d\xd2\x31\x75\x4e\xe1\xf4\x4e\x6e\x7b\x21\x00\xc3\xda\x8c\xbc\x35\x08\x78\xd9\x8f\xb8\x12\xf8\x60\x9b\x11\xd1\xbf\xc1\x00\x33\xc6\x31\xa6\xe6\x26\xa3\x01\xe3\x07\x12\xfa\x9d\x2e\x9c\x75\x61\xbb\x0b\xef\xba\xa0\x56\x3c\x0f\x6b\x37\x11\xc9\x98\xcb\x4b\x9e\xa5\xfd\x30\x79\xe4\x27\x5e\xaa\x37\x3f\x8c\x2f\xf4\xc3\x98\xc5\xec\x82\x8b\x47\x7a\x6b\x76\x79\x34\xb1\xe6\xdf\x28\xec\xae\xc6\xe1\x0b\x46\xf6\x46\x96\x60\xbe\x8f\xfe\x6b\xd6\x26\x7a\x8e\xe5\x6e\x2a\x26\xf6\x8e\x58\xbe\xa5\x85\x03\x8f\xc4\x7d\xef\x53\x1d\x6f\x1b\xc3\xa7\xce\xa4\x22\x56\xec\x7a\x55\x9b\xdf\x83\xae\x93\x08\x62\x09\x8c\x34\xf2\xa9\xdb\x9e\xa4\xfc\x95\x07\x16\x2e\xa7\xea\x03\xe3\x35\xc1\x41\x97\x58\xa9\xbc\x8d\x78\x7a\xc9\xb9\x2c\xac\xab\xa2\x84\xf9\x68\x59\x25\x08\xc3\xc0\xc8\x85\x41\x1c\x17\x22\x11\xa6\xe8\x0b\x27\xd6\x0e\x0b\x23\xee\x2b\x3a\xac\xda\x74\xb6\x8e\x8f\x3b\x81\x48\xc6\x3a\xd5\x0f\x35\xae\x8c\x3a\x56\xe7\x59\x4c\x7e\x7a\xaf\xec\x2d\xf0\x0e\xed\x43\x06\xde\x07\xbb\x91\xb1\xec\x8e\x14\x19\xd1\xc4\x96\xcd\x6d\xf5\x63\x13\x98\x7d\x4b\x3e\x44\x60\xfd\x97\x05\x24\xd6\x19\xc6\xd8\x63\xd0\xef\x46\x96\xe2\x27\x8e\xd0\xb7\xef\x63\xa4\xd8\x8a\x14\x8e\x14\x69\x98\x8d\xca\x80\x86\x76\x19\xe5\x30\xee\xbb\xae\xe2\x2f\xfb\x0c\x43\xe1\xcf\xc1\x5b\xb7\xa7\x92\xbc\x12\x14\xbc\xa7\xf6\x79\x04\xde\x73\xbb\x29\x80\xea\x33\xfb\x8c\xfc\xf9\x51\x2f\x3d\x72\x00\x2f\x2b\x20\x63\xcd\xe7\x73\xfa\x82\x27\xce\xcf\x03\x16\xc6\xf6\xcf\x30\x0e\xa5\xfd\x4a\x90\x77\x21\x25\x03\xf5\x91\xb8\xff\x3a\x1a\x8f\x8a\x94\xff\xc6\x7c\x2a\x48\x04\x41\x1e\xbd\x13\xc6\x1d\x49\xf1\x8f\x18\x61\xe8\x26\xcb\x71\xf8\xe8\x92\x3c\xa1\x76\x4c\xc4\x9f\xfc\x1b\xc8\x3f\xf9\x37\x6a\xab\x47\x47\x3d\xce\x09\x76\x09\x3c\xa1\x36\x3e\x39\x3c\x99\x13\xc5\x06\xd1\x17\xff\x7f\x00\x00\x00\xff\xff\xe8\x33\x75\xc0\x2b\xae\x01\x00"), }, "/templates": &vfsgen۰DirInfo{ name: "templates", @@ -163,16 +163,16 @@ var Assets = func() http.FileSystem { "/templates/default.tmpl": &vfsgen۰CompressedFileInfo{ name: "default.tmpl", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 4554, + uncompressedSize: 5233, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\xc1\x6e\xe3\x38\x0c\xbd\xe7\x2b\x08\xef\xa5\x39\xc4\xdd\x73\x81\x62\x51\x2c\x76\xe7\x52\x0c\x06\x29\x32\x97\xc1\xc0\x50\x6d\xc6\x55\x2b\x4b\xae\x44\xa7\x0d\x1c\xff\xfb\x40\xb6\x9b\x58\x91\x9d\xda\x41\xe7\x34\xbd\xd5\x2c\xf9\x48\x3d\x3e\x91\x4a\x59\x42\x82\x6b\x2e\x11\x82\x28\x62\x02\x35\x65\x4c\xb2\x14\x75\x00\x55\x75\xd3\xf9\x2e\x4b\x40\x99\x40\x55\xcd\x06\x43\x56\xcb\x5b\x1b\x55\x96\x10\xfe\xf7\x4a\xa8\x25\x13\xab\xe5\x2d\x54\xd5\xe5\x5f\x97\xb5\x9f\xf9\x47\x63\x8c\x7c\x83\xfa\xda\x3a\x2d\xdb\x0f\xd8\x41\xa1\xc5\x73\x81\x7a\xdb\x84\xb7\x89\xdc\x4c\xa6\xb8\x7f\xc4\x98\x6c\x86\x1f\x36\xfa\x8e\x18\x15\x06\x76\x40\x6a\x95\xe7\xa8\x9b\x50\xbe\x06\x7c\xde\xff\x33\x58\x73\xcd\x65\x6a\x63\xae\x6c\x4c\x7d\x20\x13\xfe\x5f\x5b\x61\x07\x02\x65\x37\xe3\x4f\xb0\x4e\x5f\xb4\x2a\xf2\x5b\x76\x8f\xc2\x84\x77\x4a\x13\x26\xdf\x18\xd7\x26\xfc\xce\x44\x81\x36\xe1\xa3\xe2\x12\x02\xb0\xa8\xd0\xa4\x4c\x09\x2e\x2c\x56\xf8\xaf\xca\x32\x25\x9b\xe0\x79\x6b\xeb\xe0\xcd\xa1\xaa\x2e\xca\x12\x5e\x38\x3d\xb8\xce\xe1\x12\x33\xb5\x41\x37\xfb\x57\x96\xa1\x69\x19\xed\xcb\xbe\x2f\x7c\xbe\xff\x6b\xa0\x4d\x09\x9a\x58\xf3\x9c\xb8\x92\xc1\x09\x8e\x09\x5f\xa9\x69\x69\x24\xb8\xa1\xd6\x55\x33\x99\x22\x84\x50\x55\x4d\x5d\x57\xb3\x83\xd1\xe7\xc9\xb2\xb2\xa8\x89\xb4\xe5\xdb\xaf\x6b\xd8\x1f\xa0\x2d\xac\x49\x7e\x23\xa5\x22\x66\x6b\x72\x20\x3b\xe6\xf3\x70\xef\x54\xa1\x63\xbc\x6a\x9a\x89\x12\x35\x23\xa5\x1b\x25\xce\x7a\x88\x72\x38\x30\x82\xc5\x4f\x61\x82\x6b\x56\x08\x0a\x89\x93\xc0\x96\x05\xc2\x2c\x17\x8c\x5c\x2d\x86\x43\x94\xbb\x38\x85\xb1\xb7\x21\xeb\x83\x72\xef\xdc\x48\xbc\x35\x13\xe2\x9e\xc5\x4f\x1e\x5e\x6f\xf9\x16\x14\x76\xf0\x9e\xa3\xe0\xf2\x69\x74\x05\x71\x5b\x01\x4f\x82\x71\x01\xb9\x46\xab\xae\x91\xde\x9d\x82\x4e\x32\x56\x8f\x9c\x91\x25\xf3\x58\x49\xcc\xd4\x23\x0f\xc6\xfb\x17\x5a\x8c\xad\x78\xfc\xe1\xd6\x4a\x51\x33\x60\x07\x44\x98\xdb\xa3\x25\x05\x6d\xf7\x21\xfe\xfd\x9d\x26\x47\x1f\x31\x16\x1c\x25\x9d\x2f\xc8\x21\xc4\xc3\x12\x38\xaf\x67\x3e\x2e\x97\x86\x98\x8c\xd1\xf4\xe0\x7a\x03\x2b\x1c\x66\x55\xe5\x26\x45\xc9\x71\x0f\x9c\xa1\x31\x2c\x3d\xef\x7e\x7b\x60\x7e\x87\xda\xf9\x3e\x30\xce\x7a\x07\xfa\xec\x68\x9d\x38\xfb\x6a\x0e\x7f\xc3\xa2\xaa\x66\x8d\x11\x1a\x63\x3d\x38\x4f\x33\xe2\x2e\xbd\x3a\xc9\xa2\x73\xa2\x9e\x7c\x4b\x34\x4a\x6c\x30\x39\xca\xf8\x66\x1e\x9f\xf3\x2d\xc2\xcb\xba\x18\x43\xa9\xa9\xe7\xf8\x74\x35\x39\x5d\x7f\xc1\xf8\x81\xd1\xd4\x9e\xcf\x3e\xfb\x77\xa2\x7f\xdd\x77\xe1\x4a\x0b\x0f\xaf\xb7\x3f\x03\x5d\x3f\xea\x0f\xa9\xc8\x2e\xcb\xc1\x49\xea\xbb\xe7\x4c\xd3\x76\x82\x3f\xb1\x74\xac\x37\x4b\x51\x52\x74\xbc\xe2\x5c\x7d\x6d\x78\x4c\x4a\xab\xdc\x1c\x64\x4b\x8c\x30\x72\x85\xf6\xa9\xa5\x69\xb3\xc0\x67\x15\x25\x71\xda\x46\x09\x37\xb9\x60\xdb\x68\xe0\x35\xf5\xfe\xe0\xf6\x91\x33\x25\x39\x29\x4b\x48\x44\x4a\x89\x89\x2b\xd1\xd9\x5d\x85\x79\x50\x1b\xd4\x1f\xf0\x7e\xf4\xa0\x7e\xbf\x9e\x3e\x46\x4e\xe3\xd5\xf4\x71\x62\xea\xe4\x1c\xc1\xe4\xe1\x4d\x37\x65\xa7\x74\x5f\x73\xb2\x73\xd9\x0f\xbf\x4a\xa7\xff\x46\xe8\xe0\x7c\xb6\x77\x4a\x7b\xbb\x2c\x12\x0a\x4c\x35\xcb\xfa\xa8\xfc\x93\x48\xf9\x15\x00\x00\xff\xff\xf5\xfe\x1f\x22\xca\x11\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\x41\x6f\xdb\x3c\x0c\xbd\xe7\x57\x10\xfe\x2e\xcd\x21\xee\x77\x2e\x50\x0c\xc5\xb0\xed\x52\x0c\x43\x8a\xec\x32\x0c\x86\x62\x33\xae\x5a\x59\x72\x25\x3a\x6d\xe0\xf8\xbf\x0f\xb2\xdd\xc4\x8e\xec\xd4\x0e\xb2\xd3\x72\xab\x59\xf2\x91\x7a\x7c\x22\x95\x3c\x87\x08\x57\x5c\x22\x78\x41\xc0\x04\x6a\x4a\x98\x64\x31\x6a\x0f\x8a\xe2\xae\xf1\x9d\xe7\x80\x32\x82\xa2\x98\xf4\x86\x2c\xe6\xf7\x36\x2a\xcf\xc1\xff\xf2\x46\xa8\x25\x13\x8b\xf9\x3d\x14\xc5\xf5\x7f\xd7\xa5\x9f\xf9\xa4\x31\x44\xbe\x46\x7d\x6b\x9d\xe6\xf5\x07\x6c\x21\xd3\xe2\x25\x43\xbd\xa9\xc2\xeb\x44\xed\x4c\x26\x5b\x3e\x61\x48\x36\xc3\x2f\x1b\xfd\x40\x8c\x32\x03\x5b\x20\xb5\x48\x53\xd4\x55\x28\x5f\x01\xbe\xec\xfe\xe9\xad\xb8\xe6\x32\xb6\x31\x37\x36\xa6\x3c\x90\xf1\xbf\x96\x56\xd8\x82\x40\xd9\xcc\xf8\x1b\xac\xd3\x37\xad\xb2\xf4\x9e\x2d\x51\x18\xff\x41\x69\xc2\xe8\x07\xe3\xda\xf8\x3f\x99\xc8\xd0\x26\x7c\x52\x5c\x82\x07\x16\x15\xaa\x94\x31\xc1\x95\xc5\xf2\x3f\xab\x24\x51\xb2\x0a\x9e\xd6\xb6\x06\xde\x14\x8a\xe2\x2a\xcf\xe1\x95\xd3\x63\xdb\xd9\x9f\x63\xa2\xd6\xd8\xce\xfe\x9d\x25\x68\x6a\x46\xbb\xb2\xef\x0a\x9f\xee\xfe\xea\x69\x53\x84\x26\xd4\x3c\x25\xae\xa4\x77\x84\x63\xc2\x37\xaa\x5a\x1a\x08\x6e\xa8\x76\xd5\x4c\xc6\x08\x3e\x14\x45\x55\xd7\xcd\x64\x6f\x74\x79\xb2\xac\xcc\x4a\x22\x6d\xf9\xf6\xeb\x16\x76\x07\xa8\x0b\xab\x92\xdf\x49\xa9\x88\xd9\x9a\x5a\x90\x0d\xf3\x69\xb8\x0f\x2a\xd3\x21\xde\x54\xcd\x44\x89\x9a\x91\xd2\x95\x12\x27\x1d\x44\xb5\x38\x30\x82\x85\xcf\x7e\x84\x2b\x96\x09\xf2\x89\x93\xc0\x9a\x05\xc2\x24\x15\x8c\xda\x5a\xf4\xfb\x28\x6f\xe3\x64\xc6\xde\x86\xa4\x0b\xaa\x7d\xe7\x06\xe2\xad\x98\x10\x4b\x16\x3e\x3b\x78\x9d\xe5\x5b\x50\xd8\xc2\x47\x8e\x82\xcb\xe7\xc1\x15\x84\x75\x05\x3c\xf2\x86\x05\xa4\x1a\xad\xba\x06\x7a\x37\x0a\x3a\xca\x58\x39\x72\x06\x96\xcc\x43\x25\x31\x51\x4f\xdc\x1b\xee\x9f\x69\x31\xb4\xe2\xe1\x87\x5b\x29\x45\xd5\x80\xed\x11\x61\x6a\x8f\x16\x65\xb4\xd9\x85\xb8\xf7\x77\x9c\x1c\x5d\xc4\x50\x70\x94\x74\xba\x20\xfb\x10\xf7\x4b\xe0\xb4\x9e\xb9\xb8\x5c\x1a\x62\x32\x44\xd3\x81\xeb\x0c\x2c\xbf\x9f\x55\x95\x9a\x18\x25\xc7\x1d\x70\x82\xc6\xb0\xf8\xb4\xfb\xed\x80\xb9\x1d\xaa\xe7\x7b\xcf\x38\xeb\x1c\xe8\x93\x83\x75\xd2\xda\x57\x53\xf8\x1f\x66\x45\x31\xa9\x8c\x50\x19\xcb\xc1\x79\x9c\x91\xf6\xd2\x2b\x93\xcc\x1a\x27\xea\xc8\x37\x47\xa3\xc4\x1a\xa3\x83\x8c\xef\xe6\xe1\x39\xdf\x23\x9c\xac\xb3\x21\x94\x9a\x72\x8e\x8f\x57\x53\xab\xeb\xaf\x18\x3e\x32\x1a\xdb\xf3\xc9\xa5\x7f\x47\xfa\xd7\x7c\x17\x2e\xb4\x70\xf0\x3a\xfb\xd3\xd3\xf5\x83\xfe\x90\x0a\xec\xb2\xec\x9d\xa4\xae\x7b\xca\x34\x6d\x46\xf8\x13\x8b\x87\x7a\xb3\x18\x25\x05\x87\x2b\xae\xad\xaf\x35\x0f\x49\x69\x95\x9a\xbd\x6c\x89\x11\x06\x6d\xa1\x5d\xb4\x34\x6e\x16\xb8\xac\xa2\x24\x4e\x9b\x20\xe2\x26\x15\x6c\x13\xf4\xbc\xa6\x3e\x1e\xdc\x2e\x72\xa2\x24\x27\x65\x09\x09\x48\x29\x31\x72\x25\xb6\x76\x57\x66\x1e\xd5\x1a\xf5\x19\xde\x8f\x0e\xd4\xdf\xd7\xd3\x79\xe4\x34\x5c\x4d\xe7\x13\x53\x23\xe7\x00\x26\xf7\x6f\xba\x31\x3b\xa5\xf9\x9a\x93\x8d\xcb\xbe\xff\x55\x3a\xfe\x37\x42\x03\xe7\xd2\xde\x31\xed\x6d\xb2\x48\x28\x30\xd6\x2c\xe9\xa2\xf2\x9f\x25\x25\xe2\x26\x54\x3a\x3a\xc3\x20\x3a\x44\xba\xb0\x6b\x9f\x09\x4b\x7c\xbb\x5c\xdd\x93\x78\xfc\x13\x00\x00\xff\xff\x40\x8b\x18\x6d\x71\x14\x00\x00"), }, "/templates/email.tmpl": &vfsgen۰CompressedFileInfo{ name: "email.tmpl", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 13888, + uncompressedSize: 14084, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x1a\x7f\x6f\xda\x48\xf6\x7f\x7f\x8a\xb7\x5e\x9d\xb6\x95\x30\x90\xa4\x8d\x1a\x7e\xe9\x28\x98\xc4\x3a\x62\x22\x70\xda\xad\x4e\xa7\xd5\x60\x3f\xf0\xec\xda\x33\xbe\x99\x21\xc0\xe6\xf2\xdd\x4f\x33\x06\x62\x08\x69\xbb\xab\x53\x61\xaf\x69\xd4\xc4\x33\x7e\xef\xcd\xfb\xfd\xde\x78\xc6\xba\xbf\x87\x08\x27\x94\x21\xd8\x98\x12\x9a\x94\x23\x9c\x90\x59\xa2\xca\x72\x36\xfe\x15\x43\x65\xc3\xc3\xc3\xfd\x3d\x28\x4c\xb3\x84\x28\x04\xfb\x97\x5f\x36\x6f\xca\xf9\x3b\x64\x11\x3c\x3c\x3c\x4f\x29\x56\x69\xa2\xc9\x58\x8d\x1f\x1c\xc7\x1a\xa9\x65\x82\x40\x58\x04\x57\xc1\x75\x1f\x22\x14\xf4\x0e\x23\x98\x08\x9e\x42\xac\x54\x26\x6b\x95\xca\x94\xaa\x78\x36\x2e\x87\x3c\xad\x68\x4a\xd3\x19\xab\x28\x41\x98\x24\xa1\xa2\x9c\x91\xc4\x31\x0b\x38\x6b\xa6\xa4\x65\x59\x41\x8c\x70\xed\x05\xd0\xa7\x21\x32\x89\xf0\xea\xda\x0b\x5e\x5b\x56\x87\x67\x4b\x41\xa7\xb1\x82\x57\xe1\x6b\x38\xad\x9e\xbc\x81\xeb\x9c\xa2\x65\xdd\xa0\x48\xa9\x94\x94\x33\xa0\x12\x62\x14\x38\x5e\xc2\x54\x10\xa6\x30\x2a\xc1\x44\x20\x02\x9f\x40\x18\x13\x31\xc5\x12\x28\x0e\x84\x2d\x21\x43\x21\x39\x03\x3e\x56\x84\x32\xca\xa6\x40\x20\xe4\xd9\xd2\xe2\x13\x50\x31\x95\x20\xf9\x44\xcd\x89\xc8\x25\x24\x52\xf2\x90\x12\x85\x11\x44\x3c\x9c\xa5\xc8\x14\xd1\x12\xc0\x84\x26\x28\xe1\x95\x8a\x11\xec\xd1\x0a\xc3\x7e\x6d\x16\x89\x90\x24\x16\x65\xa0\xdf\xad\x5f\xc1\x9c\xaa\x98\xcf\x14\x08\x94\x4a\x50\xa3\x85\x12\x50\x16\x26\xb3\x48\xf3\xb0\x7e\x9d\xd0\x94\xae\x56\xd0\xe8\x46\x70\x69\x29\x0e\x33\x89\x25\xc3\x67\x09\x52\x1e\xd1\x89\xfe\x8b\x46\xac\x6c\x36\x4e\xa8\x8c\x4b\x10\x51\x4d\x7a\x3c\x53\x58\x02\xa9\x27\x8d\x1e\x4b\x5a\x8e\x0a\x17\x20\x31\x49\xac\x90\x67\x14\x25\x18\x59\x1f\xb9\x33\x30\x9a\xf5\x4c\x2b\x54\xad\x54\x24\xf5\xcc\x3c\xe6\xe9\xb6\x24\x54\x5a\x93\x99\x60\x54\xc6\x68\x70\x22\x0e\x92\x9b\x15\xb5\x4f\xe9\x19\x0d\x3e\xe1\x49\xc2\xe7\x5a\xb4\x90\xb3\x88\x6a\x89\x64\x2d\x37\x32\x19\xf3\x3b\x34\xb2\xe4\x76\x65\x5c\xd1\x30\x57\xb7\x31\x40\xf6\x68\xd5\xd5\x2b\x19\x93\x24\x81\x31\xae\x14\x86\x11\x50\x06\xa4\x20\x8e\xd0\xcb\x4b\x45\x98\xa2\x24\x81\x8c\x0b\xb3\xde\xae\x98\x65\xcb\x0a\xae\x5c\x18\x0d\x7a\xc1\xc7\xf6\xd0\x05\x6f\x04\x37\xc3\xc1\x07\xaf\xeb\x76\xc1\x6e\x8f\xc0\x1b\xd9\x25\xf8\xe8\x05\x57\x83\xdb\x00\x3e\xb6\x87\xc3\xb6\x1f\x7c\x82\x41\x0f\xda\xfe\x27\xf8\x87\xe7\x77\x4b\xe0\xfe\x7c\x33\x74\x47\x23\x18\x0c\x2d\xef\xfa\xa6\xef\xb9\xdd\x12\x78\x7e\xa7\x7f\xdb\xf5\xfc\x4b\x78\x7f\x1b\x80\x3f\x08\xa0\xef\x5d\x7b\x81\xdb\x85\x60\x00\x7a\xc1\x15\x29\xcf\x1d\x69\x62\xd7\xee\xb0\x73\xd5\xf6\x83\xf6\x7b\xaf\xef\x05\x9f\x4a\x56\xcf\x0b\x7c\x4d\xb3\x37\x18\x42\x1b\x6e\xda\xc3\xc0\xeb\xdc\xf6\xdb\x43\xb8\xb9\x1d\xde\x0c\x46\x2e\xb4\xfd\x2e\xf8\x03\xdf\xf3\x7b\x43\xcf\xbf\x74\xaf\x5d\x3f\x28\x83\xe7\x83\x3f\x00\xf7\x83\xeb\x07\x30\xba\x6a\xf7\xfb\x7a\x29\xab\x7d\x1b\x5c\x0d\x86\x9a\x3f\xe8\x0c\x6e\x3e\x0d\xbd\xcb\xab\x00\xae\x06\xfd\xae\x3b\x1c\xc1\x7b\x17\xfa\x5e\xfb\x7d\xdf\xcd\x97\xf2\x3f\x41\xa7\xdf\xf6\xae\x4b\xd0\x6d\x5f\xb7\x2f\x5d\x83\x35\x08\xae\xdc\xa1\xa5\xc1\x72\xee\xe0\xe3\x95\xab\xa7\xf4\x7a\x6d\x1f\xda\x9d\xc0\x1b\xf8\x5a\x8c\xce\xc0\x0f\x86\xed\x4e\x50\x82\x60\x30\x0c\x36\xa8\x1f\xbd\x91\x5b\x82\xf6\xd0\x1b\x69\x85\xf4\x86\x83\xeb\x92\xa5\xd5\x39\xe8\x69\x10\xcf\xd7\x78\xbe\x9b\x53\xd1\xaa\x86\x2d\x8b\x0c\x86\x66\x7c\x3b\x72\x37\x04\xa1\xeb\xb6\xfb\x9e\x7f\x39\xd2\xc8\x5a\xc4\x35\x70\xd9\x72\x9c\x96\xd5\xf8\xa1\x3b\xe8\x04\x9f\x6e\x5c\xd0\x49\x0a\x6e\x6e\xdf\xf7\xbd\x0e\xd8\x4e\xa5\xf2\xf1\xac\x53\xa9\x74\x83\x2e\xfc\x6c\xd2\xd4\x49\xb9\x0a\x81\x4e\x41\x34\xcf\x40\x95\x8a\xeb\xdb\x60\xeb\x84\x55\xab\x54\xe6\xf3\x79\x7
9\x7e\x56\xe6\x62\x5a\x09\x86\x95\x85\xa6\x75\xa2\x91\x57\x8f\x8e\x2a\x60\x96\x23\x15\xd9\x2d\xab\x61\x16\x5c\xa4\x09\x93\xcd\x3d\x64\x4e\x2e\x2e\x2e\x72\x6c\x1b\xa4\xce\x98\x4d\x3b\x25\x62\x4a\x59\x0d\xaa\x75\x98\x70\xa6\x9c\x09\x49\x69\xb2\xac\xc1\x4f\x57\x98\xdc\xa1\xa2\x21\x01\x1f\x67\xf8\x53\x09\x36\x13\x25\x68\x0b\x4a\x92\x12\x48\xc2\xa4\x23\x51\xd0\x49\x1d\xc6\x7c\xe1\x48\xfa\x3b\x65\xd3\x1a\x8c\xb9\x88\x50\x38\x63\xbe\x58\x11\x95\xf4\x77\xac\xc1\xc9\x9b\x6c\x51\x37\x4c\x22\x89\x5a\x56\x23\x45\x45\x80\x91\x14\x9b\xf6\x1d\xc5\xb9\x8e\x10\x5b\x47\xa6\x42\xa6\x9a\xf6\x9c\x46\x2a\x6e\x46\x78\x47\x43\x74\xcc\xc0\x5e\xe3\x68\xc1\x1c\xfc\xf7\x8c\xde\x35\xed\x4e\x0e\xef\x04\xcb\x0c\x0b\xd8\x0a\x17\xaa\xa2\x05\xad\x9b\x54\x2b\x51\x35\x6f\x83\x9e\xf3\x4e\xd3\x50\x54\x25\xd8\xfa\x5c\xf9\x69\x54\x72\x18\xab\x61\xb4\xd4\xb2\xfe\x9e\x62\x44\x09\x70\x96\x2c\x41\x86\x02\x91\x99\xd4\xf0\x2a\x25\x8b\x9c\xb7\x1a\x9c\xbf\xa9\x66\x8b\xd7\x70\x6f\x01\x8c\x79\xb4\x34\x0f\x00\x19\x89\x22\xa3\x93\x2a\xfc\x40\x53\x2d\x22\x61\xaa\x6e\x01\x3c\x58\x16\x40\x7c\x52\xb2\xe2\xd3\x92\x15\x9f\x95\xac\xf8\xcd\x0a\xc5\x68\x6c\x8e\x3a\x0d\xd5\xe0\x5d\x75\x17\x11\x60\x6d\xb2\xd3\x6a\xb6\x80\x2a\xbc\xcd\x16\xfb\x69\x17\xe9\xe5\x16\x38\x3d\x7d\x06\xf6\xf4\x29\xec\xc9\xbb\x67\x60\xcf\xf6\xc0\x9e\xef\x87\x2d\x6b\x7b\x10\xca\x50\x7c\x49\x1d\x00\x2b\x35\x9e\x54\xab\x7f\x7b\x96\x14\x32\xf5\x55\x7a\x5d\x03\x3b\x73\x41\xb2\x5d\x8c\x93\xea\x33\xcc\x52\x76\xc7\x75\x66\xbf\xff\x22\x3f\x0f\x56\xa3\xb2\xf2\x8c\x46\x25\xf7\x66\xab\x61\xac\x4e\x15\xa6\x32\xe4\x19\x9a\x27\xb5\xcc\x70\x13\x87\x32\x8c\x31\x25\x26\x0e\x5d\xdd\x68\x5c\xa3\x94\x64\x8a\x07\x8b\x44\x70\xe6\x38\xfe\x8d\x2a\x27\x7f\x91\x72\xae\x62\x83\x94\x57\x2c\x4a\x24\x46\x8f\x40\x3a\xa0\x0c\xb6\x43\xa2\x5f\x67\x52\xd5\x80\x71\x86\x75\x88\x57\x8e\xaa\xf5\x54\x87\x84\x32\x74\x36\x53\xe5\x73\x4c\xeb\x30\x26\xe1\x6f\x53\xc1\x67\x2c\x72\x42\x9e\x70\x51\x83\x1f\x27\xe7\xfa\xa7\x5e\xd4\xb1\x4e\x0c\x56\x43\x91\x71\x82\x10\x26\x44\xca\xa6\xad\x15\x6a\x2c\x78\x38\x15\x7d\x2d\xef\xf9\xa8\x69\xeb\x91\x0d\xe3\xa9\x01\x6e\xda\x2b\x60\xbb\x65\x01\x34\x94\x38\x60\xce\xd5\x1e\xdd\x50\xd1\xc1\x14\x79\x87\x42\x13\x49\x1c\x92\xd0\x29\xab\x81\xe2\x59\xdd\x86\x3b\x33\x6a\xda\x8a\x67\x76\xab\x51\x51\xd1\x23\xa3\x2b\x1f\xd8\x24\x90\x8d\x8a\xcf\xab\xd5\x8d\x3f\x1c\x8e\x77\xdd\xd8\x66\x09\x59\xd6\x60\x9c\xf0\xf0\xb7\x3a\x14\x2b\x41\xb5\xaa\xd1\x36\x3a\x06\x32\x53\xbc\x0e\x61\x82\x44\xe8\xa5\x54\xbc\x2b\xba\x91\x1a\xa0\x11\xd1\xbb\xa2\xe0\xc8\xd4\x37\x16\xf5\xcb\x52\xec\xca\xbd\x49\xab\xba\x1a\xd5\x37\xa2\x68\x23\x16\x63\x39\x25\x94\xed\x44\x49\x88\x49\xb2\xc2\x6e\xda\xd5\x7c\x2c\x33\x12\xae\xc7\xc7\x14\xf3\x13\x43\x40\x23\xd5\xe0\x24\x5b\x80\xe4\x09\x8d\xe0\x47\xbc\xd0\x3f\xeb\x57\x8e\x20\x11\x9d\xc9\x1a\x9c\x69\x4d\x14\xb3\xc0\x64\x52\x50\xcc\x31\xa4\x82\xf5\xbf\xfb\x7b\xa0\x13\x98\x2a\x78\x95\x20\x83\x72\x3b\x41\xa1\x64\xb9\x47\x05\x65\xd3\xd7\x50\xd5\xdb\xeb\x22\x78\x21\x30\x89\x06\x05\xf3\xdb\x99\x13\xa1\x37\xad\xdf\xcc\x64\x7b\x03\x72\xa7\x1d\xa9\xc3\x96\xf1\xb6\x9a\xaa\xb7\xd5\xea\xae\xe3\x82\xa9\x6f\x2b\x7a\x21\x32\x85\x62\x9f\x59\xcd\xff\xaa\x16\x6c\x8f\x97\xb8\xe7\x6f\x4f\x4f\x3b\x3b\xb1\x0d\xab\xe7\x9c\x66\xd1\x2b\x72\xf0\xa7\x06\xc1\x44\xe2\x13\xc5\xeb\xdc\xf8\x8c\xf6\xa7\x9c\x47\xdf\xbb\xea\xcf\xdf\xbd\xbf\xa8\xf6\xbe\x5a\xf5\x39\xf8\x1e\xd5\xe7\xdf\x9c\x00\x76\xe6\x57\x81\x01\xff\x01\x1d\x26\x0f\x0f\xb9\xe6\xf7\x06\xcf\x6b\x38\x81\x87\x07\xb9\x21\x06\x13\x2e\x34\x09\x41\xd8\x14\xa1\x7c\x29\xf8\x2c\xeb\x93\x31\x26\xb2\x3c\xe2\x42\x61\x74\x43\xa8\x90\x4f\x57\xcd\xd7\xf5\x49\xaa\x7d\xa1\xa9\x
9f\x3f\x90\x64\xa6\x07\xf0\x94\xbf\x3d\x7c\x3f\x56\xd3\xcd\x58\x1c\x67\x0e\xda\xa9\xf6\xeb\xb6\xdd\x3e\xaa\x5e\xe5\xd1\x67\xcf\x4c\x9d\xdb\x5b\xbf\x77\x4a\xdf\x5f\xa1\xda\x3d\xe1\xfd\x98\x3c\xe3\x59\xff\x30\xbd\xc7\xb1\x3a\x48\x15\xaa\xab\x66\xe8\xb3\x4e\xb2\x12\x8e\x40\x2c\x70\xd2\xb4\x77\x3e\x47\x98\x04\x93\x12\x46\xa6\x28\x6e\x87\xfd\xfc\xb3\x84\xbd\xd9\x19\x29\xe6\x64\x82\xa6\x44\x2c\x0f\xa7\x05\x93\xb6\x23\x0c\xb9\x30\xdf\x86\xd7\x1b\xc2\x75\x4a\xee\xf5\x7a\x7b\x53\xf5\xd9\x9b\x77\x18\x91\xc7\x76\x6a\xd5\x4a\x6d\x4f\x3b\x9b\xdd\x55\xb6\x58\x55\x89\xad\xad\xe5\xa9\xde\x58\x6e\x15\x96\x31\x4f\xa2\xfd\xa5\x24\x9c\x09\xa9\x57\xce\x38\xcd\x27\x36\x2d\x2c\x65\x86\xe8\xaa\x93\xdd\x29\x39\x6f\x37\x32\x9a\xef\x6c\x13\x2e\xd2\x1a\x84\x24\xa3\x8a\x24\xf4\x77\xac\xdb\xad\x0f\x14\xe7\x40\x19\x7c\xc6\x74\xeb\xcf\x49\x64\xaf\x63\xef\xa4\xe8\xfd\x89\xfa\x4f\x35\x6a\x2f\x71\xfc\xad\xe3\x58\x2a\xc1\xd9\xf4\x70\x0a\xff\xe7\x63\x9b\xb2\x72\x8b\x4d\xb7\xf2\x2f\xc8\x27\x1a\x95\x9c\xc9\xff\x81\x2f\xee\x6d\x94\xa0\xd0\xe7\x6c\x73\xf2\xe2\x9d\xdf\xb9\x77\xe6\x1d\xef\xc6\x01\x1b\xe3\xe3\x32\x7e\xd1\x73\xf7\x36\xe7\x85\x5e\x1c\x9a\x50\xec\xc6\x0f\x2a\xca\xf3\x91\xb8\xaf\x66\x30\xc6\xf3\x63\x5c\x99\x57\x8c\x83\x7b\x45\x81\xa3\xe3\x70\x8d\x2f\xea\x73\x9d\xdd\x1e\x19\xff\x7f\x70\x94\x62\x13\x5a\xbe\x44\x86\x82\x28\xae\xdb\x4e\xd3\x73\x1e\x2a\xff\xed\x36\x8c\x4f\xfa\xcd\x19\x8b\x50\xe8\x0e\xae\x6e\xb7\x46\x7c\x26\x42\xd4\x8d\xd6\xb1\xe5\x96\x3f\x57\x59\xbf\xb2\x01\x1c\xa2\xe4\xc9\x1d\x46\xcf\xb4\x80\x2f\x7d\xe3\xd1\x57\xe6\xa3\xab\x84\x8d\xf8\xe8\x38\xfa\x0b\x47\xf4\xe7\xba\xe5\x97\x40\xfb\x5e\x37\x68\xeb\xb4\x5d\xd8\xa2\xad\xa7\x0e\xb0\x49\xdb\x70\xf3\xe2\xa3\x2f\xdb\xb4\x97\x6d\xda\xcb\x36\xed\x65\x9b\xf6\xb2\x4d\x7b\xd9\xa6\xfd\xf1\xda\xda\xa8\x98\x83\xbf\xd6\x1f\x38\x88\xdd\xa0\x3c\xce\x14\x2e\x01\x4d\x38\x37\x27\xd7\x87\xb2\x6d\xf1\x8e\xdb\xd6\xfd\xa5\x8d\xd5\x2f\x2e\x2e\x3e\x73\x07\x68\xff\x51\xe8\xb1\x1c\x3b\x1f\x8f\x03\x6e\xdd\xec\xa0\x53\x96\x1f\x5d\xc1\x61\x3a\x9c\xaf\x6c\x68\xf6\x9f\xb5\x15\xdd\xa2\x28\xf6\xe9\x93\x16\x68\xe7\x76\x46\x6b\x2b\xbb\xb9\x0b\x85\x82\x91\xe4\x1b\x27\xb7\xcf\xe4\xae\x2f\x88\xd6\x1a\x21\x53\x30\x5e\x7e\xdd\x61\xe0\xd3\x64\xf3\xe4\x7a\xc6\x6e\x2a\x69\x54\x22\x7a\xd7\xca\x7f\x5b\xdb\x79\xe5\x2f\x72\xaf\x33\x17\xf1\x31\xe1\x35\x2a\x63\x1e\x2d\xcd\x0d\x6a\x95\x26\x2d\xcb\x7a\x4c\xab\xff\x0d\x00\x00\xff\xff\xdc\x5c\x67\xbb\x40\x36\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x5a\xfd\x6e\xdb\x38\x12\xff\x5f\x4f\x31\xab\xc5\x61\x5b\xc0\xb2\x9d\xa4\x0d\x1a\x7f\xe1\x5c\x47\x4e\x84\x73\xe4\xc0\x56\xda\x2d\x0e\x87\x05\x2d\x8d\x2d\xee\x4a\xa4\x8e\xa4\x63\x7b\x73\x79\xf7\x03\x29\xd9\xb1\x1d\x27\xcd\xed\x1d\x6a\xef\x35\x0d\x9a\x48\xd4\xcc\x70\xbe\x38\xf3\xa3\x44\xeb\xee\x0e\x22\x1c\x53\x86\x60\x63\x4a\x68\x52\x8e\x70\x4c\xa6\x89\x2a\xcb\xe9\xe8\x57\x0c\x95\x0d\xf7\xf7\x77\x77\xa0\x30\xcd\x12\xa2\x10\xec\x5f\x7e\x59\x3d\x29\xe7\xcf\x90\x45\x70\x7f\xff\xb4\xa4\x58\xa5\x89\x16\x63\x35\x7e\x70\x1c\x6b\xa8\x16\x09\x02\x61\x11\x5c\x06\x57\x3d\x88\x50\xd0\x5b\x8c\x60\x2c\x78\x0a\xb1\x52\x99\xac\x55\x2a\x13\xaa\xe2\xe9\xa8\x1c\xf2\xb4\xa2\x25\x4d\xa6\xac\xa2\x04\x61\x92\x84\x8a\x72\x46\x12\xc7\x4c\xe0\x2c\x95\x92\x96\x65\x05\x31\xc2\x95\x17\x40\x8f\x86\xc8\x24\xc2\x9b\x2b\x2f\x78\x6b\x59\x1d\x9e\x2d\x04\x9d\xc4\x0a\xde\x84\x6f\xe1\xb8\x7a\xf4\x0e\xae\x72\x89\x96\x75\x8d\x22\xa5\x52\x52\xce\x80\x4a\x88\x51\xe0\x68\x01\x13\x41\x98\xc2\xa8\x04\x63\x81\x08\x7c\x0c\x61\x4c\xc4\x04\x4b\xa0\x38\x10\xb6\x80\x0c\x85\xe4\x0c\xf8\x48\x11\xca\x28\x9b\x00\x81\x90\x67\x0b\x8b\x8f\x41\xc5\x54\x82\xe4\x63\x35\x23\x22\xb7\x90\x48\xc9\x43\x4a\x14\x46\x10\xf1\x70\x9a\x22\x53\x44\x5b\x00\x63\x9a\xa0\x84\x37\x2a\x46\xb0\x87\x05\x87\xfd\xd6\x4c\x12\x21\x49\x2c\xca\x40\x3f\x5b\x3e\x82\x19\x55\x31\x9f\x2a\x10\x28\x95\xa0\xc6\x0b\x25\xa0\x2c\x4c\xa6\x91\xd6\x61\xf9\x38\xa1\x29\x2d\x66\xd0\xec\xc6\x70\x69\x29\x0e\x53\x89\x25\xa3\x67\x09\x52\x1e\xd1\xb1\xfe\x8b\xc6\xac\x6c\x3a\x4a\xa8\x8c\x4b\x10\x51\x2d\x7a\x34\x55\x58\x02\xa9\x07\x8d\x1f\x4b\xda\x8e\x0a\x17\x20\x31\x49\xac\x90\x67\x14\x25\x18\x5b\x1f\xb4\x33\x34\x5a\xf5\x4c\x3b\x54\x15\x2e\x92\x7a\x64\x16\xf3\x74\xd3\x12\x2a\xad\xf1\x54\x30\x2a\x63\x34\x3c\x11\x07\xc9\xcd\x8c\x3a\xa7\xf4\x88\x26\x1f\xf3\x24\xe1\x33\x6d\x5a\xc8\x59\x44\xb5\x45\xb2\x96\x07\x99\x8c\xf8\x2d\x1a\x5b\xf2\xb8\x32\xae\x68\x98\xbb\xdb\x04\x20\x7b\x88\x6a\xf1\x48\xc6\x24\x49\x60\x84\x85\xc3\x30\x02\xca\x80\xac\x99\x23\xf4\xf4\x52\x11\xa6\x28\x49\x20\xe3\xc2\xcc\xb7\x6d\x66\xd9\xb2\x82\x4b\x17\x86\xfd\x6e\xf0\xb9\x3d\x70\xc1\x1b\xc2\xf5\xa0\xff\xc9\x3b\x77\xcf\xc1\x6e\x0f\xc1\x1b\xda\x25\xf8\xec\x05\x97\xfd\x9b\x00\x3e\xb7\x07\x83\xb6\x1f\x7c\x81\x7e\x17\xda\xfe\x17\xf8\x9b\xe7\x9f\x97\xc0\xfd\xf9\x7a\xe0\x0e\x87\xd0\x1f\x58\xde\xd5\x75\xcf\x73\xcf\x4b\xe0\xf9\x9d\xde\xcd\xb9\xe7\x5f\xc0\xc7\x9b\x00\xfc\x7e\x00\x3d\xef\xca\x0b\xdc\x73\x08\xfa\xa0\x27\x2c\x44\x79\xee\x50\x0b\xbb\x72\x07\x9d\xcb\xb6\x1f\xb4\x3f\x7a\x3d\x2f\xf8\x52\xb2\xba\x5e\xe0\x6b\x99\xdd\xfe\x00\xda\x70\xdd\x1e\x04\x5e\xe7\xa6\xd7\x1e\xc0\xf5\xcd\xe0\xba\x3f\x74\xa1\xed\x9f\x83\xdf\xf7\x3d\xbf\x3b\xf0\xfc\x0b\xf7\xca\xf5\x83\x32\x78\x3e\xf8\x7d\x70\x3f\xb9\x7e\x00\xc3\xcb\x76\xaf\xa7\xa7\xb2\xda\x37\xc1\x65\x7f\xa0\xf5\x83\x4e\xff\xfa\xcb\xc0\xbb\xb8\x0c\xe0\xb2\xdf\x3b\x77\x07\x43\xf8\xe8\x42\xcf\x6b\x7f\xec\xb9\xf9\x54\xfe\x17\xe8\xf4\xda\xde\x55\x09\xce\xdb\x57\xed\x0b\xd7\x70\xf5\x83\x4b\x77\x60\x69\xb2\x5c\x3b\xf8\x7c\xe9\xea\x21\x3d\x5f\xdb\x87\x76\x27\xf0\xfa\xbe\x36\xa3\xd3\xf7\x83\x41\xbb\x13\x94\x20\xe8\x0f\x82\x15\xeb\x67\x6f\xe8\x96\xa0\x3d\xf0\x86\xda\x21\xdd\x41\xff\xaa\x64\x69\x77\xf6\xbb\x9a\xc4\xf3\x35\x9f\xef\xe6\x52\xb4\xab\x61\x23\x22\xfd\x81\xb9\xbf\x19\xba\x2b\x81\x70\xee\xb6\x7b\x9e\x7f\x31\xd4\xcc\xda\xc4\x25\x71\xd9\x72\x9c\x96\xd5\xf8\xe1\xbc\xdf\x09\xbe\x5c\xbb\xa0\x8b\x14\x5c\xdf\x7c\xec\x79\x1d\xb0\x9d\x4a\xe5\xf3\x49\xa7\x52\x39\x0f\xce\xe1\x67\x53\xa6\x8e\xca\x55\x08\x74\x09\xa2\x79\x05\xaa\x54\x5c\xdf\x06\x5b\x17\xac\x5a\xa5\x3
2\x9b\xcd\xca\xb3\x93\x32\x17\x93\x4a\x30\xa8\xcc\xb5\xac\x23\xcd\x5c\x5c\x3a\x6a\x8d\xb3\x1c\xa9\xc8\x6e\x59\x0d\x33\xe1\x3c\x4d\x98\x6c\xee\x10\x73\x74\x76\x76\x96\x73\xdb\x20\x75\xc5\x6c\xda\x29\x11\x13\xca\x6a\x50\xad\xc3\x98\x33\xe5\x8c\x49\x4a\x93\x45\x0d\x7e\xba\xc4\xe4\x16\x15\x0d\x09\xf8\x38\xc5\x9f\x4a\xb0\x1a\x28\x41\x5b\x50\x92\x94\x40\x12\x26\x1d\x89\x82\x8e\xeb\x30\xe2\x73\x47\xd2\xdf\x29\x9b\xd4\x60\xc4\x45\x84\xc2\x19\xf1\x79\x21\x54\xd2\xdf\xb1\x06\x47\xef\xb2\x79\xdd\x28\x89\x24\x6a\x59\x8d\x14\x15\x01\x46\x52\x6c\xda\xb7\x14\x67\x7a\x85\xd8\x7a\x65\x2a\x64\xaa\x69\xcf\x68\xa4\xe2\x66\x84\xb7\x34\x44\xc7\xdc\xd8\x4b\x1e\x6d\x98\x83\xff\x9c\xd2\xdb\xa6\xdd\xc9\xe9\x9d\x60\x91\xe1\x1a\xb7\xc2\xb9\xaa\x68\x43\xeb\xa6\xd4\x4a\x54\xcd\x9b\xa0\xeb\x7c\xd0\x32\x14\x55\x09\xb6\x9e\x6b\x3f\x8d\x4a\x4e\x63\x35\x8c\x97\x5a\xd6\x5f\x53\x8c\x28\x01\xce\x92\x05\xc8\x50\x20\x32\x53\x1a\xde\xa4\x64\x9e\xeb\x56\x83\xd3\x77\xd5\x6c\xfe\x16\xee\x2c\x80\x11\x8f\x16\xe6\x02\x20\x23\x51\x64\x7c\x52\x85\x1f\x68\xaa\x4d\x24\x4c\xd5\x2d\x80\x7b\xcb\x02\x88\x8f\x4a\x56\x7c\x5c\xb2\xe2\x93\x92\x15\xbf\x2b\x58\x8c\xc7\x66\xa8\xcb\x50\x0d\x3e\x54\xb7\x19\x01\x96\x21\x3b\xae\x66\x73\xa8\xc2\xfb\x6c\xbe\x5b\xf6\xba\xbc\x3c\x02\xc7\xc7\x4f\xd0\x1e\x3f\xa6\x3d\xfa\xf0\x04\xed\xc9\x0e\xda\xd3\xdd\xb4\x65\x1d\x0f\x42\x19\x8a\xaf\xb9\x03\xa0\x70\xe3\x51\xb5\xfa\x97\x27\x45\x21\x53\x2f\xf2\xeb\x92\xd8\x99\x09\x92\x6d\x73\x1c\x55\x9f\x50\x96\xb2\x5b\xae\x2b\xfb\xdd\x57\xf5\xb9\xb7\x1a\x95\x22\x33\x1a\x95\x3c\x9b\xad\x86\x89\x3a\x55\x98\xca\x90\x67\x68\xae\xd4\x22\xc3\xd5\x3a\x94\x61\x8c\x29\x31\xeb\xd0\xd5\x40\xe3\x0a\xa5\x24\x13\xdc\xdb\x4a\x04\x67\x86\xa3\xdf\xa8\x72\xf2\x07\x29\xe7\x2a\x36\x4c\x79\xc7\xa2\x44\x62\xf4\x40\xa4\x17\x94\xe1\x76\x48\xf4\xeb\x54\xaa\x1a\x30\xce\xb0\x0e\x71\x91\xa8\xda\x4f\x75\x48\x28\x43\x67\x35\x54\x3e\xc5\xb4\x0e\x23\x12\xfe\x36\x11\x7c\xca\x22\x27\xe4\x09\x17\x35\xf8\x71\x7c\xaa\x7f\xea\xeb\x3e\xd6\x85\xc1\x6a\x28\x32\x4a\x10\xc2\x84\x48\xd9\xb4\xb5\x43\x4d\x04\xf7\xe7\xa2\x97\xea\x9e\xdf\x35\x6d\x7d\x67\xc3\x68\x62\x88\x9b\x76\x41\x6c\xb7\x2c\x80\x86\x12\x7b\xac\xb9\x3a\xa3\x1b\x2a\xda\x9b\x23\x6f\x51\x68\x21\x89\x43\x12\x3a\x61\x35\x50\x3c\xab\xdb\x70\x6b\xee\x9a\xb6\xe2\x99\xdd\x6a\x54\x54\xf4\xa0\x68\x91\x03\xab\x02\xb2\x72\xf1\x69\xb5\xba\xca\x87\xfd\xe9\xae\x81\x6d\x96\x90\x45\x0d\x46\x09\x0f\x7f\xab\xc3\x7a\x27\xa8\x56\x35\xdb\xca\xc7\x40\xa6\x8a\xd7\x21\x4c\x90\x08\x3d\x95\x8a\xb7\x4d\x37\x56\x03\x34\x22\x7a\xbb\x6e\x38\x32\xf5\x8d\x4d\xfd\xba\x15\xdb\x76\xaf\xca\xaa\xee\x46\xf5\x95\x29\x3a\x88\xeb\x6b\x39\x25\x94\x6d\xad\x92\x10\x93\xa4\xe0\x6e\xda\xd5\xfc\x5e\x66\x24\x5c\xde\x1f\xd2\x9a\x1f\x1b\x01\x9a\xa9\x06\x47\xd9\x1c\x24\x4f\x68\x04\x3f\xe2\x99\xfe\x59\x3e\x72\x04\x89\xe8\x54\xd6\xe0\x44\x7b\x62\xbd\x0a\x8c\xc7\x6b\x8e\x39\x84\x52\xb0\xfc\x77\x77\x07\x74\x0c\x13\x05\x6f\x12\x64\x50\x6e\x27\x28\x94\x2c\x77\xa9\xa0\x6c\xf2\x16\xaa\x7a\x7b\xbd\x4e\xbe\xb6\x30\x89\x26\x05\xf3\xdb\x99\x11\xa1\x37\xad\xdf\x2c\x64\x3b\x17\xe4\x16\x1c\xa9\xc3\x46\xf0\x36\x40\xd5\xfb\x6a\x75\x3b\x71\xc1\xf4\xb7\x42\x5e\x88\x4c\xa1\xd8\x15\x56\xf3\xbf\xaa\x0d\xdb\x91\x25\xee\xe9\xfb\xe3\xe3\xce\xd6\xda\x86\xe2\x3a\x97\xb9\x9e\x15\x39\xf9\x56\x40\x4c\x48\x8a\x38\xc0\xbf\x40\x47\xe5\xfe\x3e\x77\xf3\xce\x58\xbd\x85\x23\xb8\xbf\x97\xab\xf7\x25\x30\xe6\x42\x8b\x10\x84\x4d\x10\xca\x17\x82\x4f\xb3\x1e\x19\x61\x22\xcb\x43\x2e\x14\x46\xd7\x84\x0a\xb9\x1d\xd7\xe5\xbc\x3e\x49\x11\xee\xef\x9b\xfa\xfa\x13\x49\xa6\xf8\x98\xf0\xe1\xcd\xcc\x46\x62\xac\x6a\xf7\x3a\x5d\x22\xf1\x85\x19\x34\xe1\x3c\xfa\xde\x
d3\xe7\xf4\xc3\xc7\xb3\x6a\xf7\xc5\xe9\x93\x93\x1f\x7c\xfa\xc0\x7f\x91\x3f\xdb\x74\x8d\x8a\x12\x87\x59\x47\xb7\x10\xcb\x72\xeb\x61\x1f\x14\xde\x7a\xc8\xd9\x13\xd3\xab\x77\x62\x90\xad\xf6\xfd\x67\xe8\xd8\x8f\x74\x3f\xa4\xcc\x78\x32\x3f\x0c\x7e\x3a\xd4\x04\xa9\x42\xb5\x00\x74\xcf\x26\x49\x61\x1c\x81\x58\xe0\xb8\x69\x6f\xbd\x52\x31\x65\x27\x25\x8c\x4c\x50\xdc\x0c\x7a\xf9\xab\x15\x7b\xb5\xbb\x53\xcc\xc9\x04\x4d\x89\x58\xec\xcf\x0b\xa6\x6c\x47\x18\x72\x61\xde\x6f\x2f\x37\xb5\xcb\x92\xdc\xed\x76\x77\x96\xea\x93\x77\x1f\x30\x22\x0f\x90\xb0\x80\x83\x9b\xc3\xce\x6a\x87\x98\xcd\x8b\x2e\xb1\xb1\x3d\x3e\xd6\x9b\xe3\x8d\xc6\x32\xe2\x49\xb4\xbb\x95\x84\x53\x21\xf5\xcc\x19\xa7\xf9\xc0\x0a\x86\x53\x66\x84\x16\x68\x7c\xab\xe5\xbc\x5f\xd9\x68\xde\x15\x8e\xb9\x48\x6b\x10\x92\x8c\x2a\x92\xd0\xdf\xb1\x6e\xb7\x3e\x51\x9c\x01\x65\xf0\x4c\xe8\x96\xaf\xc4\xc8\xce\xc4\x7e\x54\xb8\x77\x15\xea\x3f\x04\x36\x5f\xd7\xf1\xb7\x5e\xc7\x52\x09\xce\x26\xfb\x73\xf8\xdf\x1f\xc0\x4b\x91\x16\x2b\x0c\xf3\x0f\xc8\x07\x1a\x95\x5c\xc9\xff\x41\x2e\xee\x80\x21\xc5\x93\x02\xfd\x6c\x6a\xf2\x9a\x9d\xdf\x79\x76\xe6\x38\x78\x95\x80\x8d\xd1\x61\x05\x7f\x3d\x73\x77\x42\xf6\x35\x84\x0e\x4d\x58\xc7\xe8\x7b\x35\xe5\xe9\x95\xb8\xab\x67\x30\xc6\xf3\x4f\xd1\x32\xef\x18\x7b\xcf\x8a\x35\x8d\x0e\x23\x35\xbe\xea\xcf\x65\x75\x7b\x50\xfc\xff\x21\x51\xd6\x41\x68\xf9\x02\x19\x0a\xa2\xb8\x86\x9d\x06\x73\xee\xab\xfe\x6d\x03\xc6\x47\x78\x73\xca\x22\x14\x1a\xc1\xd5\xed\xd6\x90\x4f\x45\x88\x1a\x68\x1d\x5a\x6d\xf9\x63\x9d\xf5\x85\x00\x70\x80\x92\x27\xb7\x18\x3d\x01\x01\x5f\x71\xe3\xc1\x77\xe6\x83\xeb\x84\x8d\xf8\xe0\x34\xfa\x13\xaf\xe8\xe7\xd0\xf2\xeb\x42\xfb\x5e\x37\x68\xcb\xb2\xbd\xb6\x45\x5b\x0e\xed\x61\x93\xb6\xd2\xe6\x35\x47\x5f\xb7\x69\xaf\xdb\xb4\xd7\x6d\xda\xeb\x36\xed\x75\x9b\xf6\xba\x4d\xfb\xcf\x7b\x6b\xa3\x62\x3e\xfc\xb5\x9e\xfd\x3c\xbb\x29\x72\xc5\xf2\x30\xb2\x76\x90\x69\xcc\xb9\xf9\x72\xbd\xaf\xd8\xae\x9f\xd3\xdb\x38\x83\xb5\x8a\xfa\xd9\xd9\xd9\x33\xe7\x98\x76\x7f\x0a\x3d\x94\xcf\xce\x87\x93\x80\x1b\x27\x3b\xe8\x84\xe5\x9f\xae\x60\x3f\x08\xe7\x85\x80\x66\xf7\xb7\xb6\xf5\xb4\x58\x37\xfb\xf8\x11\x04\xda\x3a\x9d\xd1\xda\xa8\x6e\xee\x5c\xa1\x60\x24\xf9\xc6\xc5\xed\x99\xda\xf5\x15\xd3\x5a\x43\x64\x0a\x46\x8b\x97\x7d\x0c\x7c\x5c\x6c\x1e\x1d\xcf\xd8\x2e\x25\x8d\x4a\x44\x6f\x5b\xf9\x6f\x6b\xb3\xae\xfc\x49\xce\xa6\xe6\x26\x3e\x14\xbc\x46\x65\xc4\xa3\x85\x39\x05\xae\xd2\xa4\x65\x59\x0f\x65\xf5\xdf\x01\x00\x00\xff\xff\x4e\x2a\x1d\xf2\x04\x37\x00\x00"), }, } fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ diff --git a/vendor/github.com/prometheus/alertmanager/cluster/channel.go b/vendor/github.com/prometheus/alertmanager/cluster/channel.go index f4ea4dc075e..ba0b834cd10 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/channel.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/channel.go @@ -21,8 +21,9 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/hashicorp/memberlist" - "github.com/prometheus/alertmanager/cluster/clusterpb" "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/alertmanager/cluster/clusterpb" ) // Channel allows clients to send messages for a specific state type that will be diff --git a/vendor/github.com/prometheus/alertmanager/cluster/cluster.go b/vendor/github.com/prometheus/alertmanager/cluster/cluster.go index 6de11c53992..ba7fdb9e7f2 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/cluster.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/cluster.go @@ -119,7 +119,7 @@ func (s PeerStatus) String() string { const ( 
DefaultPushPullInterval = 60 * time.Second DefaultGossipInterval = 200 * time.Millisecond - DefaultTcpTimeout = 10 * time.Second + DefaultTCPTimeout = 10 * time.Second DefaultProbeTimeout = 500 * time.Millisecond DefaultProbeInterval = 1 * time.Second DefaultReconnectInterval = 10 * time.Second @@ -254,7 +254,8 @@ func Create( func (p *Peer) Join( reconnectInterval time.Duration, - reconnectTimeout time.Duration) error { + reconnectTimeout time.Duration, +) error { n, err := p.mlist.Join(p.resolvedPeers) if err != nil { level.Warn(p.logger).Log("msg", "failed to join cluster", "err", err) @@ -784,7 +785,7 @@ func resolvePeers(ctx context.Context, peers []string, myAddress string, res *ne return resolvedPeers, nil } -func removeMyAddr(ips []net.IPAddr, targetPort string, myAddr string) []net.IPAddr { +func removeMyAddr(ips []net.IPAddr, targetPort, myAddr string) []net.IPAddr { var result []net.IPAddr for _, ip := range ips { diff --git a/vendor/github.com/prometheus/alertmanager/cluster/connection_pool.go b/vendor/github.com/prometheus/alertmanager/cluster/connection_pool.go index 03014323397..bd6695aa872 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/connection_pool.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/connection_pool.go @@ -33,7 +33,7 @@ type connectionPool struct { func newConnectionPool(tlsClientCfg *tls.Config) (*connectionPool, error) { cache, err := lru.NewWithEvict( - capacity, func(_ interface{}, value interface{}) { + capacity, func(_, value interface{}) { conn, ok := value.(*tlsConn) if ok { _ = conn.Close() diff --git a/vendor/github.com/prometheus/alertmanager/cluster/delegate.go b/vendor/github.com/prometheus/alertmanager/cluster/delegate.go index 50addcde5d6..9957f69b916 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/delegate.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/delegate.go @@ -20,8 +20,9 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/hashicorp/memberlist" - "github.com/prometheus/alertmanager/cluster/clusterpb" "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/alertmanager/cluster/clusterpb" ) const ( diff --git a/vendor/github.com/prometheus/alertmanager/cluster/tls_config.go b/vendor/github.com/prometheus/alertmanager/cluster/tls_config.go index 4604a484ced..135c2fa210d 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/tls_config.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/tls_config.go @@ -14,7 +14,7 @@ package cluster import ( - "io/ioutil" + "os" "path/filepath" "github.com/prometheus/common/config" @@ -23,7 +23,7 @@ import ( ) type TLSTransportConfig struct { - TLSServerConfig *web.TLSStruct `yaml:"tls_server_config"` + TLSServerConfig *web.TLSConfig `yaml:"tls_server_config"` TLSClientConfig *config.TLSConfig `yaml:"tls_client_config"` } @@ -31,7 +31,7 @@ func GetTLSTransportConfig(configPath string) (*TLSTransportConfig, error) { if configPath == "" { return nil, nil } - bytes, err := ioutil.ReadFile(configPath) + bytes, err := os.ReadFile(configPath) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/alertmanager/cluster/tls_connection.go b/vendor/github.com/prometheus/alertmanager/cluster/tls_connection.go index f5a3326fac5..21ffc95970d 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/tls_connection.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/tls_connection.go @@ -66,7 +66,6 @@ func (conn *tlsConn) Write(b []byte) (int, error) { conn.mtx.Lock() 
defer conn.mtx.Unlock() n, err := conn.connection.Write(b) - if err != nil { conn.live = false } diff --git a/vendor/github.com/prometheus/alertmanager/cluster/tls_transport.go b/vendor/github.com/prometheus/alertmanager/cluster/tls_transport.go index ca8d04c2723..16fbe2519e9 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/tls_transport.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/tls_transport.go @@ -194,7 +194,7 @@ func (t *TLSTransport) Shutdown() error { // from the pool, and writes to it. It also returns a timestamp of when // the packet was written. func (t *TLSTransport) WriteTo(b []byte, addr string) (time.Time, error) { - conn, err := t.connPool.borrowConnection(addr, DefaultTcpTimeout) + conn, err := t.connPool.borrowConnection(addr, DefaultTCPTimeout) if err != nil { t.writeErrs.WithLabelValues("packet").Inc() return time.Now(), errors.Wrap(err, "failed to dial") diff --git a/vendor/github.com/prometheus/alertmanager/config/config.go b/vendor/github.com/prometheus/alertmanager/config/config.go index 448f18532cb..21698681ced 100644 --- a/vendor/github.com/prometheus/alertmanager/config/config.go +++ b/vendor/github.com/prometheus/alertmanager/config/config.go @@ -16,9 +16,9 @@ package config import ( "encoding/json" "fmt" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "regexp" "sort" @@ -192,7 +192,7 @@ func Load(s string) (*Config, error) { // LoadFile parses the given YAML file into a Config. func LoadFile(filename string) (*Config, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -248,6 +248,12 @@ func resolveFilepaths(baseDir string, cfg *Config) { for _, cfg := range receiver.TelegramConfigs { cfg.HTTPConfig.SetDirectory(baseDir) } + for _, cfg := range receiver.DiscordConfigs { + cfg.HTTPConfig.SetDirectory(baseDir) + } + for _, cfg := range receiver.WebexConfigs { + cfg.HTTPConfig.SetDirectory(baseDir) + } } } @@ -335,6 +341,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("at most one of opsgenie_api_key & opsgenie_api_key_file must be configured") } + if c.Global.VictorOpsAPIKey != "" && len(c.Global.VictorOpsAPIKeyFile) > 0 { + return fmt.Errorf("at most one of victorops_api_key & victorops_api_key_file must be configured") + } + + if len(c.Global.SMTPAuthPassword) > 0 && len(c.Global.SMTPAuthPasswordFile) > 0 { + return fmt.Errorf("at most one of smtp_auth_password & smtp_auth_password_file must be configured") + } + names := map[string]struct{}{} for _, rcv := range c.Receivers { @@ -365,8 +379,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if ec.AuthUsername == "" { ec.AuthUsername = c.Global.SMTPAuthUsername } - if ec.AuthPassword == "" { + if ec.AuthPassword == "" && ec.AuthPasswordFile == "" { ec.AuthPassword = c.Global.SMTPAuthPassword + ec.AuthPasswordFile = c.Global.SMTPAuthPasswordFile } if ec.AuthSecret == "" { ec.AuthSecret = c.Global.SMTPAuthSecret @@ -471,11 +486,12 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if !strings.HasSuffix(voc.APIURL.Path, "/") { voc.APIURL.Path += "/" } - if voc.APIKey == "" { - if c.Global.VictorOpsAPIKey == "" { + if voc.APIKey == "" && len(voc.APIKeyFile) == 0 { + if c.Global.VictorOpsAPIKey == "" && len(c.Global.VictorOpsAPIKeyFile) == 0 { return fmt.Errorf("no global VictorOps API Key set") } voc.APIKey = c.Global.VictorOpsAPIKey + voc.APIKeyFile = c.Global.VictorOpsAPIKeyFile } } for _, sns := range 
rcv.SNSConfigs { @@ -492,6 +508,26 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { telegram.APIUrl = c.Global.TelegramAPIUrl } } + for _, discord := range rcv.DiscordConfigs { + if discord.HTTPConfig == nil { + discord.HTTPConfig = c.Global.HTTPConfig + } + if discord.WebhookURL == nil { + return fmt.Errorf("no discord webhook URL provided") + } + } + for _, webex := range rcv.WebexConfigs { + if webex.HTTPConfig == nil { + webex.HTTPConfig = c.Global.HTTPConfig + } + if webex.APIURL == nil { + if c.Global.WebexAPIURL == nil { + return fmt.Errorf("no global Webex URL set") + } + + webex.APIURL = c.Global.WebexAPIURL + } + } names[rcv.Name] = struct{}{} } @@ -580,7 +616,7 @@ func checkTimeInterval(r *Route, timeIntervals map[string]struct{}) error { // DefaultGlobalConfig returns GlobalConfig with default values. func DefaultGlobalConfig() GlobalConfig { - var defaultHTTPConfig = commoncfg.DefaultHTTPClientConfig + defaultHTTPConfig := commoncfg.DefaultHTTPClientConfig return GlobalConfig{ ResolveTimeout: model.Duration(5 * time.Minute), HTTPConfig: &defaultHTTPConfig, @@ -592,6 +628,7 @@ func DefaultGlobalConfig() GlobalConfig { WeChatAPIURL: mustParseURL("https://qyapi.weixin.qq.com/cgi-bin/"), VictorOpsAPIURL: mustParseURL("https://alert.victorops.com/integrations/generic/20131114/alert/"), TelegramAPIUrl: mustParseURL("https://api.telegram.org"), + WebexAPIURL: mustParseURL("https://webexapis.com/v1/messages"), } } @@ -693,26 +730,29 @@ type GlobalConfig struct { HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` - SMTPFrom string `yaml:"smtp_from,omitempty" json:"smtp_from,omitempty"` - SMTPHello string `yaml:"smtp_hello,omitempty" json:"smtp_hello,omitempty"` - SMTPSmarthost HostPort `yaml:"smtp_smarthost,omitempty" json:"smtp_smarthost,omitempty"` - SMTPAuthUsername string `yaml:"smtp_auth_username,omitempty" json:"smtp_auth_username,omitempty"` - SMTPAuthPassword Secret `yaml:"smtp_auth_password,omitempty" json:"smtp_auth_password,omitempty"` - SMTPAuthSecret Secret `yaml:"smtp_auth_secret,omitempty" json:"smtp_auth_secret,omitempty"` - SMTPAuthIdentity string `yaml:"smtp_auth_identity,omitempty" json:"smtp_auth_identity,omitempty"` - SMTPRequireTLS bool `yaml:"smtp_require_tls" json:"smtp_require_tls,omitempty"` - SlackAPIURL *SecretURL `yaml:"slack_api_url,omitempty" json:"slack_api_url,omitempty"` - SlackAPIURLFile string `yaml:"slack_api_url_file,omitempty" json:"slack_api_url_file,omitempty"` - PagerdutyURL *URL `yaml:"pagerduty_url,omitempty" json:"pagerduty_url,omitempty"` - OpsGenieAPIURL *URL `yaml:"opsgenie_api_url,omitempty" json:"opsgenie_api_url,omitempty"` - OpsGenieAPIKey Secret `yaml:"opsgenie_api_key,omitempty" json:"opsgenie_api_key,omitempty"` - OpsGenieAPIKeyFile string `yaml:"opsgenie_api_key_file,omitempty" json:"opsgenie_api_key_file,omitempty"` - WeChatAPIURL *URL `yaml:"wechat_api_url,omitempty" json:"wechat_api_url,omitempty"` - WeChatAPISecret Secret `yaml:"wechat_api_secret,omitempty" json:"wechat_api_secret,omitempty"` - WeChatAPICorpID string `yaml:"wechat_api_corp_id,omitempty" json:"wechat_api_corp_id,omitempty"` - VictorOpsAPIURL *URL `yaml:"victorops_api_url,omitempty" json:"victorops_api_url,omitempty"` - VictorOpsAPIKey Secret `yaml:"victorops_api_key,omitempty" json:"victorops_api_key,omitempty"` - TelegramAPIUrl *URL `yaml:"telegram_api_url,omitempty" json:"telegram_api_url,omitempty"` + SMTPFrom string `yaml:"smtp_from,omitempty" json:"smtp_from,omitempty"` + SMTPHello 
string `yaml:"smtp_hello,omitempty" json:"smtp_hello,omitempty"` + SMTPSmarthost HostPort `yaml:"smtp_smarthost,omitempty" json:"smtp_smarthost,omitempty"` + SMTPAuthUsername string `yaml:"smtp_auth_username,omitempty" json:"smtp_auth_username,omitempty"` + SMTPAuthPassword Secret `yaml:"smtp_auth_password,omitempty" json:"smtp_auth_password,omitempty"` + SMTPAuthPasswordFile string `yaml:"smtp_auth_password_file,omitempty" json:"smtp_auth_password_file,omitempty"` + SMTPAuthSecret Secret `yaml:"smtp_auth_secret,omitempty" json:"smtp_auth_secret,omitempty"` + SMTPAuthIdentity string `yaml:"smtp_auth_identity,omitempty" json:"smtp_auth_identity,omitempty"` + SMTPRequireTLS bool `yaml:"smtp_require_tls" json:"smtp_require_tls,omitempty"` + SlackAPIURL *SecretURL `yaml:"slack_api_url,omitempty" json:"slack_api_url,omitempty"` + SlackAPIURLFile string `yaml:"slack_api_url_file,omitempty" json:"slack_api_url_file,omitempty"` + PagerdutyURL *URL `yaml:"pagerduty_url,omitempty" json:"pagerduty_url,omitempty"` + OpsGenieAPIURL *URL `yaml:"opsgenie_api_url,omitempty" json:"opsgenie_api_url,omitempty"` + OpsGenieAPIKey Secret `yaml:"opsgenie_api_key,omitempty" json:"opsgenie_api_key,omitempty"` + OpsGenieAPIKeyFile string `yaml:"opsgenie_api_key_file,omitempty" json:"opsgenie_api_key_file,omitempty"` + WeChatAPIURL *URL `yaml:"wechat_api_url,omitempty" json:"wechat_api_url,omitempty"` + WeChatAPISecret Secret `yaml:"wechat_api_secret,omitempty" json:"wechat_api_secret,omitempty"` + WeChatAPICorpID string `yaml:"wechat_api_corp_id,omitempty" json:"wechat_api_corp_id,omitempty"` + VictorOpsAPIURL *URL `yaml:"victorops_api_url,omitempty" json:"victorops_api_url,omitempty"` + VictorOpsAPIKey Secret `yaml:"victorops_api_key,omitempty" json:"victorops_api_key,omitempty"` + VictorOpsAPIKeyFile string `yaml:"victorops_api_key_file,omitempty" json:"victorops_api_key_file,omitempty"` + TelegramAPIUrl *URL `yaml:"telegram_api_url,omitempty" json:"telegram_api_url,omitempty"` + WebexAPIURL *URL `yaml:"webex_api_url,omitempty" json:"webex_api_url,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for GlobalConfig. @@ -844,6 +884,7 @@ type Receiver struct { // A unique identifier for this receiver. Name string `yaml:"name" json:"name"` + DiscordConfigs []*DiscordConfig `yaml:"discord_configs,omitempty" json:"discord_configs,omitempty"` EmailConfigs []*EmailConfig `yaml:"email_configs,omitempty" json:"email_configs,omitempty"` PagerdutyConfigs []*PagerdutyConfig `yaml:"pagerduty_configs,omitempty" json:"pagerduty_configs,omitempty"` SlackConfigs []*SlackConfig `yaml:"slack_configs,omitempty" json:"slack_configs,omitempty"` @@ -854,6 +895,7 @@ type Receiver struct { VictorOpsConfigs []*VictorOpsConfig `yaml:"victorops_configs,omitempty" json:"victorops_configs,omitempty"` SNSConfigs []*SNSConfig `yaml:"sns_configs,omitempty" json:"sns_configs,omitempty"` TelegramConfigs []*TelegramConfig `yaml:"telegram_configs,omitempty" json:"telegram_configs,omitempty"` + WebexConfigs []*WebexConfig `yaml:"webex_configs,omitempty" json:"webex_configs,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for Receiver. 
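// Illustrative sketch (not part of the vendored diff): the fallback behaviour the
// config.go hunk above adds for the new Webex receiver. A receiver-level api_url
// wins; otherwise the global webex_api_url is used, and it is an error if neither
// is set. webexConfig and globalConfig below are simplified stand-ins for the real
// Alertmanager config types, not the actual definitions.
package main

import (
	"errors"
	"fmt"
	"net/url"
)

type webexConfig struct{ APIURL *url.URL }

type globalConfig struct{ WebexAPIURL *url.URL }

func resolveWebexURL(rcv *webexConfig, global *globalConfig) error {
	if rcv.APIURL != nil {
		return nil // receiver-level URL takes precedence
	}
	if global.WebexAPIURL == nil {
		return errors.New("no global Webex URL set")
	}
	rcv.APIURL = global.WebexAPIURL
	return nil
}

func main() {
	g, _ := url.Parse("https://webexapis.com/v1/messages")
	rcv := &webexConfig{}
	if err := resolveWebexURL(rcv, &globalConfig{WebexAPIURL: g}); err != nil {
		panic(err)
	}
	fmt.Println(rcv.APIURL) // receiver falls back to the global default
}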
diff --git a/vendor/github.com/prometheus/alertmanager/config/coordinator.go b/vendor/github.com/prometheus/alertmanager/config/coordinator.go index 499e939ecfa..5ee44f81d8e 100644 --- a/vendor/github.com/prometheus/alertmanager/config/coordinator.go +++ b/vendor/github.com/prometheus/alertmanager/config/coordinator.go @@ -150,7 +150,7 @@ func md5HashAsMetricValue(data []byte) float64 { sum := md5.Sum(data) // We only want 48 bits as a float64 only has a 53 bit mantissa. smallSum := sum[0:6] - var bytes = make([]byte, 8) + bytes := make([]byte, 8) copy(bytes, smallSum) return float64(binary.LittleEndian.Uint64(bytes)) } diff --git a/vendor/github.com/prometheus/alertmanager/config/notifiers.go b/vendor/github.com/prometheus/alertmanager/config/notifiers.go index a2cadf9e67d..e0abdcd572a 100644 --- a/vendor/github.com/prometheus/alertmanager/config/notifiers.go +++ b/vendor/github.com/prometheus/alertmanager/config/notifiers.go @@ -15,12 +15,13 @@ package config import ( "fmt" + "net/textproto" "regexp" "strings" + "text/template" "time" "github.com/pkg/errors" - commoncfg "github.com/prometheus/common/config" "github.com/prometheus/common/sigv4" ) @@ -33,6 +34,23 @@ var ( }, } + // DefaultWebexConfig defines default values for Webex configurations. + DefaultWebexConfig = WebexConfig{ + NotifierConfig: NotifierConfig{ + VSendResolved: true, + }, + Message: `{{ template "webex.default.message" . }}`, + } + + // DefaultDiscordConfig defines default values for Discord configurations. + DefaultDiscordConfig = DiscordConfig{ + NotifierConfig: NotifierConfig{ + VSendResolved: true, + }, + Title: `{{ template "discord.default.title" . }}`, + Message: `{{ template "discord.default.message" . }}`, + } + // DefaultEmailConfig defines default values for Email configurations. DefaultEmailConfig = EmailConfig{ NotifierConfig: NotifierConfig{ @@ -144,7 +162,7 @@ var ( }, DisableNotifications: false, Message: `{{ template "telegram.default.message" . }}`, - ParseMode: "MarkdownV2", + ParseMode: "HTML", } ) @@ -157,24 +175,72 @@ func (nc *NotifierConfig) SendResolved() bool { return nc.VSendResolved } +// WebexConfig configures notifications via Webex. +type WebexConfig struct { + NotifierConfig `yaml:",inline" json:",inline"` + HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` + APIURL *URL `yaml:"api_url,omitempty" json:"api_url,omitempty"` + + Message string `yaml:"message,omitempty" json:"message,omitempty"` + RoomID string `yaml:"room_id" json:"room_id"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *WebexConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultWebexConfig + type plain WebexConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + if c.RoomID == "" { + return fmt.Errorf("missing room_id on webex_config") + } + + if c.HTTPConfig == nil || c.HTTPConfig.Authorization == nil { + return fmt.Errorf("missing webex_configs.http_config.authorization") + } + + return nil +} + +// DiscordConfig configures notifications via Discord. 
+type DiscordConfig struct { + NotifierConfig `yaml:",inline" json:",inline"` + + HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` + WebhookURL *SecretURL `yaml:"webhook_url,omitempty" json:"webhook_url,omitempty"` + + Title string `yaml:"title,omitempty" json:"title,omitempty"` + Message string `yaml:"message,omitempty" json:"message,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *DiscordConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultDiscordConfig + type plain DiscordConfig + return unmarshal((*plain)(c)) +} + // EmailConfig configures notifications via mail. type EmailConfig struct { NotifierConfig `yaml:",inline" json:",inline"` // Email address to notify. - To string `yaml:"to,omitempty" json:"to,omitempty"` - From string `yaml:"from,omitempty" json:"from,omitempty"` - Hello string `yaml:"hello,omitempty" json:"hello,omitempty"` - Smarthost HostPort `yaml:"smarthost,omitempty" json:"smarthost,omitempty"` - AuthUsername string `yaml:"auth_username,omitempty" json:"auth_username,omitempty"` - AuthPassword Secret `yaml:"auth_password,omitempty" json:"auth_password,omitempty"` - AuthSecret Secret `yaml:"auth_secret,omitempty" json:"auth_secret,omitempty"` - AuthIdentity string `yaml:"auth_identity,omitempty" json:"auth_identity,omitempty"` - Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"` - HTML string `yaml:"html,omitempty" json:"html,omitempty"` - Text string `yaml:"text,omitempty" json:"text,omitempty"` - RequireTLS *bool `yaml:"require_tls,omitempty" json:"require_tls,omitempty"` - TLSConfig commoncfg.TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` + To string `yaml:"to,omitempty" json:"to,omitempty"` + From string `yaml:"from,omitempty" json:"from,omitempty"` + Hello string `yaml:"hello,omitempty" json:"hello,omitempty"` + Smarthost HostPort `yaml:"smarthost,omitempty" json:"smarthost,omitempty"` + AuthUsername string `yaml:"auth_username,omitempty" json:"auth_username,omitempty"` + AuthPassword Secret `yaml:"auth_password,omitempty" json:"auth_password,omitempty"` + AuthPasswordFile string `yaml:"auth_password_file,omitempty" json:"auth_password_file,omitempty"` + AuthSecret Secret `yaml:"auth_secret,omitempty" json:"auth_secret,omitempty"` + AuthIdentity string `yaml:"auth_identity,omitempty" json:"auth_identity,omitempty"` + Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"` + HTML string `yaml:"html,omitempty" json:"html,omitempty"` + Text string `yaml:"text,omitempty" json:"text,omitempty"` + RequireTLS *bool `yaml:"require_tls,omitempty" json:"require_tls,omitempty"` + TLSConfig commoncfg.TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -190,7 +256,7 @@ func (c *EmailConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // Header names are case-insensitive, check for collisions. 
normalizedHeaders := map[string]string{} for h, v := range c.Headers { - normalized := strings.Title(h) + normalized := textproto.CanonicalMIMEHeaderKey(h) if _, ok := normalizedHeaders[normalized]; ok { return fmt.Errorf("duplicate header %q in email config", normalized) } @@ -207,19 +273,22 @@ type PagerdutyConfig struct { HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` - ServiceKey Secret `yaml:"service_key,omitempty" json:"service_key,omitempty"` - RoutingKey Secret `yaml:"routing_key,omitempty" json:"routing_key,omitempty"` - URL *URL `yaml:"url,omitempty" json:"url,omitempty"` - Client string `yaml:"client,omitempty" json:"client,omitempty"` - ClientURL string `yaml:"client_url,omitempty" json:"client_url,omitempty"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` - Details map[string]string `yaml:"details,omitempty" json:"details,omitempty"` - Images []PagerdutyImage `yaml:"images,omitempty" json:"images,omitempty"` - Links []PagerdutyLink `yaml:"links,omitempty" json:"links,omitempty"` - Severity string `yaml:"severity,omitempty" json:"severity,omitempty"` - Class string `yaml:"class,omitempty" json:"class,omitempty"` - Component string `yaml:"component,omitempty" json:"component,omitempty"` - Group string `yaml:"group,omitempty" json:"group,omitempty"` + ServiceKey Secret `yaml:"service_key,omitempty" json:"service_key,omitempty"` + ServiceKeyFile string `yaml:"service_key_file,omitempty" json:"service_key_file,omitempty"` + RoutingKey Secret `yaml:"routing_key,omitempty" json:"routing_key,omitempty"` + RoutingKeyFile string `yaml:"routing_key_file,omitempty" json:"routing_key_file,omitempty"` + URL *URL `yaml:"url,omitempty" json:"url,omitempty"` + Client string `yaml:"client,omitempty" json:"client,omitempty"` + ClientURL string `yaml:"client_url,omitempty" json:"client_url,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Details map[string]string `yaml:"details,omitempty" json:"details,omitempty"` + Images []PagerdutyImage `yaml:"images,omitempty" json:"images,omitempty"` + Links []PagerdutyLink `yaml:"links,omitempty" json:"links,omitempty"` + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Severity string `yaml:"severity,omitempty" json:"severity,omitempty"` + Class string `yaml:"class,omitempty" json:"class,omitempty"` + Component string `yaml:"component,omitempty" json:"component,omitempty"` + Group string `yaml:"group,omitempty" json:"group,omitempty"` } // PagerdutyLink is a link @@ -242,12 +311,21 @@ func (c *PagerdutyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error if err := unmarshal((*plain)(c)); err != nil { return err } - if c.RoutingKey == "" && c.ServiceKey == "" { + if c.RoutingKey == "" && c.ServiceKey == "" && c.RoutingKeyFile == "" && c.ServiceKeyFile == "" { return fmt.Errorf("missing service or routing key in PagerDuty config") } + if len(c.RoutingKey) > 0 && len(c.RoutingKeyFile) > 0 { + return fmt.Errorf("at most one of routing_key & routing_key_file must be configured") + } + if len(c.ServiceKey) > 0 && len(c.ServiceKeyFile) > 0 { + return fmt.Errorf("at most one of service_key & service_key_file must be configured") + } if c.Details == nil { c.Details = make(map[string]string) } + if c.Source == "" { + c.Source = c.Client + } for k, v := range DefaultPagerdutyDetails { if _, ok := c.Details[k]; !ok { c.Details[k] = v @@ -452,7 +530,7 @@ func (c *WechatConfig) UnmarshalYAML(unmarshal 
func(interface{}) error) error { } if !wechatTypeMatcher.MatchString(c.MessageType) { - return errors.Errorf("WeChat message type %q does not match valid options %s", c.MessageType, wechatValidTypesRe) + return errors.Errorf("weChat message type %q does not match valid options %s", c.MessageType, wechatValidTypesRe) } return nil @@ -492,18 +570,25 @@ func (c *OpsGenieConfig) UnmarshalYAML(unmarshal func(interface{}) error) error return err } - if c.APIURL != nil && len(c.APIKeyFile) > 0 { + if c.APIKey != "" && len(c.APIKeyFile) > 0 { return fmt.Errorf("at most one of api_key & api_key_file must be configured") } for _, r := range c.Responders { if r.ID == "" && r.Username == "" && r.Name == "" { - return errors.Errorf("OpsGenieConfig responder %v has to have at least one of id, username or name specified", r) + return errors.Errorf("opsGenieConfig responder %v has to have at least one of id, username or name specified", r) } - r.Type = strings.ToLower(r.Type) - if !opsgenieTypeMatcher.MatchString(r.Type) { - return errors.Errorf("OpsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) + if strings.Contains(r.Type, "{{") { + _, err := template.New("").Parse(r.Type) + if err != nil { + return errors.Errorf("opsGenieConfig responder %v type is not a valid template: %v", r, err) + } + } else { + r.Type = strings.ToLower(r.Type) + if !opsgenieTypeMatcher.MatchString(r.Type) { + return errors.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) + } } } @@ -527,7 +612,7 @@ type VictorOpsConfig struct { HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` APIKey Secret `yaml:"api_key,omitempty" json:"api_key,omitempty"` - APIKeyFile Secret `yaml:"api_key_file,omitempty" json:"api_key_file,omitempty"` + APIKeyFile string `yaml:"api_key_file,omitempty" json:"api_key_file,omitempty"` APIURL *URL `yaml:"api_url" json:"api_url"` RoutingKey string `yaml:"routing_key" json:"routing_key"` MessageType string `yaml:"message_type" json:"message_type"` @@ -547,12 +632,15 @@ func (c *VictorOpsConfig) UnmarshalYAML(unmarshal func(interface{}) error) error if c.RoutingKey == "" { return fmt.Errorf("missing Routing key in VictorOps config") } + if c.APIKey != "" && len(c.APIKeyFile) > 0 { + return fmt.Errorf("at most one of api_key & api_key_file must be configured") + } reservedFields := []string{"routing_key", "message_type", "state_message", "entity_display_name", "monitoring_tool", "entity_id", "entity_state"} for _, v := range reservedFields { if _, ok := c.CustomFields[v]; ok { - return fmt.Errorf("VictorOps config contains custom field %s which cannot be used as it conflicts with the fixed/static fields", v) + return fmt.Errorf("victorOps config contains custom field %s which cannot be used as it conflicts with the fixed/static fields", v) } } @@ -662,9 +750,6 @@ func (c *TelegramConfig) UnmarshalYAML(unmarshal func(interface{}) error) error if c.ChatID == 0 { return fmt.Errorf("missing chat_id on telegram_config") } - if c.APIUrl == nil { - return fmt.Errorf("missing api_url on telegram_config") - } if c.ParseMode != "" && c.ParseMode != "Markdown" && c.ParseMode != "MarkdownV2" && diff --git a/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go b/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go index 65d7feacb3d..853d849688c 100644 --- a/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go +++ 
b/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go @@ -80,7 +80,6 @@ type Dispatcher struct { metrics *DispatcherMetrics limits Limits - marker types.Marker timeout func(time.Duration) time.Duration mtx sync.RWMutex @@ -121,7 +120,6 @@ func NewDispatcher( alerts: ap, stage: s, route: r, - marker: mk, timeout: to, logger: log.With(l, "component", "dispatcher"), metrics: m, diff --git a/vendor/github.com/prometheus/alertmanager/dispatch/route.go b/vendor/github.com/prometheus/alertmanager/dispatch/route.go index 643aa6dae4c..4b3673c5310 100644 --- a/vendor/github.com/prometheus/alertmanager/dispatch/route.go +++ b/vendor/github.com/prometheus/alertmanager/dispatch/route.go @@ -183,9 +183,6 @@ func (r *Route) Key() string { // Walk traverses the route tree in depth-first order. func (r *Route) Walk(visit func(*Route)) { visit(r) - if r.Routes == nil { - return - } for i := range r.Routes { r.Routes[i].Walk(visit) } diff --git a/vendor/github.com/prometheus/alertmanager/nflog/nflog.go b/vendor/github.com/prometheus/alertmanager/nflog/nflog.go index be2b47db3bb..54830bf9c80 100644 --- a/vendor/github.com/prometheus/alertmanager/nflog/nflog.go +++ b/vendor/github.com/prometheus/alertmanager/nflog/nflog.go @@ -30,9 +30,10 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/alertmanager/cluster" pb "github.com/prometheus/alertmanager/nflog/nflogpb" - "github.com/prometheus/client_golang/prometheus" ) // ErrNotFound is returned for empty query results. @@ -398,7 +399,7 @@ func stateKey(k string, r *pb.Receiver) string { return fmt.Sprintf("%s:%s", k, receiverKey(r)) } -func (l *Log) Log(r *pb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64) error { +func (l *Log) Log(r *pb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64, expiry time.Duration) error { // Write all st with the same timestamp. 
now := l.now() key := stateKey(gkey, r) @@ -414,6 +415,11 @@ func (l *Log) Log(r *pb.Receiver, gkey string, firingAlerts, resolvedAlerts []ui } } + expiresAt := now.Add(l.retention) + if expiry > 0 && l.retention > expiry { + expiresAt = now.Add(expiry) + } + e := &pb.MeshEntry{ Entry: &pb.Entry{ Receiver: r, @@ -422,7 +428,7 @@ func (l *Log) Log(r *pb.Receiver, gkey string, firingAlerts, resolvedAlerts []ui FiringAlerts: firingAlerts, ResolvedAlerts: resolvedAlerts, }, - ExpiresAt: now.Add(l.retention), + ExpiresAt: expiresAt, } b, err := marshalMeshEntry(e) diff --git a/vendor/github.com/prometheus/alertmanager/notify/email/email.go b/vendor/github.com/prometheus/alertmanager/notify/email/email.go index eebf20c7c6c..944aabdf850 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/email/email.go +++ b/vendor/github.com/prometheus/alertmanager/notify/email/email.go @@ -91,7 +91,11 @@ func (n *Email) auth(mechs string) (smtp.Auth, error) { return smtp.CRAMMD5Auth(username, secret), nil case "PLAIN": - password := string(n.conf.AuthPassword) + password, passwordErr := n.getPassword() + if passwordErr != nil { + err.Add(passwordErr) + continue + } if password == "" { err.Add(errors.New("missing password for PLAIN auth mechanism")) continue @@ -100,7 +104,11 @@ func (n *Email) auth(mechs string) (smtp.Auth, error) { return smtp.PlainAuth(identity, username, password, n.conf.Smarthost.Host), nil case "LOGIN": - password := string(n.conf.AuthPassword) + password, passwordErr := n.getPassword() + if passwordErr != nil { + err.Add(passwordErr) + continue + } if password == "" { err.Add(errors.New("missing password for LOGIN auth mechanism")) continue @@ -353,3 +361,14 @@ func (a *loginAuth) Next(fromServer []byte, more bool) ([]byte, error) { } return nil, nil } + +func (n *Email) getPassword() (string, error) { + if len(n.conf.AuthPasswordFile) > 0 { + content, err := os.ReadFile(n.conf.AuthPasswordFile) + if err != nil { + return "", fmt.Errorf("could not read %s: %w", n.conf.AuthPasswordFile, err) + } + return string(content), nil + } + return string(n.conf.AuthPassword), nil +} diff --git a/vendor/github.com/prometheus/alertmanager/notify/notify.go b/vendor/github.com/prometheus/alertmanager/notify/notify.go index bbdc6e7b014..f0691ba0eb1 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/notify.go +++ b/vendor/github.com/prometheus/alertmanager/notify/notify.go @@ -239,7 +239,7 @@ func (f StageFunc) Exec(ctx context.Context, l log.Logger, alerts ...*types.Aler } type NotificationLog interface { - Log(r *nflogpb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64) error + Log(r *nflogpb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64, expiry time.Duration) error Query(params ...nflog.QueryParam) ([]*nflogpb.Entry, error) } @@ -330,9 +330,9 @@ func (pb *PipelineBuilder) New( ms := NewGossipSettleStage(peer) is := NewMuteStage(inhibitor) - ss := NewMuteStage(silencer) - tms := NewTimeMuteStage(times) tas := NewTimeActiveStage(times) + tms := NewTimeMuteStage(times) + ss := NewMuteStage(silencer) for name := range receivers { st := createReceiverStage(name, receivers[name], wait, notificationLog, pb.metrics) @@ -785,7 +785,13 @@ func (n SetNotifiesStage) Exec(ctx context.Context, l log.Logger, alerts ...*typ return ctx, nil, errors.New("resolved alerts missing") } - return ctx, alerts, n.nflog.Log(n.recv, gkey, firing, resolved) + repeat, ok := RepeatInterval(ctx) + if !ok { + return ctx, nil, errors.New("repeat interval missing") + } + expiry := 2 * 
repeat + + return ctx, alerts, n.nflog.Log(n.recv, gkey, firing, resolved, expiry) } type timeStage struct { diff --git a/vendor/github.com/prometheus/alertmanager/notify/opsgenie/opsgenie.go b/vendor/github.com/prometheus/alertmanager/notify/opsgenie/opsgenie.go index d2b4c3bfe6e..426734f2756 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/opsgenie/opsgenie.go +++ b/vendor/github.com/prometheus/alertmanager/notify/opsgenie/opsgenie.go @@ -18,8 +18,8 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" + "os" "strings" "github.com/go-kit/log" @@ -34,6 +34,9 @@ import ( "github.com/prometheus/alertmanager/types" ) +// https://docs.opsgenie.com/docs/alert-api - 130 characters meaning runes. +const maxMessageLenRunes = 130 + // Notifier implements a Notifier for OpsGenie notifications. type Notifier struct { conf *config.OpsGenieConfig @@ -114,7 +117,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) } // Like Split but filter out empty strings. -func safeSplit(s string, sep string) []string { +func safeSplit(s, sep string) []string { a := strings.Split(strings.TrimSpace(s), sep) b := a[:0] for _, x := range a { @@ -160,7 +163,7 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h q := resolvedEndpointURL.Query() q.Set("identifierType", "alias") resolvedEndpointURL.RawQuery = q.Encode() - var msg = &opsGenieCloseMessage{Source: tmpl(n.conf.Source)} + msg := &opsGenieCloseMessage{Source: tmpl(n.conf.Source)} var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(msg); err != nil { return nil, false, err @@ -171,9 +174,9 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h } requests = append(requests, req.WithContext(ctx)) default: - message, truncated := notify.Truncate(tmpl(n.conf.Message), 130) + message, truncated := notify.TruncateInRunes(tmpl(n.conf.Message), maxMessageLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "truncated message", "truncated_message", message, "alert", key) + level.Warn(n.logger).Log("msg", "Truncated message", "alert", key, "max_runes", maxMessageLenRunes) } createEndpointURL := n.conf.APIURL.Copy() @@ -209,7 +212,7 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h responders = append(responders, responder) } - var msg = &opsGenieCreateMessage{ + msg := &opsGenieCreateMessage{ Alias: alias, Message: message, Description: tmpl(n.conf.Description), @@ -233,11 +236,11 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h requests = append(requests, req.WithContext(ctx)) if n.conf.UpdateAlerts { - updateMessageEndpointUrl := n.conf.APIURL.Copy() - updateMessageEndpointUrl.Path += fmt.Sprintf("v2/alerts/%s/message", alias) - q := updateMessageEndpointUrl.Query() + updateMessageEndpointURL := n.conf.APIURL.Copy() + updateMessageEndpointURL.Path += fmt.Sprintf("v2/alerts/%s/message", alias) + q := updateMessageEndpointURL.Query() q.Set("identifierType", "alias") - updateMessageEndpointUrl.RawQuery = q.Encode() + updateMessageEndpointURL.RawQuery = q.Encode() updateMsgMsg := &opsGenieUpdateMessageMessage{ Message: msg.Message, } @@ -245,7 +248,7 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h if err := json.NewEncoder(&updateMessageBuf).Encode(updateMsgMsg); err != nil { return nil, false, err } - req, err := http.NewRequest("PUT", updateMessageEndpointUrl.String(), &updateMessageBuf) + req, err := http.NewRequest("PUT", 
updateMessageEndpointURL.String(), &updateMessageBuf) if err != nil { return nil, true, err } @@ -276,7 +279,7 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h if n.conf.APIKey != "" { apiKey = tmpl(string(n.conf.APIKey)) } else { - content, err := ioutil.ReadFile(n.conf.APIKeyFile) + content, err := os.ReadFile(n.conf.APIKeyFile) if err != nil { return nil, false, errors.Wrap(err, "read key_file error") } diff --git a/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go b/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go index 6c6465f5445..b6b816e039a 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go +++ b/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "net/http" + "os" "strings" "github.com/alecthomas/units" @@ -35,7 +36,13 @@ import ( "github.com/prometheus/alertmanager/types" ) -const maxEventSize int = 512000 +const ( + maxEventSize int = 512000 + // https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTc4-send-a-v1-event - 1024 characters or runes. + maxV1DescriptionLenRunes = 1024 + // https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgx-send-an-alert-event - 1024 characters or runes. + maxV2SummaryLenRunes = 1024 +) // Notifier implements a Notifier for PagerDuty notifications. type Notifier struct { @@ -54,7 +61,7 @@ func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts return nil, err } n := &Notifier{conf: c, tmpl: t, logger: l, client: client} - if c.ServiceKey != "" { + if c.ServiceKey != "" || c.ServiceKeyFile != "" { n.apiV1 = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" // Retrying can solve the issue on 403 (rate limiting) and 5xx response codes. 
// https://v2.developer.pagerduty.com/docs/trigger-events @@ -148,13 +155,22 @@ func (n *Notifier) notifyV1( var tmplErr error tmpl := notify.TmplText(n.tmpl, data, &tmplErr) - description, truncated := notify.Truncate(tmpl(n.conf.Description), 1024) + description, truncated := notify.TruncateInRunes(tmpl(n.conf.Description), maxV1DescriptionLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "Truncated description", "description", description, "key", key) + level.Warn(n.logger).Log("msg", "Truncated description", "key", key, "max_runes", maxV1DescriptionLenRunes) + } + + serviceKey := string(n.conf.ServiceKey) + if serviceKey == "" { + content, fileErr := os.ReadFile(n.conf.ServiceKeyFile) + if fileErr != nil { + return false, errors.Wrap(fileErr, "failed to read service key from file") + } + serviceKey = strings.TrimSpace(string(content)) } msg := &pagerDutyMessage{ - ServiceKey: tmpl(string(n.conf.ServiceKey)), + ServiceKey: tmpl(serviceKey), EventType: eventType, IncidentKey: key.Hash(), Description: description, @@ -204,22 +220,31 @@ func (n *Notifier) notifyV2( n.conf.Severity = "error" } - summary, truncated := notify.Truncate(tmpl(n.conf.Description), 1024) + summary, truncated := notify.TruncateInRunes(tmpl(n.conf.Description), maxV2SummaryLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "Truncated summary", "summary", summary, "key", key) + level.Warn(n.logger).Log("msg", "Truncated summary", "key", key, "max_runes", maxV2SummaryLenRunes) + } + + routingKey := string(n.conf.RoutingKey) + if routingKey == "" { + content, fileErr := os.ReadFile(n.conf.RoutingKeyFile) + if fileErr != nil { + return false, errors.Wrap(fileErr, "failed to read routing key from file") + } + routingKey = strings.TrimSpace(string(content)) } msg := &pagerDutyMessage{ Client: tmpl(n.conf.Client), ClientURL: tmpl(n.conf.ClientURL), - RoutingKey: tmpl(string(n.conf.RoutingKey)), + RoutingKey: tmpl(routingKey), EventAction: eventType, DedupKey: key.Hash(), Images: make([]pagerDutyImage, 0, len(n.conf.Images)), Links: make([]pagerDutyLink, 0, len(n.conf.Links)), Payload: &pagerDutyPayload{ Summary: summary, - Source: tmpl(n.conf.Client), + Source: tmpl(n.conf.Source), Severity: tmpl(n.conf.Severity), CustomDetails: details, Class: tmpl(n.conf.Class), diff --git a/vendor/github.com/prometheus/alertmanager/notify/pushover/pushover.go b/vendor/github.com/prometheus/alertmanager/notify/pushover/pushover.go index eae0af00755..c5fc287a7b9 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/pushover/pushover.go +++ b/vendor/github.com/prometheus/alertmanager/notify/pushover/pushover.go @@ -31,6 +31,15 @@ import ( "github.com/prometheus/alertmanager/types" ) +const ( + // https://pushover.net/api#limits - 250 characters or runes. + maxTitleLenRunes = 250 + // https://pushover.net/api#limits - 1024 characters or runes. + maxMessageLenRunes = 1024 + // https://pushover.net/api#limits - 512 characters or runes. + maxURLLenRunes = 512 +) + // Notifier implements a Notifier for Pushover notifications. 
type Notifier struct { conf *config.PushoverConfig @@ -78,9 +87,9 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) parameters.Add("token", tmpl(string(n.conf.Token))) parameters.Add("user", tmpl(string(n.conf.UserKey))) - title, truncated := notify.Truncate(tmpl(n.conf.Title), 250) + title, truncated := notify.TruncateInRunes(tmpl(n.conf.Title), maxTitleLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "Truncated title", "truncated_title", title, "incident", key) + level.Warn(n.logger).Log("msg", "Truncated title", "incident", key, "max_runes", maxTitleLenRunes) } parameters.Add("title", title) @@ -91,9 +100,9 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) message = tmpl(n.conf.Message) } - message, truncated = notify.Truncate(message, 1024) + message, truncated = notify.TruncateInRunes(message, maxMessageLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "Truncated message", "truncated_message", message, "incident", key) + level.Warn(n.logger).Log("msg", "Truncated message", "incident", key, "max_runes", maxMessageLenRunes) } message = strings.TrimSpace(message) if message == "" { @@ -102,9 +111,9 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) } parameters.Add("message", message) - supplementaryURL, truncated := notify.Truncate(tmpl(n.conf.URL), 512) + supplementaryURL, truncated := notify.TruncateInRunes(tmpl(n.conf.URL), maxURLLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "Truncated URL", "truncated_url", supplementaryURL, "incident", key) + level.Warn(n.logger).Log("msg", "Truncated URL", "incident", key, "max_runes", maxURLLenRunes) } parameters.Add("url", supplementaryURL) parameters.Add("url_title", tmpl(n.conf.URLTitle)) @@ -130,5 +139,5 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) } defer notify.Drain(resp) - return n.retrier.Check(resp.StatusCode, nil) + return n.retrier.Check(resp.StatusCode, resp.Body) } diff --git a/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go b/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go index 5e8d515b09f..22bb30932e1 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go +++ b/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go @@ -18,13 +18,13 @@ import ( "context" "encoding/json" "fmt" - "github.com/go-kit/log/level" - "io/ioutil" "net/http" - - "github.com/pkg/errors" + "os" + "strings" "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" commoncfg "github.com/prometheus/common/config" "github.com/prometheus/alertmanager/config" @@ -33,6 +33,9 @@ import ( "github.com/prometheus/alertmanager/types" ) +// https://api.slack.com/reference/messaging/attachments#legacy_fields - 1024, no units given, assuming runes or characters. +const maxTitleLenRunes = 1024 + // Notifier implements a Notifier for Slack notifications. 
type Notifier struct { conf *config.SlackConfig @@ -99,13 +102,14 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) } else { markdownIn = n.conf.MrkdwnIn } - title, truncated := notify.Truncate(tmplText(n.conf.Title), 1024) + + title, truncated := notify.TruncateInRunes(tmplText(n.conf.Title), maxTitleLenRunes) if truncated { key, err := notify.ExtractGroupKey(ctx) if err != nil { return false, err } - level.Debug(n.logger).Log("msg", "Truncated title", "text", title, "key", key) + level.Warn(n.logger).Log("msg", "Truncated title", "key", key, "max_runes", maxTitleLenRunes) } att := &attachment{ Title: title, @@ -121,9 +125,9 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) MrkdwnIn: markdownIn, } - var numFields = len(n.conf.Fields) + numFields := len(n.conf.Fields) if numFields > 0 { - var fields = make([]config.SlackField, numFields) + fields := make([]config.SlackField, numFields) for index, field := range n.conf.Fields { // Check if short was defined for the field otherwise fallback to the global setting var short bool @@ -143,9 +147,9 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) att.Fields = fields } - var numActions = len(n.conf.Actions) + numActions := len(n.conf.Actions) if numActions > 0 { - var actions = make([]config.SlackAction, numActions) + actions := make([]config.SlackAction, numActions) for index, action := range n.conf.Actions { slackAction := config.SlackAction{ Type: tmplText(action.Type), @@ -191,11 +195,11 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) if n.conf.APIURL != nil { u = n.conf.APIURL.String() } else { - content, err := ioutil.ReadFile(n.conf.APIURLFile) + content, err := os.ReadFile(n.conf.APIURLFile) if err != nil { return false, err } - u = string(content) + u = strings.TrimSpace(string(content)) } resp, err := notify.PostJSON(ctx, n.client, u, &buf) diff --git a/vendor/github.com/prometheus/alertmanager/notify/sns/sns.go b/vendor/github.com/prometheus/alertmanager/notify/sns/sns.go index 104c4a4566c..1dfd42b4a5f 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/sns/sns.go +++ b/vendor/github.com/prometheus/alertmanager/notify/sns/sns.go @@ -71,9 +71,8 @@ func (n *Notifier) Notify(ctx context.Context, alert ...*types.Alert) (bool, err if err != nil { if e, ok := err.(awserr.RequestFailure); ok { return n.retrier.Check(e.StatusCode(), strings.NewReader(e.Message())) - } else { - return true, err } + return true, err } publishInput, err := n.createPublishInput(ctx, tmpl) @@ -85,9 +84,8 @@ func (n *Notifier) Notify(ctx context.Context, alert ...*types.Alert) (bool, err if err != nil { if e, ok := err.(awserr.RequestFailure); ok { return n.retrier.Check(e.StatusCode(), strings.NewReader(e.Message())) - } else { - return true, err } + return true, err } level.Debug(n.logger).Log("msg", "SNS message successfully published", "message_id", publishOutput.MessageId, "sequence number", publishOutput.SequenceNumber) @@ -96,7 +94,7 @@ func (n *Notifier) Notify(ctx context.Context, alert ...*types.Alert) (bool, err } func (n *Notifier) createSNSClient(tmpl func(string) string) (*sns.SNS, error) { - var creds *credentials.Credentials = nil + var creds *credentials.Credentials // If there are provided sigV4 credentials we want to use those to create a session. 
if n.conf.Sigv4.AccessKey != "" && n.conf.Sigv4.SecretKey != "" { creds = credentials.NewStaticCredentials(n.conf.Sigv4.AccessKey, string(n.conf.Sigv4.SecretKey), "") diff --git a/vendor/github.com/prometheus/alertmanager/notify/util.go b/vendor/github.com/prometheus/alertmanager/notify/util.go index 3d0a4a2419f..6b71ac7b29e 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/util.go +++ b/vendor/github.com/prometheus/alertmanager/notify/util.go @@ -18,19 +18,22 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "net/http" "net/url" + "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/common/version" "github.com/prometheus/alertmanager/template" "github.com/prometheus/alertmanager/types" - "github.com/prometheus/common/version" ) +// truncationMarker is the character used to represent a truncation. +const truncationMarker = "…" + // UserAgentHeader is the default User-Agent for notification requests var UserAgentHeader = fmt.Sprintf("Alertmanager/%s", version.Version) @@ -59,11 +62,11 @@ func PostText(ctx context.Context, client *http.Client, url string, body io.Read return post(ctx, client, url, "text/plain", body) } -func post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { +func post(ctx context.Context, client *http.Client, url, bodyType string, body io.Reader) (*http.Response, error) { return request(ctx, client, http.MethodPost, url, bodyType, body) } -func request(ctx context.Context, client *http.Client, method string, url string, bodyType string, body io.Reader) (*http.Response, error) { +func request(ctx context.Context, client *http.Client, method, url, bodyType string, body io.Reader) (*http.Response, error) { req, err := http.NewRequest(method, url, body) if err != nil { return nil, err @@ -78,20 +81,52 @@ func request(ctx context.Context, client *http.Client, method string, url string // Drain consumes and closes the response's body to make sure that the // HTTP client can reuse existing connections. func Drain(r *http.Response) { - io.Copy(ioutil.Discard, r.Body) + io.Copy(io.Discard, r.Body) r.Body.Close() } -// Truncate truncates a string to fit the given size. -func Truncate(s string, n int) (string, bool) { +// TruncateInRunes truncates a string to fit the given size in Runes. +func TruncateInRunes(s string, n int) (string, bool) { r := []rune(s) if len(r) <= n { return s, false } + if n <= 3 { return string(r[:n]), true } - return string(r[:n-3]) + "...", true + + return string(r[:n-1]) + truncationMarker, true +} + +// TruncateInBytes truncates a string to fit the given size in Bytes. +func TruncateInBytes(s string, n int) (string, bool) { + // First, measure the string the w/o a to-rune conversion. + if len(s) <= n { + return s, false + } + + // The truncationMarker itself is 3 bytes, we can't return any part of the string when it's less than 3. + if n <= 3 { + switch n { + case 3: + return truncationMarker, true + default: + return strings.Repeat(".", n), true + } + } + + // Now, to ensure we don't butcher the string we need to remove using runes. + r := []rune(s) + truncationTarget := n - 3 + + // Next, let's truncate the runes to the lower possible number. 
+ truncatedRunes := r[:truncationTarget] + for len(string(truncatedRunes)) > truncationTarget { + truncatedRunes = r[:len(truncatedRunes)-1] + } + + return string(truncatedRunes) + truncationMarker, true } // TmplText is using monadic error handling in order to make string templating @@ -161,7 +196,7 @@ func readAll(r io.Reader) string { if r == nil { return "" } - bs, err := ioutil.ReadAll(r) + bs, err := io.ReadAll(r) if err != nil { return "" } diff --git a/vendor/github.com/prometheus/alertmanager/notify/victorops/victorops.go b/vendor/github.com/prometheus/alertmanager/notify/victorops/victorops.go index 3fdbf41dc17..02c9362190e 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/victorops/victorops.go +++ b/vendor/github.com/prometheus/alertmanager/notify/victorops/victorops.go @@ -19,9 +19,12 @@ import ( "encoding/json" "fmt" "net/http" + "os" + "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/pkg/errors" commoncfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -31,6 +34,9 @@ import ( "github.com/prometheus/alertmanager/types" ) +// https://help.victorops.com/knowledge-base/incident-fields-glossary/ - 20480 characters. +const maxMessageLenRunes = 20480 + // Notifier implements a Notifier for VictorOps notifications. type Notifier struct { conf *config.VictorOpsConfig @@ -64,14 +70,25 @@ const ( // Notify implements the Notifier interface. func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { - var err error var ( data = notify.GetTemplateData(ctx, n.tmpl, as, n.logger) tmpl = notify.TmplText(n.tmpl, data, &err) apiURL = n.conf.APIURL.Copy() ) - apiURL.Path += fmt.Sprintf("%s/%s", n.conf.APIKey, tmpl(n.conf.RoutingKey)) + + var apiKey string + if n.conf.APIKey != "" { + apiKey = string(n.conf.APIKey) + } else { + content, fileErr := os.ReadFile(n.conf.APIKeyFile) + if fileErr != nil { + return false, errors.Wrap(fileErr, "failed to read API key from file") + } + apiKey = strings.TrimSpace(string(content)) + } + + apiURL.Path += fmt.Sprintf("%s/%s", apiKey, tmpl(n.conf.RoutingKey)) if err != nil { return false, fmt.Errorf("templating error: %s", err) } @@ -87,7 +104,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) } defer notify.Drain(resp) - return n.retrier.Check(resp.StatusCode, nil) + return n.retrier.Check(resp.StatusCode, resp.Body) } // Create the JSON payload to be sent to the VictorOps API. 
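// Illustrative sketch (not part of the vendored diff): the rune-safe truncation
// idea behind the TruncateInRunes/TruncateInBytes helpers above — drop whole
// runes until the UTF-8 encoding fits the byte budget, then append the 3-byte
// "…" marker. truncateInBytes is a simplified stand-in that assumes n > 3 and
// is not the vendored function.
package main

import "fmt"

func truncateInBytes(s string, n int) (string, bool) {
	if len(s) <= n {
		return s, false
	}
	r := []rune(s)
	target := n - 3 // reserve room for the 3-byte "…" marker
	i := len(r)
	for len(string(r[:i])) > target {
		i-- // drop whole runes, never split a code point
	}
	return string(r[:i]) + "…", true
}

func main() {
	out, truncated := truncateInBytes("gender-neutral…", 10)
	fmt.Println(out, truncated) // result is valid UTF-8 and at most 10 bytes
}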
@@ -120,9 +137,9 @@ func (n *Notifier) createVictorOpsPayload(ctx context.Context, as ...*types.Aler messageType = victorOpsEventResolve } - stateMessage, truncated := notify.Truncate(stateMessage, 20480) + stateMessage, truncated := notify.TruncateInRunes(stateMessage, maxMessageLenRunes) if truncated { - level.Debug(n.logger).Log("msg", "truncated stateMessage", "truncated_state_message", stateMessage, "incident", key) + level.Warn(n.logger).Log("msg", "Truncated state_message", "incident", key, "max_runes", maxMessageLenRunes) } msg := map[string]string{ diff --git a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go index 00c525024bc..463a3416f5d 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go +++ b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go @@ -17,6 +17,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "net/http" @@ -53,8 +54,8 @@ func New(conf *config.WebhookConfig, t *template.Template, l log.Logger, httpOpt // Webhooks are assumed to respond with 2xx response codes on a successful // request and 5xx response codes are assumed to be recoverable. retrier: ¬ify.Retrier{ - CustomDetailsFunc: func(int, io.Reader) string { - return conf.URL.String() + CustomDetailsFunc: func(_ int, body io.Reader) string { + return errDetails(body, conf.URL.String()) }, }, }, nil @@ -104,7 +105,18 @@ func (n *Notifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, er if err != nil { return true, err } - notify.Drain(resp) + defer notify.Drain(resp) - return n.retrier.Check(resp.StatusCode, nil) + return n.retrier.Check(resp.StatusCode, resp.Body) +} + +func errDetails(body io.Reader, url string) string { + if body == nil { + return url + } + bs, err := io.ReadAll(body) + if err != nil { + return url + } + return fmt.Sprintf("%s: %s", url, string(bs)) } diff --git a/vendor/github.com/prometheus/alertmanager/notify/wechat/wechat.go b/vendor/github.com/prometheus/alertmanager/notify/wechat/wechat.go index 762d467ad72..a926b2e8fb5 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/wechat/wechat.go +++ b/vendor/github.com/prometheus/alertmanager/notify/wechat/wechat.go @@ -18,7 +18,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -171,7 +171,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) return true, fmt.Errorf("unexpected status code %v", resp.StatusCode) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return true, err } diff --git a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go index 0a242d506f3..a125d59d8a8 100644 --- a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go +++ b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go @@ -45,11 +45,12 @@ var ( // this comma and whitespace will be trimmed. // // Examples for valid input strings: -// {foo = "bar", dings != "bums", } -// foo=bar,dings!=bums -// foo=bar, dings!=bums -// {quote="She said: \"Hi, ladies! That's gender-neutral…\""} -// statuscode=~"5.." +// +// {foo = "bar", dings != "bums", } +// foo=bar,dings!=bums +// foo=bar, dings!=bums +// {quote="She said: \"Hi, ladies! That's gender-neutral…\""} +// statuscode=~"5.." // // See ParseMatcher for details on how an individual Matcher is parsed. 
func ParseMatchers(s string) ([]*Matcher, error) { @@ -127,7 +128,7 @@ func ParseMatcher(s string) (_ *Matcher, err error) { expectTrailingQuote bool ) - if rawValue[0] == '"' { + if strings.HasPrefix(rawValue, "\"") { rawValue = strings.TrimPrefix(rawValue, "\"") expectTrailingQuote = true } diff --git a/vendor/github.com/prometheus/alertmanager/provider/mem/mem.go b/vendor/github.com/prometheus/alertmanager/provider/mem/mem.go index 9a67cceb9a5..0e39ba1d5fb 100644 --- a/vendor/github.com/prometheus/alertmanager/provider/mem/mem.go +++ b/vendor/github.com/prometheus/alertmanager/provider/mem/mem.go @@ -20,6 +20,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/alertmanager/provider" @@ -33,8 +34,10 @@ const alertChannelLength = 200 type Alerts struct { cancel context.CancelFunc + alerts *store.Alerts + marker types.Marker + mtx sync.Mutex - alerts *store.Alerts listeners map[int]listeningAlerts next int @@ -62,14 +65,34 @@ type listeningAlerts struct { done chan struct{} } +func (a *Alerts) registerMetrics(r prometheus.Registerer) { + newMemAlertByStatus := func(s types.AlertState) prometheus.GaugeFunc { + return prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Name: "alertmanager_alerts", + Help: "How many alerts by state.", + ConstLabels: prometheus.Labels{"state": string(s)}, + }, + func() float64 { + return float64(a.count(s)) + }, + ) + } + + r.MustRegister(newMemAlertByStatus(types.AlertStateActive)) + r.MustRegister(newMemAlertByStatus(types.AlertStateSuppressed)) + r.MustRegister(newMemAlertByStatus(types.AlertStateUnprocessed)) +} + // NewAlerts returns a new alert provider. -func NewAlerts(ctx context.Context, m types.Marker, intervalGC time.Duration, alertCallback AlertStoreCallback, l log.Logger) (*Alerts, error) { +func NewAlerts(ctx context.Context, m types.Marker, intervalGC time.Duration, alertCallback AlertStoreCallback, l log.Logger, r prometheus.Registerer) (*Alerts, error) { if alertCallback == nil { alertCallback = noopCallback{} } ctx, cancel := context.WithCancel(ctx) a := &Alerts{ + marker: m, alerts: store.NewAlerts(), cancel: cancel, listeners: map[int]listeningAlerts{}, @@ -98,6 +121,11 @@ func NewAlerts(ctx context.Context, m types.Marker, intervalGC time.Duration, al } a.mtx.Unlock() }) + + if r != nil { + a.registerMetrics(r) + } + go a.alerts.Run(ctx, intervalGC) return a, nil @@ -212,6 +240,25 @@ func (a *Alerts) Put(alerts ...*types.Alert) error { return nil } +// count returns the number of non-resolved alerts we currently have stored filtered by the provided state. +func (a *Alerts) count(state types.AlertState) int { + var count int + for _, alert := range a.alerts.List() { + if alert.Resolved() { + continue + } + + status := a.marker.Status(alert.Fingerprint()) + if status.State != state { + continue + } + + count++ + } + + return count +} + type noopCallback struct{} func (n noopCallback) PreStore(_ *types.Alert, _ bool) error { return nil } diff --git a/vendor/github.com/prometheus/alertmanager/provider/provider.go b/vendor/github.com/prometheus/alertmanager/provider/provider.go index 91b5a741662..5fcce203606 100644 --- a/vendor/github.com/prometheus/alertmanager/provider/provider.go +++ b/vendor/github.com/prometheus/alertmanager/provider/provider.go @@ -21,10 +21,8 @@ import ( "github.com/prometheus/alertmanager/types" ) -var ( - // ErrNotFound is returned if a provider cannot find a requested item. 
- ErrNotFound = fmt.Errorf("item not found") -) +// ErrNotFound is returned if a provider cannot find a requested item. +var ErrNotFound = fmt.Errorf("item not found") // Iterator provides the functions common to all iterators. To be useful, a // specific iterator interface (e.g. AlertIterator) has to be implemented that diff --git a/vendor/github.com/prometheus/alertmanager/silence/silence.go b/vendor/github.com/prometheus/alertmanager/silence/silence.go index 9beaab1d274..d8837d28afe 100644 --- a/vendor/github.com/prometheus/alertmanager/silence/silence.go +++ b/vendor/github.com/prometheus/alertmanager/silence/silence.go @@ -27,17 +27,19 @@ import ( "sync" "time" + "github.com/benbjohnson/clock" "github.com/go-kit/log" "github.com/go-kit/log/level" uuid "github.com/gofrs/uuid" "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/alertmanager/cluster" "github.com/prometheus/alertmanager/pkg/labels" pb "github.com/prometheus/alertmanager/silence/silencepb" "github.com/prometheus/alertmanager/types" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" ) // ErrNotFound is returned if a silence was not found. @@ -46,10 +48,6 @@ var ErrNotFound = fmt.Errorf("silence not found") // ErrInvalidState is returned if the state isn't valid. var ErrInvalidState = fmt.Errorf("invalid state") -func utcNow() time.Time { - return time.Now().UTC() -} - type matcherCache map[*pb.Silence]labels.Matchers // Get retrieves the matchers for a given silence. If it is a missed cache @@ -153,7 +151,7 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool { } if len(allSils) == 0 { // Easy case, neither active nor pending silences anymore. - s.marker.SetSilenced(fp, newVersion, nil, nil) + s.marker.SetActiveOrSilenced(fp, newVersion, nil, nil) return false } // It is still possible that nothing has changed, but finding out is not @@ -161,7 +159,7 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool { // result. So let's do it in any case. Note that we cannot reuse the // current ID slices for concurrency reasons. activeIDs, pendingIDs = nil, nil - now := s.silences.now() + now := s.silences.nowUTC() for _, sil := range allSils { switch getState(sil, now) { case types.SilenceStatePending: @@ -182,16 +180,17 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool { sort.Strings(activeIDs) sort.Strings(pendingIDs) - s.marker.SetSilenced(fp, newVersion, activeIDs, pendingIDs) + s.marker.SetActiveOrSilenced(fp, newVersion, activeIDs, pendingIDs) return len(activeIDs) > 0 } // Silences holds a silence state that can be modified, queried, and snapshot. type Silences struct { + clock clock.Clock + logger log.Logger metrics *metrics - now func() time.Time retention time.Duration mtx sync.RWMutex @@ -331,10 +330,10 @@ func New(o Options) (*Silences, error) { } } s := &Silences{ + clock: clock.New(), mc: matcherCache{}, logger: log.NewNopLogger(), retention: o.Retention, - now: utcNow, broadcast: func([]byte) {}, st: state{}, } @@ -351,12 +350,16 @@ func New(o Options) (*Silences, error) { return s, nil } +func (s *Silences) nowUTC() time.Time { + return s.clock.Now().UTC() +} + // Maintenance garbage collects the silence state at the given interval. If the snapshot // file is set, a snapshot is written to it afterwards. // Terminates on receiving from stopc. 
// If not nil, the last argument is an override for what to do as part of the maintenance - for advanced usage. func (s *Silences) Maintenance(interval time.Duration, snapf string, stopc <-chan struct{}, override MaintenanceFunc) { - t := time.NewTicker(interval) + t := s.clock.Ticker(interval) defer t.Stop() var doMaintenance MaintenanceFunc @@ -384,10 +387,10 @@ func (s *Silences) Maintenance(interval time.Duration, snapf string, stopc <-cha } runMaintenance := func(do MaintenanceFunc) error { - start := s.now() + start := s.nowUTC() level.Debug(s.logger).Log("msg", "Running maintenance") size, err := do() - level.Debug(s.logger).Log("msg", "Maintenance done", "duration", s.now().Sub(start), "size", size) + level.Debug(s.logger).Log("msg", "Maintenance done", "duration", s.clock.Since(start), "size", size) s.metrics.snapshotSize.Set(float64(size)) return err } @@ -418,7 +421,7 @@ func (s *Silences) GC() (int, error) { start := time.Now() defer func() { s.metrics.gcDuration.Observe(time.Since(start).Seconds()) }() - now := s.now() + now := s.nowUTC() var n int s.mtx.Lock() @@ -546,7 +549,7 @@ func (s *Silences) Set(sil *pb.Silence) (string, error) { s.mtx.Lock() defer s.mtx.Unlock() - now := s.now() + now := s.nowUTC() prev, ok := s.getSilence(sil.Id) if sil.Id != "" && !ok { @@ -556,7 +559,7 @@ func (s *Silences) Set(sil *pb.Silence) (string, error) { if canUpdate(prev, sil, now) { return sil.Id, s.setSilence(sil, now) } - if getState(prev, s.now()) != types.SilenceStateExpired { + if getState(prev, s.nowUTC()) != types.SilenceStateExpired { // We cannot update the silence, expire the old one. if err := s.expire(prev.Id); err != nil { return "", errors.Wrap(err, "expire previous silence") @@ -620,7 +623,7 @@ func (s *Silences) expire(id string) error { return ErrNotFound } sil = cloneSilence(sil) - now := s.now() + now := s.nowUTC() switch getState(sil, now) { case types.SilenceStateExpired: @@ -726,7 +729,7 @@ func (s *Silences) Query(params ...QueryParam) ([]*pb.Silence, int, error) { return nil, s.Version(), err } } - sils, version, err := s.query(q, s.now()) + sils, version, err := s.query(q, s.nowUTC()) if err != nil { s.metrics.queryErrorsTotal.Inc() } @@ -849,7 +852,7 @@ func (s *Silences) Merge(b []byte) error { s.mtx.Lock() defer s.mtx.Unlock() - now := s.now() + now := s.nowUTC() for _, e := range st { if merged := s.st.merge(e, now); merged { diff --git a/vendor/github.com/prometheus/alertmanager/store/store.go b/vendor/github.com/prometheus/alertmanager/store/store.go index 488cb451615..83f1495a0fb 100644 --- a/vendor/github.com/prometheus/alertmanager/store/store.go +++ b/vendor/github.com/prometheus/alertmanager/store/store.go @@ -19,15 +19,14 @@ import ( "sync" "time" - "github.com/prometheus/alertmanager/types" "github.com/prometheus/common/model" -) -var ( - // ErrNotFound is returned if a Store cannot find the Alert. - ErrNotFound = errors.New("alert not found") + "github.com/prometheus/alertmanager/types" ) +// ErrNotFound is returned if a Store cannot find the Alert. +var ErrNotFound = errors.New("alert not found") + // Alerts provides lock-coordinated to an in-memory map of alerts, keyed by // their fingerprint. Resolved alerts are removed from the map based on // gcInterval. 
An optional callback can be set which receives a slice of all diff --git a/vendor/github.com/prometheus/alertmanager/template/default.tmpl b/vendor/github.com/prometheus/alertmanager/template/default.tmpl index 4ce5de708df..82626b1a455 100644 --- a/vendor/github.com/prometheus/alertmanager/template/default.tmpl +++ b/vendor/github.com/prometheus/alertmanager/template/default.tmpl @@ -111,4 +111,27 @@ Alerts Firing: Alerts Resolved: {{ template "__text_alert_list" .Alerts.Resolved }} {{ end }} -{{ end }} \ No newline at end of file +{{ end }} + +{{ define "discord.default.title" }}{{ template "__subject" . }}{{ end }} +{{ define "discord.default.message" }} +{{ if gt (len .Alerts.Firing) 0 }} +Alerts Firing: +{{ template "__text_alert_list" .Alerts.Firing }} +{{ end }} +{{ if gt (len .Alerts.Resolved) 0 }} +Alerts Resolved: +{{ template "__text_alert_list" .Alerts.Resolved }} +{{ end }} +{{ end }} + +{{ define "webex.default.message" }}{{ .CommonAnnotations.SortedPairs.Values | join " " }} +{{ if gt (len .Alerts.Firing) 0 }} +Alerts Firing: +{{ template "__text_alert_list" .Alerts.Firing }} +{{ end }} +{{ if gt (len .Alerts.Resolved) 0 }} +Alerts Resolved: +{{ template "__text_alert_list" .Alerts.Resolved }} +{{ end }} +{{ end }} diff --git a/vendor/github.com/prometheus/alertmanager/template/email.html b/vendor/github.com/prometheus/alertmanager/template/email.html index 1f15ab8b474..4bd42f95b8d 100644 --- a/vendor/github.com/prometheus/alertmanager/template/email.html +++ b/vendor/github.com/prometheus/alertmanager/template/email.html @@ -325,13 +325,17 @@ {{ if gt (len .Alerts.Firing) 0 }} + {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} + {{ .Name }}={{ .Value }} + {{ end }} + {{ else }} - {{ end }} {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} {{ .Name }}={{ .Value }} {{ end }} + {{ end }} diff --git a/vendor/github.com/prometheus/alertmanager/template/email.tmpl b/vendor/github.com/prometheus/alertmanager/template/email.tmpl index 63a76525ff5..7477a304c4a 100644 --- a/vendor/github.com/prometheus/alertmanager/template/email.tmpl +++ b/vendor/github.com/prometheus/alertmanager/template/email.tmpl @@ -90,13 +90,17 @@ h4 { {{ if gt (len .Alerts.Firing) 0 }} + {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} + {{ .Name }}={{ .Value }} + {{ end }} + {{ else }} - - {{ end }} + {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} {{ .Name }}={{ .Value }} {{ end }} + {{ end }} diff --git a/vendor/github.com/prometheus/alertmanager/template/template.go b/vendor/github.com/prometheus/alertmanager/template/template.go index 2c1b62db3fa..3882187f3ad 100644 --- a/vendor/github.com/prometheus/alertmanager/template/template.go +++ b/vendor/github.com/prometheus/alertmanager/template/template.go @@ -16,7 +16,7 @@ package template import ( "bytes" tmplhtml "html/template" - "io/ioutil" + "io" "net/url" "path" "path/filepath" @@ -27,6 +27,8 @@ import ( "time" "github.com/prometheus/common/model" + "golang.org/x/text/cases" + "golang.org/x/text/language" "github.com/prometheus/alertmanager/asset" "github.com/prometheus/alertmanager/types" @@ -59,7 +61,7 @@ func FromGlobs(paths ...string) (*Template, error) { return nil, err } defer f.Close() - b, err := ioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { return nil, err } @@ -132,7 +134,7 @@ type FuncMap map[string]interface{} 
var DefaultFuncs = FuncMap{ "toUpper": strings.ToUpper, "toLower": strings.ToLower, - "title": strings.Title, + "title": cases.Title(language.AmericanEnglish).String, // join is equal to strings.Join but inverts the argument order // for easier pipelining in templates. "join": func(sep string, s []string) string { diff --git a/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go b/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go index 24c4a4ea033..a5018aaef97 100644 --- a/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go +++ b/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go @@ -17,7 +17,9 @@ import ( "encoding/json" "errors" "fmt" + "os" "regexp" + "runtime" "strconv" "strings" "time" @@ -33,6 +35,7 @@ type TimeInterval struct { DaysOfMonth []DayOfMonthRange `yaml:"days_of_month,flow,omitempty" json:"days_of_month,omitempty"` Months []MonthRange `yaml:"months,flow,omitempty" json:"months,omitempty"` Years []YearRange `yaml:"years,flow,omitempty" json:"years,omitempty"` + Location *Location `yaml:"location,flow,omitempty" json:"location,omitempty"` } // TimeRange represents a range of minutes within a 1440 minute day, exclusive of the End minute. A day consists of 1440 minutes. @@ -68,6 +71,11 @@ type YearRange struct { InclusiveRange } +// A Location is a container for a time.Location, used for custom unmarshalling/validation logic. +type Location struct { + *time.Location +} + type yamlTimeRange struct { StartTime string `yaml:"start_time" json:"start_time"` EndTime string `yaml:"end_time" json:"end_time"` @@ -125,6 +133,7 @@ var daysOfWeek = map[string]int{ "friday": 5, "saturday": 6, } + var daysOfWeekInv = map[int]string{ 0: "sunday", 1: "monday", @@ -165,6 +174,34 @@ var monthsInv = map[int]string{ 12: "december", } +// UnmarshalYAML implements the Unmarshaller interface for Location. +func (tz *Location) UnmarshalYAML(unmarshal func(interface{}) error) error { + var str string + if err := unmarshal(&str); err != nil { + return err + } + + loc, err := time.LoadLocation(str) + if err != nil { + if runtime.GOOS == "windows" { + if zoneinfo := os.Getenv("ZONEINFO"); zoneinfo != "" { + return fmt.Errorf("%w (ZONEINFO=%q)", err, zoneinfo) + } + return fmt.Errorf("%w (on Windows platforms, you may have to pass the time zone database using the ZONEINFO environment variable, see https://pkg.go.dev/time#LoadLocation for details)", err) + } + return err + } + + *tz = Location{loc} + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Location. +// It delegates to the YAML unmarshaller as it can parse JSON and has validation logic. +func (tz *Location) UnmarshalJSON(in []byte) error { + return yaml.Unmarshal(in, tz) +} + // UnmarshalYAML implements the Unmarshaller interface for WeekdayRange. func (r *WeekdayRange) UnmarshalYAML(unmarshal func(interface{}) error) error { var str string @@ -316,7 +353,7 @@ func (r WeekdayRange) MarshalYAML() (interface{}, error) { } // MarshalText implements the econding.TextMarshaler interface for WeekdayRange. -// It converts the range into a colon-seperated string, or a single weekday if possible. +// It converts the range into a colon-separated string, or a single weekday if possible. // e.g. "monday:friday" or "saturday". 
func (r WeekdayRange) MarshalText() ([]byte, error) { beginStr, ok := daysOfWeekInv[r.Begin] @@ -362,6 +399,26 @@ func (tr TimeRange) MarshalJSON() (out []byte, err error) { return json.Marshal(yTr) } +// MarshalText implements the econding.TextMarshaler interface for Location. +// It marshals a Location back into a string that represents a time.Location. +func (tz Location) MarshalText() ([]byte, error) { + if tz.Location == nil { + return nil, fmt.Errorf("unable to convert nil location into string") + } + return []byte(tz.Location.String()), nil +} + +// MarshalYAML implements the yaml.Marshaler interface for Location. +func (tz Location) MarshalYAML() (interface{}, error) { + bytes, err := tz.MarshalText() + return string(bytes), err +} + +// MarshalJSON implements the json.Marshaler interface for Location. +func (tz Location) MarshalJSON() (out []byte, err error) { + return json.Marshal(tz.String()) +} + // MarshalText implements the encoding.TextMarshaler interface for InclusiveRange. // It converts the struct into a colon-separated string, or a single element if // appropriate. e.g. "monday:friday" or "monday" @@ -373,7 +430,7 @@ func (ir InclusiveRange) MarshalText() ([]byte, error) { return []byte(out), nil } -//MarshalYAML implements the yaml.Marshaler interface for InclusiveRange. +// MarshalYAML implements the yaml.Marshaler interface for InclusiveRange. func (ir InclusiveRange) MarshalYAML() (interface{}, error) { bytes, err := ir.MarshalText() return string(bytes), err @@ -382,8 +439,10 @@ func (ir InclusiveRange) MarshalYAML() (interface{}, error) { // TimeLayout specifies the layout to be used in time.Parse() calls for time intervals. const TimeLayout = "15:04" -var validTime string = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" -var validTimeRE *regexp.Regexp = regexp.MustCompile(validTime) +var ( + validTime = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" + validTimeRE = regexp.MustCompile(validTime) +) // Given a time, determines the number of days in the month that time occurs in. func daysInMonth(t time.Time) int { @@ -405,6 +464,9 @@ func clamp(n, min, max int) int { // ContainsTime returns true if the TimeInterval contains the given time, otherwise returns false. func (tp TimeInterval) ContainsTime(t time.Time) bool { + if tp.Location != nil { + t = t.In(tp.Location.Location) + } if tp.Times != nil { in := false for _, validMinutes := range tp.Times { diff --git a/vendor/github.com/prometheus/alertmanager/types/types.go b/vendor/github.com/prometheus/alertmanager/types/types.go index a412453d3a1..f3101f8234c 100644 --- a/vendor/github.com/prometheus/alertmanager/types/types.go +++ b/vendor/github.com/prometheus/alertmanager/types/types.go @@ -18,9 +18,10 @@ import ( "sync" "time" - "github.com/prometheus/alertmanager/pkg/labels" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + + "github.com/prometheus/alertmanager/pkg/labels" ) // AlertState is used as part of AlertStatus. @@ -52,18 +53,18 @@ type AlertStatus struct { // Marker helps to mark alerts as silenced and/or inhibited. // All methods are goroutine-safe. type Marker interface { - // SetSilenced replaces the previous SilencedBy by the provided IDs of + // SetActiveOrSilenced replaces the previous SilencedBy by the provided IDs of // active and pending silences, including the version number of the // silences state. The set of provided IDs is supposed to represent the // complete set of relevant silences. 
If no active silence IDs are provided and // InhibitedBy is already empty, it sets the provided alert to AlertStateActive. // Otherwise, it sets the provided alert to AlertStateSuppressed. - SetSilenced(alert model.Fingerprint, version int, activeSilenceIDs []string, pendingSilenceIDs []string) + SetActiveOrSilenced(alert model.Fingerprint, version int, activeSilenceIDs, pendingSilenceIDs []string) // SetInhibited replaces the previous InhibitedBy by the provided IDs of - // alerts. In contrast to SetSilenced, the set of provided IDs is not + // alerts. In contrast to SetActiveOrSilenced, the set of provided IDs is not // expected to represent the complete set of inhibiting alerts. (In // practice, this method is only called with one or zero IDs. However, - // this expectation might change in the future.) If no IDs are provided + // this expectation might change in the future. If no IDs are provided // and InhibitedBy is already empty, it sets the provided alert to // AlertStateActive. Otherwise, it sets the provided alert to // AlertStateSuppressed. @@ -85,7 +86,7 @@ type Marker interface { // result is based on. Unprocessed(model.Fingerprint) bool Active(model.Fingerprint) bool - Silenced(model.Fingerprint) (activeIDs []string, pendingIDs []string, version int, silenced bool) + Silenced(model.Fingerprint) (activeIDs, pendingIDs []string, version int, silenced bool) Inhibited(model.Fingerprint) ([]string, bool) } @@ -107,11 +108,11 @@ type memMarker struct { } func (m *memMarker) registerMetrics(r prometheus.Registerer) { - newAlertMetricByState := func(st AlertState) prometheus.GaugeFunc { + newMarkedAlertMetricByState := func(st AlertState) prometheus.GaugeFunc { return prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Name: "alertmanager_alerts", - Help: "How many alerts by state.", + Name: "alertmanager_marked_alerts", + Help: "How many alerts by state are currently marked in the Alertmanager regardless of their expiry.", ConstLabels: prometheus.Labels{"state": string(st)}, }, func() float64 { @@ -120,11 +121,13 @@ func (m *memMarker) registerMetrics(r prometheus.Registerer) { ) } - alertsActive := newAlertMetricByState(AlertStateActive) - alertsSuppressed := newAlertMetricByState(AlertStateSuppressed) + alertsActive := newMarkedAlertMetricByState(AlertStateActive) + alertsSuppressed := newMarkedAlertMetricByState(AlertStateSuppressed) + alertStateUnprocessed := newMarkedAlertMetricByState(AlertStateUnprocessed) r.MustRegister(alertsActive) r.MustRegister(alertsSuppressed) + r.MustRegister(alertStateUnprocessed) } // Count implements Marker. @@ -147,8 +150,8 @@ func (m *memMarker) Count(states ...AlertState) int { return count } -// SetSilenced implements Marker. -func (m *memMarker) SetSilenced(alert model.Fingerprint, version int, activeIDs []string, pendingIDs []string) { +// SetActiveOrSilenced implements Marker. +func (m *memMarker) SetActiveOrSilenced(alert model.Fingerprint, version int, activeIDs, pendingIDs []string) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,7 @@ func (m *memMarker) Inhibited(alert model.Fingerprint) ([]string, bool) { // Silenced returns whether the alert for the given Fingerprint is in the // Silenced state, any associated silence IDs, and the silences state version // the result is based on. 
-func (m *memMarker) Silenced(alert model.Fingerprint) (activeIDs []string, pendingIDs []string, version int, silenced bool) { +func (m *memMarker) Silenced(alert model.Fingerprint) (activeIDs, pendingIDs []string, version int, silenced bool) { s := m.Status(alert) return s.SilencedBy, s.pendingSilences, s.silencesVersion, s.State == AlertStateSuppressed && len(s.SilencedBy) > 0 diff --git a/vendor/github.com/prometheus/alertmanager/ui/web.go b/vendor/github.com/prometheus/alertmanager/ui/web.go index fa1460d42f8..4d41a307329 100644 --- a/vendor/github.com/prometheus/alertmanager/ui/web.go +++ b/vendor/github.com/prometheus/alertmanager/ui/web.go @@ -76,10 +76,16 @@ func Register(r *route.Router, reloadCh chan<- chan error, logger log.Logger) { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, "OK") })) + r.Head("/-/healthy", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) r.Get("/-/ready", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, "OK") })) + r.Head("/-/ready", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) r.Get("/debug/*subpath", http.DefaultServeMux.ServeHTTP) r.Post("/debug/*subpath", http.DefaultServeMux.ServeHTTP) diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index b47347e4b51..3ba7f99cc15 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -21,10 +21,11 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" "net" "net/http" "net/url" + "os" + "path/filepath" "strings" "sync" "time" @@ -80,7 +81,7 @@ func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { } func (tv *TLSVersion) MarshalYAML() (interface{}, error) { - if tv != nil || *tv == 0 { + if tv == nil || *tv == 0 { return []byte("null"), nil } for s, v := range TLSVersions { @@ -106,7 +107,7 @@ func (tv *TLSVersion) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaler interface for TLSVersion. func (tv *TLSVersion) MarshalJSON() ([]byte, error) { - if tv != nil || *tv == 0 { + if tv == nil || *tv == 0 { return []byte("null"), nil } for s, v := range TLSVersions { @@ -117,6 +118,19 @@ func (tv *TLSVersion) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("unknown TLS version: %d", tv) } +// String implements the fmt.Stringer interface for TLSVersion. +func (tv *TLSVersion) String() string { + if tv == nil || *tv == 0 { + return "" + } + for s, v := range TLSVersions { + if *tv == v { + return s + } + } + return fmt.Sprintf("%d", tv) +} + // BasicAuth contains basic HTTP authentication credentials. type BasicAuth struct { Username string `yaml:"username" json:"username"` @@ -235,6 +249,30 @@ func (a *OAuth2) SetDirectory(dir string) { a.TLSConfig.SetDirectory(dir) } +// LoadHTTPConfig parses the YAML input s into a HTTPClientConfig. +func LoadHTTPConfig(s string) (*HTTPClientConfig, error) { + cfg := &HTTPClientConfig{} + err := yaml.UnmarshalStrict([]byte(s), cfg) + if err != nil { + return nil, err + } + return cfg, nil +} + +// LoadHTTPConfigFile parses the given YAML file into a HTTPClientConfig. 
+func LoadHTTPConfigFile(filename string) (*HTTPClientConfig, []byte, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, nil, err + } + cfg, err := LoadHTTPConfig(string(content)) + if err != nil { + return nil, nil, err + } + cfg.SetDirectory(filepath.Dir(filepath.Dir(filename))) + return cfg, content, nil +} + // HTTPClientConfig configures an HTTP client. type HTTPClientConfig struct { // The HTTP basic authentication credentials for the targets. @@ -527,7 +565,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT return newRT(tlsConfig) } - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) + return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT) } type authorizationCredentialsRoundTripper struct { @@ -571,7 +609,7 @@ func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile s func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { if len(req.Header.Get("Authorization")) == 0 { - b, err := ioutil.ReadFile(rt.authCredentialsFile) + b, err := os.ReadFile(rt.authCredentialsFile) if err != nil { return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err) } @@ -609,7 +647,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } req = cloneRequest(req) if rt.passwordFile != "" { - bs, err := ioutil.ReadFile(rt.passwordFile) + bs, err := os.ReadFile(rt.passwordFile) if err != nil { return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err) } @@ -651,7 +689,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro ) if rt.config.ClientSecretFile != "" { - data, err := ioutil.ReadFile(rt.config.ClientSecretFile) + data, err := os.ReadFile(rt.config.ClientSecretFile) if err != nil { return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err) } @@ -696,7 +734,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro if len(rt.config.TLSConfig.CAFile) == 0 { t, _ = tlsTransport(tlsConfig) } else { - t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, tlsTransport) + t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, rt.config.TLSConfig.CertFile, rt.config.TLSConfig.KeyFile, tlsTransport) if err != nil { return nil, err } @@ -766,6 +804,13 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { tlsConfig := &tls.Config{ InsecureSkipVerify: cfg.InsecureSkipVerify, MinVersion: uint16(cfg.MinVersion), + MaxVersion: uint16(cfg.MaxVersion), + } + + if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { + if cfg.MaxVersion < cfg.MinVersion { + return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + } } // If a CA cert is provided then let's read it in so we can validate the @@ -813,6 +858,8 @@ type TLSConfig struct { InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"` // Minimum TLS version. MinVersion TLSVersion `yaml:"min_version,omitempty" json:"min_version,omitempty"` + // Maximum TLS version. + MaxVersion TLSVersion `yaml:"max_version,omitempty" json:"max_version,omitempty"` } // SetDirectory joins any relative file paths with dir. 
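The hunk above introduces the LoadHTTPConfig/LoadHTTPConfigFile helpers and a max_version TLS option in the vendored prometheus/common config package. A minimal sketch of how a caller might exercise them, assuming the API exactly as vendored here; the package main wrapper, the YAML literal, and the chosen version values are illustrative only and not part of this patch:

package main

import (
	"fmt"

	"github.com/prometheus/common/config"
)

func main() {
	// Parse an HTTP client config that pins the negotiated TLS version range
	// using the new LoadHTTPConfig helper and the new max_version field.
	cfg, err := config.LoadHTTPConfig(`
tls_config:
  min_version: TLS12
  max_version: TLS13
`)
	if err != nil {
		panic(err)
	}
	// The parsed versions print back as their symbolic names via the new String method.
	fmt.Println(cfg.TLSConfig.MinVersion.String(), cfg.TLSConfig.MaxVersion.String())
}
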
@@ -825,18 +872,45 @@ func (c *TLSConfig) SetDirectory(dir string) { c.KeyFile = JoinDir(dir, c.KeyFile) } +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain TLSConfig + return unmarshal((*plain)(c)) +} + +// readCertAndKey reads the cert and key files from the disk. +func readCertAndKey(certFile, keyFile string) ([]byte, []byte, error) { + certData, err := os.ReadFile(certFile) + if err != nil { + return nil, nil, err + } + + keyData, err := os.ReadFile(keyFile) + if err != nil { + return nil, nil, err + } + + return certData, keyData, nil +} + // getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) +func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certData, keyData, err := readCertAndKey(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + } + + cert, err := tls.X509KeyPair(certData, keyData) if err != nil { return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) } + return &cert, nil } // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } @@ -856,23 +930,30 @@ func updateRootCA(cfg *tls.Config, b []byte) bool { // tlsRoundTripper is a RoundTripper that updates automatically its TLS // configuration whenever the content of the CA file changes. type tlsRoundTripper struct { - caFile string + caFile string + certFile string + keyFile string + // newRT returns a new RoundTripper. newRT func(*tls.Config) (http.RoundTripper, error) - mtx sync.RWMutex - rt http.RoundTripper - hashCAFile []byte - tlsConfig *tls.Config + mtx sync.RWMutex + rt http.RoundTripper + hashCAFile []byte + hashCertFile []byte + hashKeyFile []byte + tlsConfig *tls.Config } func NewTLSRoundTripper( cfg *tls.Config, - caFile string, + caFile, certFile, keyFile string, newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ caFile: caFile, + certFile: certFile, + keyFile: keyFile, newRT: newRT, tlsConfig: cfg, } @@ -882,7 +963,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - _, t.hashCAFile, err = t.getCAWithHash() + _, t.hashCAFile, t.hashCertFile, t.hashKeyFile, err = t.getTLSFilesWithHash() if err != nil { return nil, err } @@ -890,25 +971,36 @@ func NewTLSRoundTripper( return t, nil } -func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) { - b, err := readCAFile(t.caFile) +func (t *tlsRoundTripper) getTLSFilesWithHash() ([]byte, []byte, []byte, []byte, error) { + b1, err := readCAFile(t.caFile) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err + } + h1 := sha256.Sum256(b1) + + var h2, h3 [32]byte + if t.certFile != "" { + b2, b3, err := readCertAndKey(t.certFile, t.keyFile) + if err != nil { + return nil, nil, nil, nil, err + } + h2, h3 = sha256.Sum256(b2), sha256.Sum256(b3) } - h := sha256.Sum256(b) - return b, h[:], nil + return b1, h1[:], h2[:], h3[:], nil } // RoundTrip implements the http.RoundTrip interface. 
func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b, h, err := t.getCAWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSFilesWithHash() if err != nil { return nil, err } t.mtx.RLock() - equal := bytes.Equal(h[:], t.hashCAFile) + equal := bytes.Equal(caHash[:], t.hashCAFile) && + bytes.Equal(certHash[:], t.hashCertFile) && + bytes.Equal(keyHash[:], t.hashKeyFile) rt := t.rt t.mtx.RUnlock() if equal { @@ -917,8 +1009,10 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { } // Create a new RoundTripper. + // The cert and key files are read separately by the client + // using GetClientCertificate. tlsConfig := t.tlsConfig.Clone() - if !updateRootCA(tlsConfig, b) { + if !updateRootCA(tlsConfig, caData) { return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) } rt, err = t.newRT(tlsConfig) @@ -929,7 +1023,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { t.mtx.Lock() t.rt = rt - t.hashCAFile = h[:] + t.hashCAFile = caHash[:] + t.hashCertFile = certHash[:] + t.hashKeyFile = keyHash[:] t.mtx.Unlock() return rt.RoundTrip(req) diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b0654..2946b8f1a64 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 3e2a7ee50e4..39bcfba6411 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -46,7 +46,7 @@ func NewCollector(program string) prometheus.Collector { ), ConstLabels: prometheus.Labels{ "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "goversion": GoVersion, }, @@ -69,7 +69,7 @@ func Print(program string) string { m := map[string]string{ "program": program, "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "buildUser": BuildUser, "buildDate": BuildDate, @@ -87,7 +87,7 @@ func Print(program string) string { // Info returns version, branch and revision information. func Info() string { - return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision) + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision()) } // BuildContext returns goVersion, buildUser and buildDate information. diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go new file mode 100644 index 00000000000..2ab0be009c2 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_default.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +package version + +func getRevision() string { + return Revision +} diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go new file mode 100644 index 00000000000..bed0f499429 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_go118.go @@ -0,0 +1,58 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package version + +import "runtime/debug" + +var computedRevision string + +func getRevision() string { + if Revision != "" { + return Revision + } + return computedRevision +} + +func init() { + computedRevision = computeRevision() +} + +func computeRevision() string { + var ( + rev = "unknown" + modified bool + ) + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return rev + } + for _, v := range buildInfo.Settings { + if v.Key == "vcs.revision" { + rev = v.Value + } + if v.Key == "vcs.modified" { + if v.Value == "true" { + modified = true + } + } + } + if modified { + return rev + "-modified" + } + return rev +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go index 328c5e0efa6..47bbca1747b 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go @@ -16,16 +16,18 @@ package web import ( "crypto/tls" "crypto/x509" + "errors" "fmt" - "io/ioutil" "net" "net/http" + "os" "path/filepath" + "github.com/coreos/go-systemd/v22/activation" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" config_util "github.com/prometheus/common/config" + "golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" ) @@ -34,47 +36,53 @@ var ( ) type Config struct { - TLSConfig TLSStruct `yaml:"tls_server_config"` - HTTPConfig HTTPStruct `yaml:"http_server_config"` + TLSConfig TLSConfig `yaml:"tls_server_config"` + HTTPConfig HTTPConfig `yaml:"http_server_config"` Users map[string]config_util.Secret `yaml:"basic_auth_users"` } -type TLSStruct struct { +type TLSConfig struct { TLSCertPath string `yaml:"cert_file"` TLSKeyPath string `yaml:"key_file"` ClientAuth string `yaml:"client_auth_type"` ClientCAs string `yaml:"client_ca_file"` - CipherSuites []cipher `yaml:"cipher_suites"` - CurvePreferences []curve `yaml:"curve_preferences"` - MinVersion tlsVersion `yaml:"min_version"` - MaxVersion tlsVersion `yaml:"max_version"` + CipherSuites []Cipher `yaml:"cipher_suites"` + 
CurvePreferences []Curve `yaml:"curve_preferences"` + MinVersion TLSVersion `yaml:"min_version"` + MaxVersion TLSVersion `yaml:"max_version"` PreferServerCipherSuites bool `yaml:"prefer_server_cipher_suites"` } +type FlagConfig struct { + WebListenAddresses *[]string + WebSystemdSocket *bool + WebConfigFile *string +} + // SetDirectory joins any relative file paths with dir. -func (t *TLSStruct) SetDirectory(dir string) { +func (t *TLSConfig) SetDirectory(dir string) { t.TLSCertPath = config_util.JoinDir(dir, t.TLSCertPath) t.TLSKeyPath = config_util.JoinDir(dir, t.TLSKeyPath) t.ClientCAs = config_util.JoinDir(dir, t.ClientCAs) } -type HTTPStruct struct { +type HTTPConfig struct { HTTP2 bool `yaml:"http2"` Header map[string]string `yaml:"headers,omitempty"` } func getConfig(configPath string) (*Config, error) { - content, err := ioutil.ReadFile(configPath) + content, err := os.ReadFile(configPath) if err != nil { return nil, err } c := &Config{ - TLSConfig: TLSStruct{ + TLSConfig: TLSConfig{ MinVersion: tls.VersionTLS12, MaxVersion: tls.VersionTLS13, PreferServerCipherSuites: true, }, - HTTPConfig: HTTPStruct{HTTP2: true}, + HTTPConfig: HTTPConfig{HTTP2: true}, } err = yaml.UnmarshalStrict(content, c) if err == nil { @@ -92,8 +100,8 @@ func getTLSConfig(configPath string) (*tls.Config, error) { return ConfigToTLSConfig(&c.TLSConfig) } -// ConfigToTLSConfig generates the golang tls.Config from the TLSStruct config. -func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) { +// ConfigToTLSConfig generates the golang tls.Config from the TLSConfig struct. +func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { if c.TLSCertPath == "" && c.TLSKeyPath == "" && c.ClientAuth == "" && c.ClientCAs == "" { return nil, errNoTLSConfig } @@ -109,7 +117,7 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) { loadCert := func() (*tls.Certificate, error) { cert, err := tls.LoadX509KeyPair(c.TLSCertPath, c.TLSKeyPath) if err != nil { - return nil, errors.Wrap(err, "failed to load X509KeyPair") + return nil, fmt.Errorf("failed to load X509KeyPair: %w", err) } return &cert, nil } @@ -147,7 +155,7 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) { if c.ClientCAs != "" { clientCAPool := x509.NewCertPool() - clientCAFile, err := ioutil.ReadFile(c.ClientCAs) + clientCAFile, err := os.ReadFile(c.ClientCAs) if err != nil { return nil, err } @@ -177,22 +185,54 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) { return cfg, nil } -// ListenAndServe starts the server on the given address. Based on the file -// tlsConfigPath, TLS or basic auth could be enabled. -func ListenAndServe(server *http.Server, tlsConfigPath string, logger log.Logger) error { - listener, err := net.Listen("tcp", server.Addr) - if err != nil { - return err +// ServeMultiple starts the server on the given listeners. The FlagConfig is +// also passed on to Serve. +func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { + errs := new(errgroup.Group) + for _, l := range listeners { + l := l + errs.Go(func() error { + return Serve(l, server, flags, logger) + }) + } + return errs.Wait() +} + +// ListenAndServe starts the server on addresses given in WebListenAddresses in +// the FlagConfig or instead uses systemd socket activated listeners if +// WebSystemdSocket in the FlagConfig is true. The FlagConfig is also passed on +// to ServeMultiple. 
+func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) error { + if *flags.WebSystemdSocket { + level.Info(logger).Log("msg", "Listening on systemd activated listeners instead of port listeners.") + listeners, err := activation.Listeners() + if err != nil { + return err + } + if len(listeners) < 1 { + return errors.New("no socket activation file descriptors found") + } + return ServeMultiple(listeners, server, flags, logger) } - defer listener.Close() - return Serve(listener, server, tlsConfigPath, logger) + listeners := make([]net.Listener, 0, len(*flags.WebListenAddresses)) + for _, address := range *flags.WebListenAddresses { + listener, err := net.Listen("tcp", address) + if err != nil { + return err + } + defer listener.Close() + listeners = append(listeners, listener) + } + return ServeMultiple(listeners, server, flags, logger) } -// Server starts the server on the given listener. Based on the file -// tlsConfigPath, TLS or basic auth could be enabled. -func Serve(l net.Listener, server *http.Server, tlsConfigPath string, logger log.Logger) error { +// Server starts the server on the given listener. Based on the file path +// WebConfigFile in the FlagConfig, TLS or basic auth could be enabled. +func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { + level.Info(logger).Log("msg", "Listening on", "address", l.Addr().String()) + tlsConfigPath := *flags.WebConfigFile if tlsConfigPath == "" { - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false) + level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) } @@ -225,10 +265,10 @@ func Serve(l net.Listener, server *http.Server, tlsConfigPath string, logger log server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) } // Valid TLS config. - level.Info(logger).Log("msg", "TLS is enabled.", "http2", c.HTTPConfig.HTTP2) + level.Info(logger).Log("msg", "TLS is enabled.", "http2", c.HTTPConfig.HTTP2, "address", l.Addr().String()) case errNoTLSConfig: // No TLS config, back to plain HTTP. - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false) + level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) default: // Invalid TLS config. 
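The exporter-toolkit changes above replace the single tlsConfigPath argument with a FlagConfig carrying the listen addresses, the systemd-socket toggle, and the web config file path. A short sketch of how a caller might wire the new API, using the FlagConfig fields and ListenAndServe signature shown in the vendored diff; the addresses, handler, and nop logger are illustrative assumptions:

package main

import (
	"fmt"
	"net/http"

	"github.com/go-kit/log"
	"github.com/prometheus/exporter-toolkit/web"
)

func main() {
	// Values that would normally come from command-line flags.
	listenAddresses := []string{":9090"}
	systemdSocket := false
	webConfigFile := "" // empty path: plain HTTP, no TLS or basic auth

	flags := &web.FlagConfig{
		WebListenAddresses: &listenAddresses,
		WebSystemdSocket:   &systemdSocket,
		WebConfigFile:      &webConfigFile,
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	// ListenAndServe now takes the FlagConfig instead of a TLS config path.
	if err := web.ListenAndServe(&http.Server{Handler: mux}, flags, log.NewNopLogger()); err != nil {
		panic(err)
	}
}
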
@@ -269,9 +309,9 @@ func Validate(tlsConfigPath string) error { return err } -type cipher uint16 +type Cipher uint16 -func (c *cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string err := unmarshal((*string)(&s)) if err != nil { @@ -279,27 +319,27 @@ func (c *cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { } for _, cs := range tls.CipherSuites() { if cs.Name == s { - *c = (cipher)(cs.ID) + *c = (Cipher)(cs.ID) return nil } } return errors.New("unknown cipher: " + s) } -func (c cipher) MarshalYAML() (interface{}, error) { +func (c Cipher) MarshalYAML() (interface{}, error) { return tls.CipherSuiteName((uint16)(c)), nil } -type curve tls.CurveID +type Curve tls.CurveID -var curves = map[string]curve{ - "CurveP256": (curve)(tls.CurveP256), - "CurveP384": (curve)(tls.CurveP384), - "CurveP521": (curve)(tls.CurveP521), - "X25519": (curve)(tls.X25519), +var curves = map[string]Curve{ + "CurveP256": (Curve)(tls.CurveP256), + "CurveP384": (Curve)(tls.CurveP384), + "CurveP521": (Curve)(tls.CurveP521), + "X25519": (Curve)(tls.X25519), } -func (c *curve) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Curve) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string err := unmarshal((*string)(&s)) if err != nil { @@ -312,7 +352,7 @@ func (c *curve) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("unknown curve: " + s) } -func (c *curve) MarshalYAML() (interface{}, error) { +func (c *Curve) MarshalYAML() (interface{}, error) { for s, curveid := range curves { if *c == curveid { return s, nil @@ -321,16 +361,16 @@ func (c *curve) MarshalYAML() (interface{}, error) { return fmt.Sprintf("%v", c), nil } -type tlsVersion uint16 +type TLSVersion uint16 -var tlsVersions = map[string]tlsVersion{ - "TLS13": (tlsVersion)(tls.VersionTLS13), - "TLS12": (tlsVersion)(tls.VersionTLS12), - "TLS11": (tlsVersion)(tls.VersionTLS11), - "TLS10": (tlsVersion)(tls.VersionTLS10), +var tlsVersions = map[string]TLSVersion{ + "TLS13": (TLSVersion)(tls.VersionTLS13), + "TLS12": (TLSVersion)(tls.VersionTLS12), + "TLS11": (TLSVersion)(tls.VersionTLS11), + "TLS10": (TLSVersion)(tls.VersionTLS10), } -func (tv *tlsVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string err := unmarshal((*string)(&s)) if err != nil { @@ -343,7 +383,7 @@ func (tv *tlsVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("unknown TLS version: " + s) } -func (tv *tlsVersion) MarshalYAML() (interface{}, error) { +func (tv *TLSVersion) MarshalYAML() (interface{}, error) { for s, v := range tlsVersions { if *tv == v { return s, nil @@ -356,6 +396,6 @@ func (tv *tlsVersion) MarshalYAML() (interface{}, error) { // tlsConfigPath, TLS or basic auth could be enabled. // // Deprecated: Use ListenAndServe instead. 
-func Listen(server *http.Server, tlsConfigPath string, logger log.Logger) error { - return ListenAndServe(server, tlsConfigPath, logger) +func Listen(server *http.Server, flags *FlagConfig, logger log.Logger) error { + return ListenAndServe(server, flags, logger) } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml b/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml index 7d40d9b7086..984aa0dbff3 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml +++ b/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml @@ -3,4 +3,3 @@ tls_server_config: cert_file: server.crt key_file: server.key - diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index a13f397f81e..8e8460d4c59 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -776,12 +776,13 @@ func CheckTargetAddress(address model.LabelValue) error { // RemoteWriteConfig is the configuration for writing to remote storage. type RemoteWriteConfig struct { - URL *config.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` - Name string `yaml:"name,omitempty"` - SendExemplars bool `yaml:"send_exemplars,omitempty"` + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` + Name string `yaml:"name,omitempty"` + SendExemplars bool `yaml:"send_exemplars,omitempty"` + SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. diff --git a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go index 043b2f7ba87..919567a53b3 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go +++ b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go @@ -114,7 +114,10 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { now := time.Now() - defer d.duration.Observe(time.Since(now).Seconds()) + defer func() { + d.duration.Observe(time.Since(now).Seconds()) + }() + tgs, err := d.refreshf(ctx) if err != nil { d.failures.Inc() diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go new file mode 100644 index 00000000000..d75afd10ed2 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -0,0 +1,871 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "strings" +) + +// FloatHistogram is similar to Histogram but uses float64 for all +// counts. Additionally, bucket counts are absolute and not deltas. +// +// A FloatHistogram is needed by PromQL to handle operations that might result +// in fractional counts. Since the counts in a histogram are unlikely to be too +// large to be represented precisely by a float64, a FloatHistogram can also be +// used to represent a histogram with integer counts and thus serves as a more +// generalized representation. +type FloatHistogram struct { + // Currently valid schema numbers are -4 <= n <= 8. They are all for + // base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. Or + // in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. Must be zero or positive. + ZeroCount float64 + // Total number of observations. Must be zero or positive. + Count float64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. Each represents an absolute count and + // must be zero or positive. + PositiveBuckets, NegativeBuckets []float64 +} + +// Copy returns a deep copy of the Histogram. +func (h *FloatHistogram) Copy() *FloatHistogram { + c := *h + + if h.PositiveSpans != nil { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if h.NegativeSpans != nil { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if h.PositiveBuckets != nil { + c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + if h.NegativeBuckets != nil { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + + return &c +} + +// CopyToSchema works like Copy, but the returned deep copy has the provided +// target schema, which must be ≤ the original schema (i.e. it must have a lower +// resolution). +func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { + if targetSchema == h.Schema { + // Fast path. + return h.Copy() + } + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) + } + c := FloatHistogram{ + Schema: targetSchema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.ZeroCount, + Count: h.Count, + Sum: h.Sum, + } + + // TODO(beorn7): This is a straight-forward implementation using merging + // iterators for the original buckets and then adding one merged bucket + // after another to the newly created FloatHistogram. It's well possible + // that a more involved implementation performs much better, which we + // could do if this code path turns out to be performance-critical. 
+ var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); { + b := it.At() + c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); { + b := it.At() + c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + + return &c +} + +// String returns a string representation of the Histogram. +func (h *FloatHistogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[float64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. +func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + return Bucket[float64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// Scale scales the FloatHistogram by the provided factor, i.e. it scales all +// bucket counts including the zero bucket and the count and the sum of +// observations. The bucket layout stays the same. This method changes the +// receiving histogram directly (rather than acting on a copy). It returns a +// pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { + h.ZeroCount *= factor + h.Count *= factor + h.Sum *= factor + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] *= factor + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] *= factor + } + return h +} + +// Add adds the provided other histogram to the receiving histogram. Count, Sum, +// and buckets from the other histogram are added to the corresponding +// components of the receiving histogram. Buckets in the other histogram that do +// not exist in the receiving histogram are inserted into the latter. The +// resulting histogram might have buckets with a population of zero or directly +// adjacent spans (offset=0). To normalize those, call the Compact method. +// +// The method reconciles differences in the zero threshold and in the schema, +// but the schema of the other histogram must be ≥ the schema of the receiving +// histogram (i.e. must have an equal or higher resolution). This means that the +// schema of the receiving histogram won't change. Its zero threshold, however, +// will change if needed. The other histogram will not be modified in any case. +// +// This method returns a pointer to the receiving histogram for convenience. 
+func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + h.Count += other.Count + h.Sum += other.Sum + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and create missing buckets in h in batches. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + return h +} + +// Sub works like Add but subtracts the other histogram. +func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + h.Count -= other.Count + h.Sum -= other.Sum + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and create missing buckets in h in batches. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + b.Count *= -1 + h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + b.Count *= -1 + h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + return h +} + +// addBucket takes the "coordinates" of the last bucket that was handled and +// adds the provided bucket after it. If a corresponding bucket exists, the +// count is added. If not, the bucket is inserted. The updated slices and the +// coordinates of the inserted or added-to bucket are returned. +func addBucket( + b Bucket[float64], + spans []Span, buckets []float64, + iSpan, iBucket int, + iInSpan, index int32, +) ( + newSpans []Span, newBuckets []float64, + newISpan, newIBucket int, newIInSpan int32, +) { + if iSpan == -1 { + // First add, check if it is before all spans. + if len(spans) == 0 || spans[0].Offset > b.Index { + // Add bucket before all others. + buckets = append(buckets, 0) + copy(buckets[1:], buckets) + buckets[0] = b.Count + if len(spans) > 0 && spans[0].Offset == b.Index+1 { + spans[0].Length++ + spans[0].Offset-- + return spans, buckets, 0, 0, 0 + } + spans = append(spans, Span{}) + copy(spans[1:], spans) + spans[0] = Span{Offset: b.Index, Length: 1} + if len(spans) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spans[1].Offset -= b.Index + 1 + } + return spans, buckets, 0, 0, 0 + } + if spans[0].Offset == b.Index { + // Just add to first bucket. + buckets[0] += b.Count + return spans, buckets, 0, 0, 0 + } + // We are behind the first bucket, so set everything to the + // first bucket and continue normally. 
+ iSpan, iBucket, iInSpan = 0, 0, 0 + index = spans[0].Offset + } + deltaIndex := b.Index - index + for { + remainingInSpan := int32(spans[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + buckets[iBucket] += b.Count + return spans, buckets, iSpan, iBucket, iInSpan + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + buckets = append(buckets, 0) + copy(buckets[iBucket+1:], buckets[iBucket:]) + buckets[iBucket] = b.Count + if deltaIndex == 0 { + // Directly after previous span, extend previous span. + if iSpan < len(spans) { + spans[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spans[iSpan].Length) + spans[iSpan].Length++ + return spans, buckets, iSpan, iBucket, iInSpan + } + if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { + // Directly before next span, extend next span. + iInSpan = 0 + spans[iSpan].Offset-- + spans[iSpan].Length++ + return spans, buckets, iSpan, iBucket, iInSpan + } + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spans) { + spans[iSpan].Offset -= deltaIndex + 1 + } + spans = append(spans, Span{}) + copy(spans[iSpan+1:], spans[iSpan:]) + spans[iSpan] = Span{Length: 1, Offset: deltaIndex} + return spans, buckets, iSpan, iBucket, iInSpan + } + // Try start of next span. + deltaIndex -= spans[iSpan].Offset + iInSpan = 0 + } +} + +// Compact eliminates empty buckets at the beginning and end of each span, then +// merges spans that are consecutive or at most maxEmptyBuckets apart, and +// finally splits spans that contain more consecutive empty buckets than +// maxEmptyBuckets. (The actual implementation might do something more efficient +// but with the same result.) The compaction happens "in place" in the +// receiving histogram, but a pointer to it is returned for convenience. +// +// The ideal value for maxEmptyBuckets depends on circumstances. The motivation +// to set maxEmptyBuckets > 0 is the assumption that is is less overhead to +// represent very few empty buckets explicitly within one span than cutting the +// one span into two to treat the empty buckets as a gap between the two spans, +// both in terms of storage requirement as well as in terms of encoding and +// decoding effort. However, the tradeoffs are subtle. For one, they are +// different in the exposition format vs. in a TSDB chunk vs. for the in-memory +// representation as Go types. In the TSDB, as an additional aspects, the span +// layout is only stored once per chunk, while many histograms with that same +// chunk layout are then only stored with their buckets (so that even a single +// empty bucket will be stored many times). +// +// For the Go types, an additional Span takes 8 bytes. Similarly, an additional +// bucket takes 8 bytes. Therefore, with a single separating empty bucket, both +// options have the same storage requirement, but the single-span solution is +// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0 +// and only use a larger number if you know what you are doing. 
+func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram { + h.PositiveBuckets, h.PositiveSpans = compactBuckets( + h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false, + ) + h.NegativeBuckets, h.NegativeSpans = compactBuckets( + h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false, + ) + return h +} + +// DetectReset returns true if the receiving histogram is missing any buckets +// that have a non-zero population in the provided previous histogram. It also +// returns true if any count (in any bucket, in the zero count, or in the count +// of observations, but NOT the sum of observations) is smaller in the receiving +// histogram compared to the previous histogram. Otherwise, it returns false. +// +// Special behavior in case the Schema or the ZeroThreshold are not the same in +// both histograms: +// +// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an +// increase of resolution) can only happen together with a reset. Thus, the +// method returns true in either case. +// +// - Upon an increase of the ZeroThreshold, the buckets in the previous +// histogram that fall within the new ZeroThreshold are added to the ZeroCount +// of the previous histogram (without mutating the provided previous +// histogram). The scenario that a populated bucket of the previous histogram +// is partially within, partially outside of the new ZeroThreshold, can only +// happen together with a counter reset and therefore shortcuts to returning +// true. +// +// - Upon a decrease of the Schema, the buckets of the previous histogram are +// merged so that they match the new, lower-resolution schema (again without +// mutating the provided previous histogram). +// +// Note that this kind of reset detection is quite expensive. Ideally, resets +// are detected at ingest time and stored in the TSDB, so that the reset +// information can be read directly from there rather than be detected each time +// again. +func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { + if h.Count < previous.Count { + return true + } + if h.Schema > previous.Schema { + return true + } + if h.ZeroThreshold < previous.ZeroThreshold { + // ZeroThreshold decreased. + return true + } + previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold) + if newThreshold != h.ZeroThreshold { + // ZeroThreshold is within a populated bucket in previous + // histogram. + return true + } + if h.ZeroCount < previousZeroCount { + return true + } + currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + if detectReset(currIt, prevIt) { + return true + } + currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + return detectReset(currIt, prevIt) +} + +func detectReset(currIt, prevIt BucketIterator[float64]) bool { + if !prevIt.Next() { + return false // If no buckets in previous histogram, nothing can be reset. + } + prevBucket := prevIt.At() + if !currIt.Next() { + // No bucket in current, but at least one in previous + // histogram. Check if any of those are non-zero, in which case + // this is a reset. + for { + if prevBucket.Count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket := currIt.At() + for { + // Forward currIt until we find the bucket corresponding to prevBucket. 
+ for currBucket.Index < prevBucket.Index { + if !currIt.Next() { + // Reached end of currIt early, therefore + // previous histogram has a bucket that the + // current one does not have. Unlass all + // remaining buckets in the previous histogram + // are unpopulated, this is a reset. + for { + if prevBucket.Count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket = currIt.At() + } + if currBucket.Index > prevBucket.Index { + // Previous histogram has a bucket the current one does + // not have. If it's populated, it's a reset. + if prevBucket.Count != 0 { + return true + } + } else { + // We have reached corresponding buckets in both iterators. + // We can finally compare the counts. + if currBucket.Count < prevBucket.Count { + return true + } + } + if !prevIt.Next() { + // Reached end of prevIt without finding offending buckets. + return false + } + prevBucket = prevIt.At() + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] { + return h.floatBucketIterator(true, 0, h.Schema) +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going +// down). +func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { + return h.floatBucketIterator(false, 0, h.Schema) +} + +// PositiveReverseBucketIterator returns a BucketIterator to iterate over all +// positive buckets in descending order (starting at the highest bucket and +// going down towards the zero bucket). +func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { + return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) +} + +// NegativeReverseBucketIterator returns a BucketIterator to iterate over all +// negative buckets in ascending order (starting at the lowest bucket and going +// up towards the zero bucket). +func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { + return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) +} + +// AllBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in ascending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + negIter: h.NegativeReverseBucketIterator(), + posIter: h.PositiveBucketIterator(), + state: -1, + } +} + +// zeroCountForLargerThreshold returns what the histogram's zero count would be +// if the ZeroThreshold had the provided larger (or equal) value. If the +// provided value is less than the histogram's ZeroThreshold, the method panics. +// If the largerThreshold ends up within a populated bucket of the histogram, it +// is adjusted upwards to the lower limit of that bucket (all in terms of +// absolute values) and that bucket's count is included in the returned +// count. The adjusted threshold is returned, too. +func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) { + // Fast path. 
+ if largerThreshold == h.ZeroThreshold { + return h.ZeroCount, largerThreshold + } + if largerThreshold < h.ZeroThreshold { + panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold)) + } +outer: + for { + count = h.ZeroCount + i := h.PositiveBucketIterator() + for i.Next() { + b := i.At() + if b.Lower >= largerThreshold { + break + } + count += b.Count // Bucket to be merged into zero bucket. + if b.Upper > largerThreshold { + // New threshold ended up within a bucket. if it's + // populated, we need to adjust largerThreshold before + // we are done here. + if b.Count != 0 { + largerThreshold = b.Upper + } + break + } + } + i = h.NegativeBucketIterator() + for i.Next() { + b := i.At() + if b.Upper <= -largerThreshold { + break + } + count += b.Count // Bucket to be merged into zero bucket. + if b.Lower < -largerThreshold { + // New threshold ended up within a bucket. If + // it's populated, we need to adjust + // largerThreshold and have to redo the whole + // thing because the treatment of the positive + // buckets is invalid now. + if b.Count != 0 { + largerThreshold = -b.Lower + continue outer + } + break + } + } + return count, largerThreshold + } +} + +// trimBucketsInZeroBucket removes all buckets that are within the zero +// bucket. It assumes that the zero threshold is at a bucket boundary and that +// the counts in the buckets to remove are already part of the zero count. +func (h *FloatHistogram) trimBucketsInZeroBucket() { + i := h.PositiveBucketIterator() + bucketsIdx := 0 + for i.Next() { + b := i.At() + if b.Lower >= h.ZeroThreshold { + break + } + h.PositiveBuckets[bucketsIdx] = 0 + bucketsIdx++ + } + i = h.NegativeBucketIterator() + bucketsIdx = 0 + for i.Next() { + b := i.At() + if b.Upper <= -h.ZeroThreshold { + break + } + h.NegativeBuckets[bucketsIdx] = 0 + bucketsIdx++ + } + // We are abusing Compact to trim the buckets set to zero + // above. Premature compacting could cause additional cost, but this + // code path is probably rarely used anyway. + h.Compact(0) +} + +// reconcileZeroBuckets finds a zero bucket large enough to include the zero +// buckets of both histograms (the receiving histogram and the other histogram) +// with a zero threshold that is not within a populated bucket in either +// histogram. This method modifies the receiving histogram accourdingly, but +// leaves the other histogram as is. Instead, it returns the zero count the +// other histogram would have if it were modified. +func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { + otherZeroCount := other.ZeroCount + otherZeroThreshold := other.ZeroThreshold + + for otherZeroThreshold != h.ZeroThreshold { + if h.ZeroThreshold > otherZeroThreshold { + otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold) + } + if otherZeroThreshold > h.ZeroThreshold { + h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold) + h.trimBucketsInZeroBucket() + } + } + return otherZeroCount +} + +// floatBucketIterator is a low-level constructor for bucket iterators. +// +// If positive is true, the returned iterator iterates through the positive +// buckets, otherwise through the negative buckets. +// +// If absoluteStartValue is < the lowest absolute value of any upper bucket +// boundary, the iterator starts with the first bucket. Otherwise, it will skip +// all buckets with an absolute value of their upper boundary ≤ +// absoluteStartValue. 
+// +// targetSchema must be ≤ the schema of FloatHistogram (and of course within the +// legal values for schemas in general). The buckets are merged to match the +// targetSchema prior to iterating (without mutating FloatHistogram). +func (h *FloatHistogram) floatBucketIterator( + positive bool, absoluteStartValue float64, targetSchema int32, +) *floatBucketIterator { + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) + } + i := &floatBucketIterator{ + baseBucketIterator: baseBucketIterator[float64, float64]{ + schema: h.Schema, + positive: positive, + }, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + } + if positive { + i.spans = h.PositiveSpans + i.buckets = h.PositiveBuckets + } else { + i.spans = h.NegativeSpans + i.buckets = h.NegativeBuckets + } + return i +} + +// reverseFloatbucketiterator is a low-level constructor for reverse bucket iterators. +func newReverseFloatBucketIterator( + spans []Span, buckets []float64, schema int32, positive bool, +) *reverseFloatBucketIterator { + r := &reverseFloatBucketIterator{ + baseBucketIterator: baseBucketIterator[float64, float64]{ + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + }, + } + + r.spansIdx = len(r.spans) - 1 + r.bucketsIdx = len(r.buckets) - 1 + if r.spansIdx >= 0 { + r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1 + } + r.currIdx = 0 + for _, s := range r.spans { + r.currIdx += s.Offset + int32(s.Length) + } + + return r +} + +type floatBucketIterator struct { + baseBucketIterator[float64, float64] + + targetSchema int32 // targetSchema is the schema to merge to and must be ≤ schema. + origIdx int32 // The bucket index within the original schema. + absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. +} + +func (i *floatBucketIterator) Next() bool { + if i.spansIdx >= len(i.spans) { + return false + } + + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. + origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + +mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop + } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := i.targetIdx(origIdx) + if firstPass { + i.currIdx = currIdx + firstPass = false + } else if currIdx != i.currIdx { + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. + break mergeLoop + } + } + // Skip buckets before absoluteStartValue. + // TODO(beorn7): Maybe do something more efficient than this recursive call. 
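+	// getBound(i.currIdx, i.targetSchema) is the absolute upper boundary of
+	// the bucket just assembled, so any bucket whose upper boundary does not
+	// exceed absoluteStartValue is skipped here.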
+ if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + return i.Next() + } + return true +} + +// targetIdx returns the bucket index within i.targetSchema for the given bucket +// index within i.schema. +func (i *floatBucketIterator) targetIdx(idx int32) int32 { + if i.schema == i.targetSchema { + // Fast path for the common case. The below would yield the same + // result, just with more effort. + return idx + } + return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 +} + +type reverseFloatBucketIterator struct { + baseBucketIterator[float64, float64] + idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. +} + +func (i *reverseFloatBucketIterator) Next() bool { + i.currIdx-- + if i.bucketsIdx < 0 { + return false + } + + for i.idxInSpan < 0 { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + i.spansIdx-- + i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1 + i.currIdx -= i.spans[i.spansIdx+1].Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.bucketsIdx-- + i.idxInSpan-- + return true +} + +type allFloatBucketIterator struct { + h *FloatHistogram + negIter, posIter BucketIterator[float64] + // -1 means we are iterating negative buckets. + // 0 means it is time for the zero bucket. + // 1 means we are iterating positive buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allFloatBucketIterator) Next() bool { + switch i.state { + case -1: + if i.negIter.Next() { + i.currBucket = i.negIter.At() + if i.currBucket.Upper > -i.h.ZeroThreshold { + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = 1 + if i.h.ZeroCount > 0 { + i.currBucket = Bucket[float64]{ + Lower: -i.h.ZeroThreshold, + Upper: i.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: i.h.ZeroCount, + // Index is irrelevant for the zero bucket. + } + return true + } + return i.Next() + case 1: + if i.posIter.Next() { + i.currBucket = i.posIter.At() + if i.currBucket.Lower < i.h.ZeroThreshold { + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go new file mode 100644 index 00000000000..c62be0b08cf --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -0,0 +1,536 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// BucketCount is a type constraint for the count in a bucket, which can be +// float64 (for type FloatHistogram) or uint64 (for type Histogram). 
+type BucketCount interface { + float64 | uint64 +} + +// internalBucketCount is used internally by Histogram and FloatHistogram. The +// difference to the BucketCount above is that Histogram internally uses deltas +// between buckets rather than absolute counts (while FloatHistogram uses +// absolute counts directly). Go type parameters don't allow type +// specialization. Therefore, where special treatment of deltas between buckets +// vs. absolute counts is important, this information has to be provided as a +// separate boolean parameter "deltaBuckets" +type internalBucketCount interface { + float64 | int64 +} + +// Bucket represents a bucket with lower and upper limit and the absolute count +// of samples in the bucket. It also specifies if each limit is inclusive or +// not. (Mathematically, inclusive limits create a closed interval, and +// non-inclusive limits an open interval.) +// +// To represent cumulative buckets, Lower is set to -Inf, and the Count is then +// cumulative (including the counts of all buckets for smaller values). +type Bucket[BC BucketCount] struct { + Lower, Upper float64 + LowerInclusive, UpperInclusive bool + Count BC + + // Index within schema. To easily compare buckets that share the same + // schema and sign (positive or negative). Irrelevant for the zero bucket. + Index int32 +} + +// String returns a string representation of a Bucket, using the usual +// mathematical notation of '['/']' for inclusive bounds and '('/')' for +// non-inclusive bounds. +func (b Bucket[BC]) String() string { + var sb strings.Builder + if b.LowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if b.UpperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +// BucketIterator iterates over the buckets of a Histogram, returning decoded +// buckets. +type BucketIterator[BC BucketCount] interface { + // Next advances the iterator by one. + Next() bool + // At returns the current bucket. + At() Bucket[BC] +} + +// baseBucketIterator provides a struct that is shared by most BucketIterator +// implementations, together with an implementation of the At method. This +// iterator can be embedded in full implementations of BucketIterator to save on +// code replication. +type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct { + schema int32 + spans []Span + buckets []IBC + + positive bool // Whether this is for positive buckets. + + spansIdx int // Current span within spans slice. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + bucketsIdx int // Current bucket within buckets slice. + + currCount IBC // Count in the current bucket. + currIdx int32 // The actual bucket index. +} + +func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { + bucket := Bucket[BC]{ + Count: BC(b.currCount), + Index: b.currIdx, + } + if b.positive { + bucket.Upper = getBound(b.currIdx, b.schema) + bucket.Lower = getBound(b.currIdx-1, b.schema) + } else { + bucket.Lower = -getBound(b.currIdx, b.schema) + bucket.Upper = -getBound(b.currIdx-1, b.schema) + } + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 + return bucket +} + +// compactBuckets is a generic function used by both Histogram.Compact and +// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. 
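+//
+// maxEmptyBuckets is the number of consecutive empty buckets tolerated in the
+// middle of a span; longer runs of empty buckets (and any empty buckets at the
+// start or end of a span) are cut out. Conversely, spans separated by no more
+// than maxEmptyBuckets empty buckets are merged, with explicit empty buckets
+// inserted.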
+func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { + // Fast path: If there are no empty buckets AND no offset in any span is + // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return + // immediately. We check that first because it's cheap and presumably + // common. + nothingToDo := true + var currentBucketAbsolute IBC + for _, bucket := range buckets { + if deltaBuckets { + currentBucketAbsolute += bucket + } else { + currentBucketAbsolute = bucket + } + if currentBucketAbsolute == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + for _, span := range spans { + if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + return buckets, spans + } + } + + var iBucket, iSpan int + var posInSpan uint32 + currentBucketAbsolute = 0 + + // Helper function. + emptyBucketsHere := func() int { + i := 0 + abs := currentBucketAbsolute + for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 { + i++ + if i+iBucket >= len(buckets) { + break + } + abs = buckets[i+iBucket] + } + return i + } + + // Merge spans with zero-offset to avoid special cases later. + if len(spans) > 1 { + for i, span := range spans[1:] { + if span.Offset == 0 { + spans[iSpan].Length += span.Length + continue + } + iSpan++ + if i+1 != iSpan { + spans[iSpan] = span + } + } + spans = spans[:iSpan+1] + iSpan = 0 + } + + // Merge spans with zero-length to avoid special cases later. + for i, span := range spans { + if span.Length == 0 { + if i+1 < len(spans) { + spans[i+1].Offset += span.Offset + } + continue + } + if i != iSpan { + spans[iSpan] = span + } + iSpan++ + } + spans = spans[:iSpan] + iSpan = 0 + + // Cut out empty buckets from start and end of spans, no matter + // what. Also cut out empty buckets from the middle of a span but only + // if there are more than maxEmptyBuckets consecutive empty buckets. + for iBucket < len(buckets) { + if deltaBuckets { + currentBucketAbsolute += buckets[iBucket] + } else { + currentBucketAbsolute = buckets[iBucket] + } + if nEmpty := emptyBucketsHere(); nEmpty > 0 { + if posInSpan > 0 && + nEmpty < int(spans[iSpan].Length-posInSpan) && + nEmpty <= maxEmptyBuckets { + // The empty buckets are in the middle of a + // span, and there are few enough to not bother. + // Just fast-forward. + iBucket += nEmpty + if deltaBuckets { + currentBucketAbsolute = 0 + } + posInSpan += uint32(nEmpty) + continue + } + // In all other cases, we cut out the empty buckets. + if deltaBuckets && iBucket+nEmpty < len(buckets) { + currentBucketAbsolute = -buckets[iBucket] + buckets[iBucket+nEmpty] += buckets[iBucket] + } + buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...) + if posInSpan == 0 { + // Start of span. + if nEmpty == int(spans[iSpan].Length) { + // The whole span is empty. + offset := spans[iSpan].Offset + spans = append(spans[:iSpan], spans[iSpan+1:]...) + if len(spans) > iSpan { + spans[iSpan].Offset += offset + int32(nEmpty) + } + continue + } + spans[iSpan].Length -= uint32(nEmpty) + spans[iSpan].Offset += int32(nEmpty) + continue + } + // It's in the middle or in the end of the span. + // Split the current span. + newSpan := Span{ + Offset: int32(nEmpty), + Length: spans[iSpan].Length - posInSpan - uint32(nEmpty), + } + spans[iSpan].Length = posInSpan + // In any case, we have to split to the next span. 
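+			// For example, with maxEmptyBuckets=0, cutting two empty
+			// buckets sitting at positions 2 and 3 of a span
+			// {Offset: 0, Length: 5} leaves {Offset: 0, Length: 2}
+			// followed by a new span {Offset: 2, Length: 1}.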
+ iSpan++ + posInSpan = 0 + if newSpan.Length == 0 { + // The span is empty, so we were already at the end of a span. + // We don't have to insert the new span, just adjust the next + // span's offset, if there is one. + if iSpan < len(spans) { + spans[iSpan].Offset += int32(nEmpty) + } + continue + } + // Insert the new span. + spans = append(spans, Span{}) + if iSpan+1 < len(spans) { + copy(spans[iSpan+1:], spans[iSpan:]) + } + spans[iSpan] = newSpan + continue + } + iBucket++ + posInSpan++ + if posInSpan >= spans[iSpan].Length { + posInSpan = 0 + iSpan++ + } + } + if maxEmptyBuckets == 0 || len(buckets) == 0 { + return buckets, spans + } + + // Finally, check if any offsets between spans are small enough to merge + // the spans. + iBucket = int(spans[0].Length) + if deltaBuckets { + currentBucketAbsolute = 0 + for _, bucket := range buckets[:iBucket] { + currentBucketAbsolute += bucket + } + } + iSpan = 1 + for iSpan < len(spans) { + if int(spans[iSpan].Offset) > maxEmptyBuckets { + l := int(spans[iSpan].Length) + if deltaBuckets { + for _, bucket := range buckets[iBucket : iBucket+l] { + currentBucketAbsolute += bucket + } + } + iBucket += l + iSpan++ + continue + } + // Merge span with previous one and insert empty buckets. + offset := int(spans[iSpan].Offset) + spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length + spans = append(spans[:iSpan], spans[iSpan+1:]...) + newBuckets := make([]IBC, len(buckets)+offset) + copy(newBuckets, buckets[:iBucket]) + copy(newBuckets[iBucket+offset:], buckets[iBucket:]) + if deltaBuckets { + newBuckets[iBucket] = -currentBucketAbsolute + newBuckets[iBucket+offset] += currentBucketAbsolute + } + iBucket += offset + buckets = newBuckets + currentBucketAbsolute = buckets[iBucket] + // Note that with many merges, it would be more efficient to + // first record all the chunks of empty buckets to insert and + // then do it in one go through all the buckets. + } + + return buckets, spans +} + +func getBound(idx, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with an idx + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its idx + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) 
However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := int(idx) << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := idx & ((1 << schema) - 1) + frac := exponentialBounds[schema][fracIdx] + exp := (int(idx) >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// exponentialBounds is a precalculated table of bucket bounds in the interval +// [0.5,1) in schema 0 to 8. +var exponentialBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 
0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 
0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 
0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go new file mode 100644 index 00000000000..934c4dde9c2 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -0,0 +1,448 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// Histogram encodes a sparse, high-resolution histogram. See the design +// document for full details: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit# +// +// The most tricky bit is how bucket indices represent real bucket boundaries. +// An example for schema 0 (by which each bucket is twice as wide as the +// previous bucket): +// +// Bucket boundaries → [-2,-1) [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1] (1,2] .... +// ↑ ↑ ↑ ↑ ↑ ↑ ↑ +// Zero bucket (width e.g. 0.001) → | | | ZB | | | +// Positive bucket indices → | | | ... -1 0 1 2 3 +// Negative bucket indices → 3 2 1 0 -1 ... +// +// Which bucket indices are actually used is determined by the spans. +type Histogram struct { + // Currently valid schema numbers are -4 <= n <= 8. They are all for + // base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. Or + // in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. + ZeroCount uint64 + // Total number of observations. + Count uint64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. The first element is an absolute + // count. 
All following ones are deltas relative to the previous + // element. + PositiveBuckets, NegativeBuckets []int64 +} + +// A Span defines a continuous sequence of buckets. +type Span struct { + // Gap to previous span (always positive), or starting index for the 1st + // span (which can be negative). + Offset int32 + // Length of the span. + Length uint32 +} + +// Copy returns a deep copy of the Histogram. +func (h *Histogram) Copy() *Histogram { + c := *h + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + + return &c +} + +// String returns a string representation of the Histogram. +func (h *Histogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[uint64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. +func (h *Histogram) ZeroBucket() Bucket[uint64] { + return Bucket[uint64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { + return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going down). +func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { + return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) +} + +// CumulativeBucketIterator returns a BucketIterator to iterate over a +// cumulative view of the buckets. This method currently only supports +// Histograms without negative buckets and panics if the Histogram has negative +// buckets. It is currently only used for testing. +func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { + if len(h.NegativeBuckets) > 0 { + panic("CumulativeBucketIterator called on Histogram with negative buckets") + } + return &cumulativeBucketIterator{h: h, posSpansIdx: -1} +} + +// Equals returns true if the given histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. 
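+// For example, the span layouts [{0,2},{0,0},{3,2}] and [{0,2},{3,2}] (written
+// as {Offset,Length} pairs) describe the same buckets and therefore match.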
+func (h *Histogram) Equals(h2 *Histogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || + h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + + if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + return true +} + +// spansMatch returns true if both spans represent the same bucket layout +// after combining zero length spans with the next non-zero length span. +func spansMatch(s1, s2 []Span) bool { + if len(s1) == 0 && len(s2) == 0 { + return true + } + + s1idx, s2idx := 0, 0 + for { + if s1idx >= len(s1) { + return allEmptySpans(s2[s2idx:]) + } + if s2idx >= len(s2) { + return allEmptySpans(s1[s1idx:]) + } + + currS1, currS2 := s1[s1idx], s2[s2idx] + s1idx++ + s2idx++ + if currS1.Length == 0 { + // This span is zero length, so we add consecutive such spans + // until we find a non-zero span. + for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ { + currS1.Offset += s1[s1idx].Offset + } + if s1idx < len(s1) { + currS1.Offset += s1[s1idx].Offset + currS1.Length = s1[s1idx].Length + s1idx++ + } + } + if currS2.Length == 0 { + // This span is zero length, so we add consecutive such spans + // until we find a non-zero span. + for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ { + currS2.Offset += s2[s2idx].Offset + } + if s2idx < len(s2) { + currS2.Offset += s2[s2idx].Offset + currS2.Length = s2[s2idx].Length + s2idx++ + } + } + + if currS1.Length == 0 && currS2.Length == 0 { + // The last spans of both set are zero length. Previous spans match. + return true + } + + if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length { + return false + } + } +} + +func allEmptySpans(s []Span) bool { + for _, ss := range s { + if ss.Length > 0 { + return false + } + } + return true +} + +func bucketsMatch(b1, b2 []int64) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if b != b2[i] { + return false + } + } + return true +} + +// Compact works like FloatHistogram.Compact. See there for detailed +// explanations. +func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { + h.PositiveBuckets, h.PositiveSpans = compactBuckets( + h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true, + ) + h.NegativeBuckets, h.NegativeSpans = compactBuckets( + h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true, + ) + return h +} + +// ToFloat returns a FloatHistogram representation of the Histogram. It is a +// deep copy (e.g. spans are not shared). 
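+// The delta-encoded integer buckets of the Histogram are converted into
+// absolute float64 counts in the returned FloatHistogram.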
+func (h *Histogram) ToFloat() *FloatHistogram { + var ( + positiveSpans, negativeSpans []Span + positiveBuckets, negativeBuckets []float64 + ) + if len(h.PositiveSpans) != 0 { + positiveSpans = make([]Span, len(h.PositiveSpans)) + copy(positiveSpans, h.PositiveSpans) + } + if len(h.NegativeSpans) != 0 { + negativeSpans = make([]Span, len(h.NegativeSpans)) + copy(negativeSpans, h.NegativeSpans) + } + if len(h.PositiveBuckets) != 0 { + positiveBuckets = make([]float64, len(h.PositiveBuckets)) + var current float64 + for i, b := range h.PositiveBuckets { + current += float64(b) + positiveBuckets[i] = current + } + } + if len(h.NegativeBuckets) != 0 { + negativeBuckets = make([]float64, len(h.NegativeBuckets)) + var current float64 + for i, b := range h.NegativeBuckets { + current += float64(b) + negativeBuckets[i] = current + } + } + + return &FloatHistogram{ + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.ZeroCount), + Count: float64(h.Count), + Sum: h.Sum, + PositiveSpans: positiveSpans, + NegativeSpans: negativeSpans, + PositiveBuckets: positiveBuckets, + NegativeBuckets: negativeBuckets, + } +} + +type regularBucketIterator struct { + baseBucketIterator[uint64, int64] +} + +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator { + i := baseBucketIterator[uint64, int64]{ + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + } + return ®ularBucketIterator{i} +} + +func (r *regularBucketIterator) Next() bool { + if r.spansIdx >= len(r.spans) { + return false + } + span := r.spans[r.spansIdx] + // Seed currIdx for the first bucket. + if r.bucketsIdx == 0 { + r.currIdx = span.Offset + } else { + r.currIdx++ + } + for r.idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + r.idxInSpan = 0 + r.spansIdx++ + if r.spansIdx >= len(r.spans) { + return false + } + span = r.spans[r.spansIdx] + r.currIdx += span.Offset + } + + r.currCount += r.buckets[r.bucketsIdx] + r.idxInSpan++ + r.bucketsIdx++ + return true +} + +type cumulativeBucketIterator struct { + h *Histogram + + posSpansIdx int // Index in h.PositiveSpans we are in. -1 means 0 bucket. + posBucketsIdx int // Index in h.PositiveBuckets. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + + initialized bool + currIdx int32 // The actual bucket index after decoding from spans. + currUpper float64 // The upper boundary of the current bucket. + currCount int64 // Current non-cumulative count for the current bucket. Does not apply for empty bucket. + currCumulativeCount uint64 // Current "cumulative" count for the current bucket. + + // Between 2 spans there could be some empty buckets which + // still needs to be counted for cumulative buckets. + // When we hit the end of a span, we use this to iterate + // through the empty buckets. + emptyBucketCount int32 +} + +func (c *cumulativeBucketIterator) Next() bool { + if c.posSpansIdx == -1 { + // Zero bucket. + c.posSpansIdx++ + if c.h.ZeroCount == 0 { + return c.Next() + } + + c.currUpper = c.h.ZeroThreshold + c.currCount = int64(c.h.ZeroCount) + c.currCumulativeCount = uint64(c.currCount) + return true + } + + if c.posSpansIdx >= len(c.h.PositiveSpans) { + return false + } + + if c.emptyBucketCount > 0 { + // We are traversing through empty buckets at the moment. 
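+		// Empty buckets carry no observations of their own, but they still
+		// have to be emitted so that the cumulative count is reported at
+		// every bucket boundary.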
+ c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currIdx++ + c.emptyBucketCount-- + return true + } + + span := c.h.PositiveSpans[c.posSpansIdx] + if c.posSpansIdx == 0 && !c.initialized { + // Initializing. + c.currIdx = span.Offset + // The first bucket is an absolute value and not a delta with Zero bucket. + c.currCount = 0 + c.initialized = true + } + + c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] + c.currCumulativeCount += uint64(c.currCount) + c.currUpper = getBound(c.currIdx, c.h.Schema) + + c.posBucketsIdx++ + c.idxInSpan++ + c.currIdx++ + if c.idxInSpan >= span.Length { + // Move to the next span. This one is done. + c.posSpansIdx++ + c.idxInSpan = 0 + if c.posSpansIdx < len(c.h.PositiveSpans) { + c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset + } + } + + return true +} + +func (c *cumulativeBucketIterator) At() Bucket[uint64] { + return Bucket[uint64]{ + Upper: c.currUpper, + Lower: math.Inf(-1), + UpperInclusive: true, + LowerInclusive: true, + Count: c.currCumulativeCount, + Index: c.currIdx - 1, + } +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 9300e2a40c8..9efd942e832 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -17,16 +17,23 @@ import ( "mime" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) // Parser parses samples from a byte slice of samples in the official // Prometheus and OpenMetrics text exposition formats. type Parser interface { - // Series returns the bytes of the series, the timestamp if set, and the value - // of the current sample. + // Series returns the bytes of a series with a simple float64 as a + // value, the timestamp if set, and the value of the current sample. Series() ([]byte, *int64, float64) + // Histogram returns the bytes of a series with a sparse histogram as a + // value, the timestamp if set, and the histogram in the current sample. + // Depending on the parsed input, the function returns an (integer) Histogram + // or a FloatHistogram, with the respective other return value being nil. + Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. @@ -70,22 +77,30 @@ func New(b []byte, contentType string) (Parser, error) { } mediaType, _, err := mime.ParseMediaType(contentType) - if err == nil && mediaType == "application/openmetrics-text" { + if err != nil { + return NewPromParser(b), err + } + switch mediaType { + case "application/openmetrics-text": return NewOpenMetricsParser(b), nil + case "application/vnd.google.protobuf": + return NewProtobufParser(b), nil + default: + return NewPromParser(b), nil } - return NewPromParser(b), err } // Entry represents the type of a parsed entry. type Entry int const ( - EntryInvalid Entry = -1 - EntryType Entry = 0 - EntryHelp Entry = 1 - EntrySeries Entry = 2 - EntryComment Entry = 3 - EntryUnit Entry = 4 + EntryInvalid Entry = -1 + EntryType Entry = 0 + EntryHelp Entry = 1 + EntrySeries Entry = 2 // A series with a simple float64 as value. 
+ EntryComment Entry = 3 + EntryUnit Entry = 4 + EntryHistogram Entry = 5 // A series with a sparse histogram as a value. ) // MetricType represents metric type values. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index be1ffcd0930..932a3d96db1 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -27,6 +27,7 @@ import ( "unicode/utf8" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) @@ -112,6 +113,12 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } +// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support +// sparse histograms. +func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 758ea5a66af..a3bb8bb9bfd 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -28,6 +28,7 @@ import ( "unsafe" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) @@ -167,6 +168,12 @@ func (p *PromParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } +// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format +// does not support sparse histograms. +func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go new file mode 100644 index 00000000000..a9c940879eb --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -0,0 +1,518 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package textparse + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "sort" + "strings" + "unicode/utf8" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus +// protobuf format and then present it as it if were parsed by a +// Prometheus-2-style text parser. This is only done so that we can easily plug +// in the protobuf format into Prometheus 2. For future use (with the final +// format that will be used for native histograms), we have to revisit the +// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing +// could be used in a similar fashion (byte-slice pointers into the raw +// payload), which requires some hand-coded protobuf handling. But the current +// parsers all expect the full series name (metric name plus label pairs) as one +// string, which is not how things are represented in the protobuf format. If +// the re-arrangement work is actually causing problems (which has to be seen), +// that expectation needs to be changed. +type ProtobufParser struct { + in []byte // The intput to parse. + inPos int // Position within the input. + metricPos int // Position within Metric slice. + // fieldPos is the position within a Summary or (legacy) Histogram. -2 + // is the count. -1 is the sum. Otherwise it is the index within + // quantiles/buckets. + fieldPos int + fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + // state is marked by the entry we are processing. EntryInvalid implies + // that we have to decode the next MetricFamily. + state Entry + + mf *dto.MetricFamily + + // The following are just shenanigans to satisfy the Parser interface. + metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. +} + +// NewProtobufParser returns a parser for the payload in the byte slice. +func NewProtobufParser(b []byte) Parser { + return &ProtobufParser{ + in: b, + state: EntryInvalid, + mf: &dto.MetricFamily{}, + metricBytes: &bytes.Buffer{}, + } +} + +// Series returns the bytes of a series with a simple float64 as a +// value, the timestamp if set, and the value of the current sample. +func (p *ProtobufParser) Series() ([]byte, *int64, float64) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + v float64 + ) + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + v = m.GetCounter().GetValue() + case dto.MetricType_GAUGE: + v = m.GetGauge().GetValue() + case dto.MetricType_UNTYPED: + v = m.GetUntyped().GetValue() + case dto.MetricType_SUMMARY: + s := m.GetSummary() + switch p.fieldPos { + case -2: + v = float64(s.GetSampleCount()) + case -1: + v = s.GetSampleSum() + // Need to detect a summaries without quantile here. + if len(s.GetQuantile()) == 0 { + p.fieldsDone = true + } + default: + v = s.GetQuantile()[p.fieldPos].GetValue() + } + case dto.MetricType_HISTOGRAM: + // This should only happen for a legacy histogram. 
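+		// Native histograms are returned via the Histogram method instead;
+		// Next switches the parser state to EntryHistogram for them.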
+ h := m.GetHistogram() + switch p.fieldPos { + case -2: + v = float64(h.GetSampleCount()) + case -1: + v = h.GetSampleSum() + default: + bb := h.GetBucket() + if p.fieldPos >= len(bb) { + v = float64(h.GetSampleCount()) + } else { + v = float64(bb[p.fieldPos].GetCumulativeCount()) + } + } + default: + panic("encountered unexpected metric type, this is a bug") + } + if ts != 0 { + return p.metricBytes.Bytes(), &ts, v + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. + return p.metricBytes.Bytes(), nil, v +} + +// Histogram returns the bytes of a series with a native histogram as a value, +// the timestamp if set, and the native histogram in the current sample. +// +// The Compact method is called before returning the Histogram (or FloatHistogram). +// +// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0, +// the histogram is parsed and returned as a FloatHistogram and nil is returned +// as the (integer) Histogram return value. Otherwise, it is parsed and returned +// as an (integer) Histogram and nil is returned as the FloatHistogram return +// value. +func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + h = m.GetHistogram() + ) + if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { + // It is a float histogram. + fh := histogram.FloatHistogram{ + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveCount(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeCount(), + } + for i, span := range h.GetPositiveSpan() { + fh.PositiveSpans[i].Offset = span.GetOffset() + fh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + fh.NegativeSpans[i].Offset = span.GetOffset() + fh.NegativeSpans[i].Length = span.GetLength() + } + fh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, nil, &fh + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. + return p.metricBytes.Bytes(), nil, nil, &fh + } + + sh := histogram.Histogram{ + Count: h.GetSampleCount(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCount(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveDelta(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeDelta(), + } + for i, span := range h.GetPositiveSpan() { + sh.PositiveSpans[i].Offset = span.GetOffset() + sh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + sh.NegativeSpans[i].Offset = span.GetOffset() + sh.NegativeSpans[i].Length = span.GetLength() + } + sh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, &sh, nil + } + return p.metricBytes.Bytes(), nil, &sh, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. 
+// The returned byte slices become invalid after the next call to Next.
+func (p *ProtobufParser) Help() ([]byte, []byte) {
+	return p.metricBytes.Bytes(), []byte(p.mf.GetHelp())
+}
+
+// Type returns the metric name and type in the current entry.
+// Must only be called after Next returned a type entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *ProtobufParser) Type() ([]byte, MetricType) {
+	n := p.metricBytes.Bytes()
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		return n, MetricTypeCounter
+	case dto.MetricType_GAUGE:
+		return n, MetricTypeGauge
+	case dto.MetricType_HISTOGRAM:
+		return n, MetricTypeHistogram
+	case dto.MetricType_SUMMARY:
+		return n, MetricTypeSummary
+	}
+	return n, MetricTypeUnknown
+}
+
+// Unit always returns (nil, nil) because units aren't supported by the protobuf
+// format.
+func (p *ProtobufParser) Unit() ([]byte, []byte) {
+	return nil, nil
+}
+
+// Comment always returns nil because comments aren't supported by the protobuf
+// format.
+func (p *ProtobufParser) Comment() []byte {
+	return nil
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *ProtobufParser) Metric(l *labels.Labels) string {
+	*l = append(*l, labels.Label{
+		Name:  labels.MetricName,
+		Value: p.getMagicName(),
+	})
+
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		*l = append(*l, labels.Label{
+			Name:  lp.GetName(),
+			Value: lp.GetValue(),
+		})
+	}
+	if needed, name, value := p.getMagicLabel(); needed {
+		*l = append(*l, labels.Label{Name: name, Value: value})
+	}
+
+	// Sort labels to maintain the sorted labels invariant.
+	sort.Sort(*l)
+
+	return p.metricBytes.String()
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns whether an exemplar exists. In case of a native
+// histogram, the legacy bucket section is still used for exemplars. To ingest
+// all exemplars, call the Exemplar method repeatedly until it returns false.
+func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
+	m := p.mf.GetMetric()[p.metricPos]
+	var exProto *dto.Exemplar
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		exProto = m.GetCounter().GetExemplar()
+	case dto.MetricType_HISTOGRAM:
+		bb := m.GetHistogram().GetBucket()
+		if p.fieldPos < 0 {
+			if p.state == EntrySeries {
+				return false // At _count or _sum.
+			}
+			p.fieldPos = 0 // Start at 1st bucket for native histograms.
+		}
+		for p.fieldPos < len(bb) {
+			exProto = bb[p.fieldPos].GetExemplar()
+			if p.state == EntrySeries {
+				break
+			}
+			p.fieldPos++
+			if exProto != nil {
+				break
+			}
+		}
+	default:
+		return false
+	}
+	if exProto == nil {
+		return false
+	}
+	ex.Value = exProto.GetValue()
+	if ts := exProto.GetTimestamp(); ts != nil {
+		ex.HasTs = true
+		ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
+	}
+	for _, lp := range exProto.GetLabel() {
+		ex.Labels = append(ex.Labels, labels.Label{
+			Name:  lp.GetName(),
+			Value: lp.GetValue(),
+		})
+	}
+	return true
+}
+
+// Next advances the parser to the next "sample" (emulating the behavior of a
+// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
+// read.
+func (p *ProtobufParser) Next() (Entry, error) {
+	switch p.state {
+	case EntryInvalid:
+		p.metricPos = 0
+		p.fieldPos = -2
+		n, err := readDelimited(p.in[p.inPos:], p.mf)
+		p.inPos += n
+		if err != nil {
+			return p.state, err
+		}
+
+		// Skip empty metric families.
+		if len(p.mf.GetMetric()) == 0 {
+			return p.Next()
+		}
+
+		// We are at the beginning of a metric family. Put only the name
+		// into metricBytes and validate only name and help for now.
+		name := p.mf.GetName()
+		if !model.IsValidMetricName(model.LabelValue(name)) {
+			return EntryInvalid, errors.Errorf("invalid metric name: %s", name)
+		}
+		if help := p.mf.GetHelp(); !utf8.ValidString(help) {
+			return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help)
+		}
+		p.metricBytes.Reset()
+		p.metricBytes.WriteString(name)
+
+		p.state = EntryHelp
+	case EntryHelp:
+		p.state = EntryType
+	case EntryType:
+		if p.mf.GetType() == dto.MetricType_HISTOGRAM &&
+			isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+			p.state = EntryHistogram
+		} else {
+			p.state = EntrySeries
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	case EntryHistogram, EntrySeries:
+		if p.state == EntrySeries && !p.fieldsDone &&
+			(p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) {
+			p.fieldPos++
+		} else {
+			p.metricPos++
+			p.fieldPos = -2
+			p.fieldsDone = false
+		}
+		if p.metricPos >= len(p.mf.GetMetric()) {
+			p.state = EntryInvalid
+			return p.Next()
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	default:
+		return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state)
+	}
+	return p.state, nil
+}
+
+func (p *ProtobufParser) updateMetricBytes() error {
+	b := p.metricBytes
+	b.Reset()
+	b.WriteString(p.getMagicName())
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		b.WriteByte(model.SeparatorByte)
+		n := lp.GetName()
+		if !model.LabelName(n).IsValid() {
+			return errors.Errorf("invalid label name: %s", n)
+		}
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		v := lp.GetValue()
+		if !utf8.ValidString(v) {
+			return errors.Errorf("invalid label value: %s", v)
+		}
+		b.WriteString(v)
+	}
+	if needed, n, v := p.getMagicLabel(); needed {
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(v)
+	}
+	return nil
+}
+
+// getMagicName usually just returns p.mf.GetName() but adds a magic suffix
+// ("_count", "_sum", "_bucket") if needed according to the current parser
+// state.
+func (p *ProtobufParser) getMagicName() string {
+	t := p.mf.GetType()
+	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) {
+		return p.mf.GetName()
+	}
+	if p.fieldPos == -2 {
+		return p.mf.GetName() + "_count"
+	}
+	if p.fieldPos == -1 {
+		return p.mf.GetName() + "_sum"
+	}
+	if t == dto.MetricType_HISTOGRAM {
+		return p.mf.GetName() + "_bucket"
+	}
+	return p.mf.GetName()
+}
+
+// getMagicLabel returns whether a magic label ("quantile" or "le") is needed
+// and, if so, its name and value. It also sets p.fieldsDone if applicable.
+func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
+	if p.state == EntryHistogram || p.fieldPos < 0 {
+		return false, "", ""
+	}
+	switch p.mf.GetType() {
+	case dto.MetricType_SUMMARY:
+		qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
+		q := qq[p.fieldPos]
+		p.fieldsDone = p.fieldPos == len(qq)-1
+		return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
+	case dto.MetricType_HISTOGRAM:
+		bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
+		if p.fieldPos >= len(bb) {
+			p.fieldsDone = true
+			return true, model.BucketLabel, "+Inf"
+		}
+		b := bb[p.fieldPos]
+		p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
+		return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
+	}
+	return false, "", ""
+}
+
+var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")
+
+// readDelimited is essentially doing what the function of the same name in
+// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
+// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
+// unmarshaling, and acts on a byte slice directly without any additional
+// staging buffers.
+func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
+	if len(b) == 0 {
+		return 0, io.EOF
+	}
+	messageLength, varIntLength := proto.DecodeVarint(b)
+	if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
+		return 0, errInvalidVarint
+	}
+	totalLength := varIntLength + int(messageLength)
+	if totalLength > len(b) {
+		return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
+	}
+	mf.Reset()
+	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
+}
+
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
+// but appends ".0" if the resulting number would otherwise contain neither a
+// "." nor an "e".
+func formatOpenMetricsFloat(f float64) string {
+	// A few common cases hardcoded.
+	switch {
+	case f == 1:
+		return "1.0"
+	case f == 0:
+		return "0.0"
+	case f == -1:
+		return "-1.0"
+	case math.IsNaN(f):
+		return "NaN"
+	case math.IsInf(f, +1):
+		return "+Inf"
+	case math.IsInf(f, -1):
+		return "-Inf"
+	}
+	s := fmt.Sprint(f)
+	if strings.ContainsAny(s, "e.") {
+		return s
+	}
+	return s + ".0"
+}
+
+// isNativeHistogram returns false iff the provided histogram has no sparse
+// buckets and a zero threshold of 0 and a zero count of 0. In principle, this
+// could still be meant to be a native histogram (with a zero threshold of 0 and
+// no observations yet), but for now, we'll treat this case as a conventional
+// histogram.
+//
+// TODO(beorn7): In the final format, there should be an unambiguous way of
+// deciding if a histogram should be ingested as a conventional one or a native
+// one.
+func isNativeHistogram(h *dto.Histogram) bool { + return len(h.GetNegativeDelta()) > 0 || + len(h.GetPositiveDelta()) > 0 || + h.GetZeroCount() > 0 || + h.GetZeroThreshold() > 0 +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/buf.yaml b/vendor/github.com/prometheus/prometheus/prompb/buf.yaml index acd7af9cf42..0f7ec13401b 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/buf.yaml +++ b/vendor/github.com/prometheus/prometheus/prompb/buf.yaml @@ -5,14 +5,17 @@ lint: ENUM_VALUE_PREFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto ENUM_ZERO_VALUE_SUFFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto PACKAGE_DIRECTORY_MATCH: - remote.proto - types.proto PACKAGE_VERSION_SUFFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto deps: - buf.build/gogo/protobuf diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go new file mode 100644 index 00000000000..1be98a2f772 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -0,0 +1,3994 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/client/metrics.proto + +package io_prometheus_client + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". + MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} + +type LabelPair struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Gauge struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return m.Size() +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Counter struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) 
Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return m.Size() +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil { + return m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Summary struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return m.Size() +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return m.Size() +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Histogram struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema int32 `protobuf:"zigzag32,5,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` + ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. 
+ NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil { + return m.SampleCountFloat + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil { + return m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil { + return m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []*BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []*BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func 
(m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +type Bucket struct { + CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"` + CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"` + UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return m.Size() +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil { + return m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil { + return m.CumulativeCountFloat + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil { + return m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). 
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{9} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + Untyped *Untyped 
`protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"` + TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{10} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` + Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{11} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return m.Size() +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() 
string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil { + return m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 894 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdd, 0x6e, 0xe3, 0x44, + 0x18, 0xc5, 0xcd, 0xaf, 0xbf, 0x34, 0xdd, 0xec, 0x50, 0xad, 0xac, 0x42, 0xdb, 0x60, 0x09, 0xa9, + 0x20, 0xe4, 0x08, 0xe8, 0x0a, 0x84, 0xe0, 0xa2, 0xdd, 0xcd, 0x76, 0x91, 0xc8, 0xee, 0x32, 0x49, + 0x2e, 0x16, 0x2e, 0xac, 0x49, 0x3a, 0x4d, 0x2c, 0x3c, 0x1e, 0x63, 0x8f, 0x57, 0x94, 0x17, 0xe0, + 0x9a, 0x57, 0xe0, 0x61, 0x10, 0x97, 0x3c, 0x02, 0x2a, 0x0f, 0x02, 0x9a, 0x3f, 0xbb, 0x59, 0x39, + 0xcb, 0xb2, 0x77, 0x99, 0xe3, 0x73, 0xbe, 0x39, 0x67, 0x3c, 0x39, 0x06, 0x3f, 0xe2, 0xa3, 0x34, + 0xe3, 0x8c, 0x8a, 0x35, 0x2d, 0xf2, 0xd1, 0x32, 0x8e, 0x68, 0x22, 0x46, 0x8c, 0x8a, 0x2c, 0x5a, + 0xe6, 0x41, 0x9a, 0x71, 0xc1, 0xd1, 0x7e, 0xc4, 0x83, 0x8a, 0x13, 0x68, 0xce, 0xc1, 0xf1, 0x8a, + 0xf3, 0x55, 0x4c, 0x47, 0x8a, 0xb3, 0x28, 0xae, 0x46, 0x22, 0x62, 0x34, 0x17, 0x84, 0xa5, 0x5a, + 0xe6, 0xdf, 0x07, 0xf7, 0x1b, 0xb2, 0xa0, 0xf1, 0x33, 0x12, 0x65, 0x08, 0x41, 0x33, 0x21, 0x8c, + 0x7a, 0xce, 0xd0, 0x39, 0x71, 0xb1, 0xfa, 0x8d, 0xf6, 0xa1, 0xf5, 0x82, 0xc4, 0x05, 0xf5, 0x76, + 0x14, 0xa8, 0x17, 0xfe, 0x21, 0xb4, 0x2e, 0x48, 0xb1, 0xba, 0xf5, 0x58, 0x6a, 0x1c, 0xfb, 0xf8, + 0x7b, 0xe8, 0x3c, 0xe0, 0x45, 0x22, 0x68, 0x56, 0x4f, 0x40, 0x5f, 0x40, 0x97, 0xfe, 0x44, 0x59, + 0x1a, 0x93, 0x4c, 0x0d, 0xee, 0x7d, 0x72, 0x14, 0xd4, 0x05, 0x08, 0xc6, 0x86, 0x85, 0x4b, 0xbe, + 0xff, 0x25, 0x74, 0xbf, 0x2d, 0x48, 0x22, 0xa2, 0x98, 0xa2, 0x03, 0xe8, 0xfe, 0x68, 0x7e, 0x9b, + 0x0d, 0xca, 0xf5, 0xa6, 0xf3, 0xd2, 0xda, 0x2f, 0x0e, 0x74, 0xa6, 0x05, 0x63, 0x24, 0xbb, 0x46, + 0xef, 0xc1, 0x6e, 0x4e, 0x58, 0x1a, 0xd3, 0x70, 0x29, 0xdd, 0xaa, 0x09, 0x4d, 0xdc, 0xd3, 0x98, + 0x0a, 0x80, 0x0e, 0x01, 0x0c, 0x25, 0x2f, 0x98, 0x99, 0xe4, 0x6a, 0x64, 0x5a, 0x30, 0x99, 0xa3, + 0xdc, 0xbf, 0x31, 0x6c, 0x6c, 0xcf, 0x61, 0x1d, 0x57, 0xfe, 0xfc, 0x63, 0xe8, 0xcc, 0x13, 0x71, + 0x9d, 0xd2, 0xcb, 0x2d, 0xa7, 0xf8, 0x77, 0x13, 0xdc, 0xc7, 0x51, 0x2e, 0xf8, 0x2a, 0x23, 0xec, + 0x75, 0xcc, 0x7e, 0x04, 0xe8, 0x36, 
0x25, 0xbc, 0x8a, 0x39, 0x11, 0x5e, 0x53, 0xcd, 0x1c, 0xdc, + 0x22, 0x3e, 0x92, 0xf8, 0x7f, 0x45, 0x3b, 0x85, 0xf6, 0xa2, 0x58, 0xfe, 0x40, 0x85, 0x09, 0xf6, + 0x6e, 0x7d, 0xb0, 0x73, 0xc5, 0xc1, 0x86, 0x8b, 0xee, 0x41, 0x3b, 0x5f, 0xae, 0x29, 0x23, 0x5e, + 0x6b, 0xe8, 0x9c, 0xdc, 0xc5, 0x66, 0x85, 0xde, 0x87, 0xbd, 0x9f, 0x69, 0xc6, 0x43, 0xb1, 0xce, + 0x68, 0xbe, 0xe6, 0xf1, 0xa5, 0xd7, 0x56, 0x1b, 0xf6, 0x25, 0x3a, 0xb3, 0xa0, 0xf4, 0xa4, 0x68, + 0x3a, 0x62, 0x47, 0x45, 0x74, 0x25, 0xa2, 0x03, 0x9e, 0xc0, 0xa0, 0x7a, 0x6c, 0xe2, 0x75, 0xd5, + 0x9c, 0xbd, 0x92, 0xa4, 0xc3, 0x8d, 0xa1, 0x9f, 0xd0, 0x15, 0x11, 0xd1, 0x0b, 0x1a, 0xe6, 0x29, + 0x49, 0x3c, 0x57, 0x85, 0x18, 0xbe, 0x2a, 0xc4, 0x34, 0x25, 0x09, 0xde, 0xb5, 0x32, 0xb9, 0x92, + 0xb6, 0xcb, 0x31, 0x97, 0x34, 0x16, 0xc4, 0x83, 0x61, 0xe3, 0x04, 0xe1, 0x72, 0xf8, 0x43, 0x09, + 0x6e, 0xd0, 0xb4, 0xf5, 0xde, 0xb0, 0x21, 0xd3, 0x59, 0x54, 0xdb, 0x1f, 0x43, 0x3f, 0xe5, 0x79, + 0x54, 0x99, 0xda, 0x7d, 0x5d, 0x53, 0x56, 0x66, 0x4d, 0x95, 0x63, 0xb4, 0xa9, 0xbe, 0x36, 0x65, + 0xd1, 0xd2, 0x54, 0x49, 0xd3, 0xa6, 0xf6, 0xb4, 0x29, 0x8b, 0x2a, 0x53, 0xfe, 0xef, 0x0e, 0xb4, + 0xf5, 0x56, 0xe8, 0x03, 0x18, 0x2c, 0x0b, 0x56, 0xc4, 0xb7, 0x83, 0xe8, 0x6b, 0x76, 0xa7, 0xc2, + 0x75, 0x94, 0x53, 0xb8, 0xf7, 0x32, 0x75, 0xe3, 0xba, 0xed, 0xbf, 0x24, 0xd0, 0x6f, 0xe5, 0x18, + 0x7a, 0x45, 0x9a, 0xd2, 0x2c, 0x5c, 0xf0, 0x22, 0xb9, 0x34, 0x77, 0x0e, 0x14, 0x74, 0x2e, 0x91, + 0x8d, 0x5e, 0x68, 0xfc, 0xef, 0x5e, 0x80, 0xea, 0xc8, 0xe4, 0x45, 0xe4, 0x57, 0x57, 0x39, 0xd5, + 0x09, 0xee, 0x62, 0xb3, 0x92, 0x78, 0x4c, 0x93, 0x95, 0x58, 0xab, 0xdd, 0xfb, 0xd8, 0xac, 0xfc, + 0x5f, 0x1d, 0xe8, 0xda, 0xa1, 0xe8, 0x3e, 0xb4, 0x62, 0xd9, 0x8a, 0x9e, 0xa3, 0x5e, 0xd0, 0x71, + 0xbd, 0x87, 0xb2, 0x38, 0xb1, 0x66, 0xd7, 0x37, 0x0e, 0xfa, 0x1c, 0xdc, 0xb2, 0x75, 0x4d, 0xa8, + 0x83, 0x40, 0xf7, 0x72, 0x60, 0x7b, 0x39, 0x98, 0x59, 0x06, 0xae, 0xc8, 0xfe, 0x3f, 0x3b, 0xd0, + 0x9e, 0xa8, 0x96, 0x7f, 0x53, 0x47, 0x1f, 0x43, 0x6b, 0x25, 0x7b, 0xda, 0x94, 0xec, 0x3b, 0xf5, + 0x32, 0x55, 0xe5, 0x58, 0x33, 0xd1, 0x67, 0xd0, 0x59, 0xea, 0xee, 0x36, 0x66, 0x0f, 0xeb, 0x45, + 0xa6, 0xe0, 0xb1, 0x65, 0x4b, 0x61, 0xae, 0x8b, 0x55, 0xdd, 0x81, 0xad, 0x42, 0xd3, 0xbe, 0xd8, + 0xb2, 0xa5, 0xb0, 0xd0, 0x45, 0xa8, 0x4a, 0x63, 0xab, 0xd0, 0xb4, 0x25, 0xb6, 0x6c, 0xf4, 0x15, + 0xb8, 0x6b, 0xdb, 0x8f, 0xaa, 0x2c, 0xb6, 0x1e, 0x4c, 0x59, 0xa3, 0xb8, 0x52, 0xc8, 0x46, 0x2d, + 0xcf, 0x3a, 0x64, 0xb9, 0x6a, 0xa4, 0x06, 0xee, 0x95, 0xd8, 0x24, 0xf7, 0x7f, 0x73, 0x60, 0x57, + 0xbf, 0x81, 0x47, 0x84, 0x45, 0xf1, 0x75, 0xed, 0x27, 0x12, 0x41, 0x73, 0x4d, 0xe3, 0xd4, 0x7c, + 0x21, 0xd5, 0x6f, 0x74, 0x0a, 0x4d, 0xe9, 0x51, 0x1d, 0xe1, 0xde, 0xb6, 0x7f, 0xb8, 0x9e, 0x3c, + 0xbb, 0x4e, 0x29, 0x56, 0x6c, 0xd9, 0xb9, 0xfa, 0xab, 0xee, 0x35, 0x5f, 0xd5, 0xb9, 0x5a, 0x87, + 0x0d, 0xf7, 0xc3, 0x05, 0x40, 0x35, 0x09, 0xf5, 0xa0, 0xf3, 0xe0, 0xe9, 0xfc, 0xc9, 0x6c, 0x8c, + 0x07, 0x6f, 0x21, 0x17, 0x5a, 0x17, 0x67, 0xf3, 0x8b, 0xf1, 0xc0, 0x91, 0xf8, 0x74, 0x3e, 0x99, + 0x9c, 0xe1, 0xe7, 0x83, 0x1d, 0xb9, 0x98, 0x3f, 0x99, 0x3d, 0x7f, 0x36, 0x7e, 0x38, 0x68, 0xa0, + 0x3e, 0xb8, 0x8f, 0xbf, 0x9e, 0xce, 0x9e, 0x5e, 0xe0, 0xb3, 0xc9, 0xa0, 0x89, 0xde, 0x86, 0x3b, + 0x4a, 0x13, 0x56, 0x60, 0xeb, 0xdc, 0xff, 0xe3, 0xe6, 0xc8, 0xf9, 0xf3, 0xe6, 0xc8, 0xf9, 0xeb, + 0xe6, 0xc8, 0xf9, 0x6e, 0x3f, 0xe2, 0x61, 0x65, 0x2b, 0xd4, 0xb6, 0x16, 0x6d, 0x75, 0x9b, 0x3f, + 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xbc, 0x25, 0x8b, 0xaf, 0x08, 0x00, 0x00, +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Gauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Quantile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quantile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Summary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Quantile) > 0 { + for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Quantile[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Untyped) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Untyped) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Untyped) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PositiveCount) > 0 { + for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- { + f2 := math.Float64bits(float64(m.PositiveCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8)) + i-- + dAtA[i] = 0x72 + } + if len(m.PositiveDelta) > 0 { + var j3 int + dAtA5 := make([]byte, len(m.PositiveDelta)*10) + for _, num := range m.PositiveDelta { + x4 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x4 >= 1<<7 { + dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) + j3++ + x4 >>= 7 + } + dAtA5[j3] = uint8(x4) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA5[:j3]) + i = encodeVarintMetrics(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveSpan) > 0 { + for iNdEx := len(m.PositiveSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.NegativeCount) > 0 { + for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.NegativeCount[iNdEx])) + i 
-= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8)) + i-- + dAtA[i] = 0x5a + } + if len(m.NegativeDelta) > 0 { + var j7 int + dAtA9 := make([]byte, len(m.NegativeDelta)*10) + for _, num := range m.NegativeDelta { + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 + } + dAtA9[j7] = uint8(x8) + j7++ + } + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintMetrics(dAtA, i, uint64(j7)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeSpan) > 0 { + for iNdEx := len(m.NegativeSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ZeroCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x41 + } + if m.ZeroCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ZeroCount)) + i-- + dAtA[i] = 0x38 + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x31 + } + if m.Schema != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x28 + } + if m.SampleCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if len(m.Bucket) > 0 { + for iNdEx := len(m.Bucket) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bucket[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CumulativeCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CumulativeCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UpperBound != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.UpperBound)))) + i-- + dAtA[i] = 0x11 + } + if m.CumulativeCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.CumulativeCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != nil { + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.TimestampMs != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x30 + } + if m.Untyped != nil { + { + size, err := m.Untyped.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Summary != nil { + { + size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Counter != nil { + { + size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if 
m.Gauge != nil { + { + size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricFamily) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricFamily) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Type != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Gauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Counter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Quantile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Quantile) > 0 { + for _, e := range m.Quantile { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Untyped) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Bucket) > 0 { + for _, e := range m.Bucket { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.SampleCountFloat != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozMetrics(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != 0 { + n += 1 + sovMetrics(uint64(m.ZeroCount)) + } + if m.ZeroCountFloat != 0 { + n += 9 + } + if len(m.NegativeSpan) > 0 { + for _, e := range m.NegativeSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.NegativeDelta) > 0 { + l = 0 + for _, e := range m.NegativeDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.NegativeCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.NegativeCount)*8)) + len(m.NegativeCount)*8 + } + if len(m.PositiveSpan) > 0 { + for _, e := range m.PositiveSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.PositiveDelta) > 0 { + l = 0 + for _, e := range m.PositiveDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.PositiveCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Bucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CumulativeCount != 0 { + n += 1 + sovMetrics(uint64(m.CumulativeCount)) + } + if m.UpperBound != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CumulativeCountFloat != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozMetrics(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovMetrics(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Gauge != nil { + l = m.Gauge.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Counter != nil { + l = m.Counter.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Summary != nil { + l = m.Summary.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Untyped != nil { + l = m.Untyped.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.TimestampMs != 0 { + n += 1 + sovMetrics(uint64(m.TimestampMs)) + } + if m.Histogram != nil { + l = m.Histogram.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func 
(m *MetricFamily) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovMetrics(uint64(m.Type)) + } + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Quantile = append(m.Quantile, &Quantile{}) + if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Untyped) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Untyped: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Untyped: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = append(m.Bucket, &Bucket{}) + if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleCountFloat = float64(math.Float64frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType) + } + m.ZeroCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.ZeroCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCountFloat = float64(math.Float64frombits(v)) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpan = append(m.NegativeSpan, &BucketSpan{}) + if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDelta) == 0 { + m.NegativeDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDelta", wireType) + } + case 11: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCount) == 0 { + m.NegativeCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCount", wireType) + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpan = append(m.PositiveSpan, &BucketSpan{}) + if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDelta) == 0 { + m.PositiveDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDelta", wireType) + } + case 14: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + 
packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCount) == 0 { + m.PositiveCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Bucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Bucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCount", wireType) + } + m.CumulativeCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CumulativeCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field UpperBound", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.UpperBound = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.CumulativeCountFloat = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := 
skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, &LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &types.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, &LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := 
m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricFamily) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, &Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto new file mode 100644 index 00000000000..20858f33db1 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -0,0 +1,146 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This is copied and lightly edited from +// github.com/prometheus/client_model/io/prometheus/client/metrics.proto +// and finally converted to proto3 syntax to make it usable for the +// gogo-protobuf approach taken within prometheus/prometheus. + +syntax = "proto3"; + +package io.prometheus.client; +option go_package = "io_prometheus_client"; + +import "google/protobuf/timestamp.proto"; + +message LabelPair { + string name = 1; + string value = 2; +} + +enum MetricType { + // COUNTER must use the Metric field "counter". + COUNTER = 0; + // GAUGE must use the Metric field "gauge". + GAUGE = 1; + // SUMMARY must use the Metric field "summary". + SUMMARY = 2; + // UNTYPED must use the Metric field "untyped". + UNTYPED = 3; + // HISTOGRAM must use the Metric field "histogram". + HISTOGRAM = 4; + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ GAUGE_HISTOGRAM = 5; +} + +message Gauge { + double value = 1; +} + +message Counter { + double value = 1; + Exemplar exemplar = 2; +} + +message Quantile { + double quantile = 1; + double value = 2; +} + +message Summary { + uint64 sample_count = 1; + double sample_sum = 2; + repeated Quantile quantile = 3; +} + +message Untyped { + double value = 1; +} + +message Histogram { + uint64 sample_count = 1; + double sample_count_float = 4; // Overrides sample_count if > 0. + double sample_sum = 2; + // Buckets for the conventional histogram. + repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional. + + // Everything below here is for native histograms (also known as sparse histograms). + // Native histograms are an experimental feature without stability guarantees. + + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + sint32 schema = 5; + double zero_threshold = 6; // Breadth of the zero bucket. + uint64 zero_count = 7; // Count in zero bucket. + double zero_count_float = 8; // Overrides sb_zero_count if > 0. + + // Negative buckets for the native histogram. + repeated BucketSpan negative_span = 9; + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_count = 11; // Absolute count of each bucket. + + // Positive buckets for the native histogram. + repeated BucketSpan positive_span = 12; + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_count = 14; // Absolute count of each bucket. +} + +message Bucket { + uint64 cumulative_count = 1; // Cumulative in increasing order. + double cumulative_count_float = 4; // Overrides cumulative_count if > 0. + double upper_bound = 2; // Inclusive. + Exemplar exemplar = 3; +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + +message Exemplar { + repeated LabelPair label = 1; + double value = 2; + google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style. 
+} + +message Metric { + repeated LabelPair label = 1; + Gauge gauge = 2; + Counter counter = 3; + Summary summary = 4; + Untyped untyped = 5; + Histogram histogram = 7; + int64 timestamp_ms = 6; +} + +message MetricFamily { + string name = 1; + string help = 2; + MetricType type = 3; + repeated Metric metric = 4; +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go index b3cf44884f9..19318878d75 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go @@ -34,8 +34,10 @@ const ( // Content-Type: "application/x-protobuf" // Content-Encoding: "snappy" ReadRequest_SAMPLES ReadRequest_ResponseType = 0 - // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. - // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. // // Response headers: // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/vendor/github.com/prometheus/prometheus/prompb/remote.proto index 70c6dd3fbbc..b4f82f5f9d7 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.proto @@ -39,8 +39,10 @@ message ReadRequest { // Content-Type: "application/x-protobuf" // Content-Encoding: "snappy" SAMPLES = 0; - // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. - // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. 
// // Response headers: // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index e91dd55c4d6..a4c6b332bba 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -68,6 +68,37 @@ func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_d938547f84707355, []int{0, 0} } +type Histogram_ResetHint int32 + +const ( + Histogram_UNKNOWN Histogram_ResetHint = 0 + Histogram_YES Histogram_ResetHint = 1 + Histogram_NO Histogram_ResetHint = 2 + Histogram_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "UNKNOWN", + 1: "YES", + 2: "NO", + 3: "GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "UNKNOWN": 0, + "YES": 1, + "NO": 2, + "GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3, 0} +} + type LabelMatcher_Type int32 const ( @@ -96,25 +127,28 @@ func (x LabelMatcher_Type) String() string { } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6, 0} + return fileDescriptor_d938547f84707355, []int{8, 0} } // We require this to match chunkenc.Encoding. type Chunk_Encoding int32 const ( - Chunk_UNKNOWN Chunk_Encoding = 0 - Chunk_XOR Chunk_Encoding = 1 + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 ) var Chunk_Encoding_name = map[int32]string{ 0: "UNKNOWN", 1: "XOR", + 2: "HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "UNKNOWN": 0, - "XOR": 1, + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, } func (x Chunk_Encoding) String() string { @@ -122,7 +156,7 @@ func (x Chunk_Encoding) String() string { } func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8, 0} + return fileDescriptor_d938547f84707355, []int{10, 0} } type MetricMetadata struct { @@ -321,23 +355,324 @@ func (m *Exemplar) GetTimestamp() int64 { return 0 } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. 
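As an illustrative aside (not part of the vendored file), the bucket-boundary rule in the schema comment above can be made concrete with a small standalone Go sketch; the helper name bucketBoundary is made up here. For schema n, the upper bound of the bucket at index i is 2^(i * 2^-n), so consecutive boundaries differ by a factor of 2^(2^-n).

package main

import (
	"fmt"
	"math"
)

// bucketBoundary returns the upper bound of the native-histogram bucket at the
// given index for the given schema, following the rule quoted above: each
// boundary is the previous one times 2^(2^-schema), i.e. 2^(index * 2^-schema).
// Illustrative helper only; not part of the generated protobuf code.
func bucketBoundary(schema int32, index int) float64 {
	return math.Pow(2, float64(index)*math.Pow(2, -float64(schema)))
}

func main() {
	// With schema 3 there are 2^3 = 8 buckets per power of two and the
	// per-bucket growth factor is 2^(1/8) ~ 1.0905; index 8 lands exactly on 2.
	for index := 1; index <= 8; index++ {
		fmt.Printf("schema=3 bucket %d upper bound: %.4f\n", index, bucketBoundary(3, index))
	}
}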
+ Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. 
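As another illustrative aside (not part of the vendored file), the span/delta layout used by the NegativeSpans/NegativeDeltas and PositiveSpans/PositiveDeltas fields above can be sketched as a small decoder. The names span and expandDeltas are made up here, and the offset semantics follow the field comments ("gap to previous span, or starting point for 1st span").

package main

import "fmt"

// span mirrors the BucketSpan shape (offset = gap to the previous span, or the
// starting index for the first span; length = number of consecutive buckets).
// Illustrative type only; not part of the generated protobuf code.
type span struct {
	offset int32
	length uint32
}

// expandDeltas converts span/delta encoded buckets into absolute counts keyed
// by bucket index: indexes advance by each span's offset and then by one per
// bucket, while counts are running sums of the deltas.
func expandDeltas(spans []span, deltas []int64) map[int32]int64 {
	counts := make(map[int32]int64)
	var idx int32
	var count int64
	i := 0
	for _, s := range spans {
		idx += s.offset
		for j := uint32(0); j < s.length; j++ {
			count += deltas[i]
			counts[idx] = count
			i++
			idx++
		}
	}
	return counts
}

func main() {
	// Buckets at indexes 1..3, a gap of two empty buckets, then indexes 6..7.
	spans := []span{{offset: 1, length: 3}, {offset: 2, length: 2}}
	deltas := []int64{4, -2, 1, 0, 3} // absolute counts: 4, 2, 3, 3, 6
	fmt.Println(expandDeltas(spans, deltas))
}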
+ Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []*BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) 
GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []*BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + // TimeSeries represents samples and labels for a single time series. type TimeSeries struct { // For a timeseries to be valid, and for the samples and exemplars // to be ingested by the remote system properly, the labels field is required. 
- Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{5} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,6 +722,13 @@ func (m *TimeSeries) GetExemplars() []Exemplar { return nil } +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + type Label struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -399,7 +741,7 @@ func (m *Label) Reset() { *m = Label{} } func (m *Label) String() string { return proto.CompactTextString(m) } func (*Label) ProtoMessage() {} func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} + return fileDescriptor_d938547f84707355, []int{6} } func (m *Label) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -453,7 +795,7 @@ func (m *Labels) Reset() { *m = Labels{} } func (m *Labels) String() string { return proto.CompactTextString(m) } func (*Labels) ProtoMessage() {} func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{7} } func (m *Labels) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -503,7 +845,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} + return fileDescriptor_d938547f84707355, []int{8} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -570,7 +912,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7} + return fileDescriptor_d938547f84707355, []int{9} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,7 +1006,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8} + return fileDescriptor_d938547f84707355, []int{10} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-736,7 +1078,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{9} + return fileDescriptor_d938547f84707355, []int{11} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,11 +1123,14 @@ func (m *ChunkedSeries) GetChunks() []Chunk { func init() { proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus.Sample") proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Labels)(nil), "prometheus.Labels") @@ -798,53 +1143,75 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 734 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xdb, 0x46, - 0x14, 0xf5, 0xf0, 0x29, 0x5e, 0xd9, 0x02, 0x3d, 0x50, 0x51, 0xd6, 0x68, 0x55, 0x81, 0x40, 0x01, - 0x2d, 0x0a, 0x19, 0x76, 0x37, 0x35, 0xd0, 0x8d, 0x6c, 0xd0, 0x0f, 0xd4, 0x94, 0xe0, 0x91, 0x84, - 0x3e, 0x36, 0xc2, 0x48, 0x1a, 0x4b, 0x44, 0xc4, 0x47, 0x38, 0x54, 0x60, 0x7d, 0x48, 0x76, 0xf9, - 0x83, 0x20, 0x8b, 0xfc, 0x85, 0x97, 0xf9, 0x82, 0x20, 0xf0, 0x97, 0x04, 0x33, 0xa4, 0x4c, 0x29, - 0x4e, 0x16, 0xce, 0xee, 0xde, 0x7b, 0xce, 0xb9, 0x8f, 0xb9, 0x97, 0x84, 0x6a, 0xb6, 0x4a, 0x18, - 0x6f, 0x27, 0x69, 0x9c, 0xc5, 0x18, 0x92, 0x34, 0x0e, 0x59, 0x36, 0x67, 0x4b, 0x7e, 0x50, 0x9f, - 0xc5, 0xb3, 0x58, 0x86, 0x0f, 0x85, 0x95, 0x33, 0xdc, 0x37, 0x0a, 0xd4, 0x7c, 0x96, 0xa5, 0xc1, - 0xc4, 0x67, 0x19, 0x9d, 0xd2, 0x8c, 0xe2, 0x13, 0xd0, 0x44, 0x0e, 0x07, 0x35, 0x51, 0xab, 0x76, - 0xfc, 0x5b, 0xbb, 0xcc, 0xd1, 0xde, 0x66, 0x16, 0xee, 0x60, 0x95, 0x30, 0x22, 0x25, 0xf8, 0x77, - 0xc0, 0xa1, 0x8c, 0x8d, 0x6e, 0x69, 0x18, 0x2c, 0x56, 0xa3, 0x88, 0x86, 0xcc, 0x51, 0x9a, 0xa8, - 0x65, 0x11, 0x3b, 0x47, 0xce, 0x25, 0xd0, 0xa5, 0x21, 0xc3, 0x18, 0xb4, 0x39, 0x5b, 0x24, 0x8e, - 0x26, 0x71, 0x69, 0x8b, 0xd8, 0x32, 0x0a, 0x32, 0x47, 0xcf, 0x63, 0xc2, 0x76, 0x57, 0x00, 0x65, - 0x25, 0x5c, 0x05, 0x73, 0xd8, 0xfd, 0xbb, 0xdb, 0xfb, 0xa7, 0x6b, 0xef, 0x08, 0xe7, 0xac, 0x37, - 0xec, 0x0e, 0x3c, 0x62, 0x23, 0x6c, 0x81, 0x7e, 0xd1, 0x19, 0x5e, 0x78, 0xb6, 0x82, 0xf7, 0xc0, - 0xba, 0xbc, 0xea, 0x0f, 0x7a, 0x17, 0xa4, 0xe3, 0xdb, 0x2a, 0xc6, 0x50, 0x93, 0x48, 0x19, 0xd3, - 0x84, 0xb4, 0x3f, 0xf4, 0xfd, 0x0e, 0xf9, 0xcf, 0xd6, 0x71, 0x05, 0xb4, 0xab, 0xee, 0x79, 0xcf, - 0x36, 0xf0, 0x2e, 0x54, 0xfa, 0x83, 0xce, 0xc0, 0xeb, 0x7b, 0x03, 0xdb, 0x74, 0xff, 0x02, 0xa3, - 0x4f, 0xc3, 0x64, 0xc1, 0x70, 0x1d, 0xf4, 0x57, 0x74, 0xb1, 0xcc, 0x9f, 0x05, 0x91, 0xdc, 0xc1, - 0x3f, 0x83, 0x95, 0x05, 0x21, 0xe3, 0x19, 0x0d, 
0x13, 0x39, 0xa7, 0x4a, 0xca, 0x80, 0x1b, 0x43, - 0xc5, 0xbb, 0x63, 0x61, 0xb2, 0xa0, 0x29, 0x3e, 0x04, 0x63, 0x41, 0xc7, 0x6c, 0xc1, 0x1d, 0xd4, - 0x54, 0x5b, 0xd5, 0xe3, 0xfd, 0xcd, 0x77, 0xbd, 0x16, 0xc8, 0xa9, 0x76, 0xff, 0xf1, 0xd7, 0x1d, - 0x52, 0xd0, 0xca, 0x82, 0xca, 0x37, 0x0b, 0xaa, 0x5f, 0x16, 0x7c, 0x8b, 0x00, 0x06, 0x41, 0xc8, - 0xfa, 0x2c, 0x0d, 0x18, 0x7f, 0x7e, 0xcd, 0x63, 0x30, 0xb9, 0x1c, 0x97, 0x3b, 0x8a, 0x54, 0xe0, - 0x4d, 0x45, 0xfe, 0x12, 0x85, 0x64, 0x4d, 0xc4, 0x7f, 0x82, 0xc5, 0x8a, 0x21, 0xb9, 0xa3, 0x4a, - 0x55, 0x7d, 0x53, 0xb5, 0x7e, 0x81, 0x42, 0x57, 0x92, 0xdd, 0x23, 0xd0, 0x65, 0x13, 0x62, 0xe9, - 0xf2, 0x50, 0x50, 0xbe, 0x74, 0x61, 0x6f, 0x8f, 0x6f, 0x15, 0xe3, 0xbb, 0x27, 0x60, 0x5c, 0xe7, - 0xad, 0x3e, 0x77, 0x36, 0xf7, 0x35, 0x82, 0x5d, 0x19, 0xf7, 0x69, 0x36, 0x99, 0xb3, 0x14, 0x1f, - 0x6d, 0xdd, 0xf9, 0x2f, 0x4f, 0xf4, 0x05, 0xaf, 0xbd, 0x71, 0xdf, 0xeb, 0x46, 0x95, 0xaf, 0x35, - 0xaa, 0x6e, 0x36, 0xda, 0x02, 0x4d, 0x5e, 0xab, 0x01, 0x8a, 0x77, 0x63, 0xef, 0x60, 0x13, 0xd4, - 0xae, 0x77, 0x63, 0x23, 0x11, 0x20, 0xe2, 0x42, 0x45, 0x80, 0x78, 0xb6, 0xea, 0xbe, 0x47, 0x60, - 0x11, 0x46, 0xa7, 0x97, 0x41, 0x94, 0x71, 0xfc, 0x23, 0x98, 0x3c, 0x63, 0xc9, 0x28, 0xe4, 0xb2, - 0x2f, 0x95, 0x18, 0xc2, 0xf5, 0xb9, 0x28, 0x7d, 0xbb, 0x8c, 0x26, 0xeb, 0xd2, 0xc2, 0xc6, 0x3f, - 0x41, 0x85, 0x67, 0x34, 0xcd, 0x04, 0x3b, 0xbf, 0x05, 0x53, 0xfa, 0x3e, 0xc7, 0x3f, 0x80, 0xc1, - 0xa2, 0xa9, 0x00, 0x34, 0x09, 0xe8, 0x2c, 0x9a, 0xfa, 0x1c, 0x1f, 0x40, 0x65, 0x96, 0xc6, 0xcb, - 0x24, 0x88, 0x66, 0x8e, 0xde, 0x54, 0x5b, 0x16, 0x79, 0xf4, 0x71, 0x0d, 0x94, 0xf1, 0xca, 0x31, - 0x9a, 0xa8, 0x55, 0x21, 0xca, 0x78, 0x25, 0xb2, 0xa7, 0x34, 0x9a, 0x31, 0x91, 0xc4, 0xcc, 0xb3, - 0x4b, 0xdf, 0xe7, 0xee, 0x3b, 0x04, 0xfa, 0xd9, 0x7c, 0x19, 0xbd, 0xc0, 0x0d, 0xa8, 0x86, 0x41, - 0x34, 0x12, 0x27, 0x58, 0xf6, 0x6c, 0x85, 0x41, 0x24, 0xce, 0xd0, 0xe7, 0x12, 0xa7, 0x77, 0x8f, - 0x78, 0xf1, 0x89, 0x84, 0xf4, 0xae, 0xc0, 0xdb, 0xc5, 0x12, 0x54, 0xb9, 0x84, 0x83, 0xcd, 0x25, - 0xc8, 0x02, 0x6d, 0x2f, 0x9a, 0xc4, 0xd3, 0x20, 0x9a, 0x95, 0x1b, 0x10, 0xbf, 0x1e, 0x39, 0xd5, - 0x2e, 0x91, 0xb6, 0xdb, 0x84, 0xca, 0x9a, 0xb5, 0xfd, 0x77, 0x30, 0x41, 0xfd, 0xb7, 0x47, 0x6c, - 0xe4, 0xbe, 0x84, 0x3d, 0x99, 0x8d, 0x4d, 0xbf, 0xf7, 0xcb, 0x38, 0x04, 0x63, 0x22, 0x32, 0xac, - 0x3f, 0x8c, 0xfd, 0x27, 0x9d, 0xae, 0x05, 0x39, 0xed, 0xb4, 0x7e, 0xff, 0xd0, 0x40, 0x1f, 0x1e, - 0x1a, 0xe8, 0xd3, 0x43, 0x03, 0xfd, 0x6f, 0x08, 0x76, 0x32, 0x1e, 0x1b, 0xf2, 0xaf, 0xfb, 0xc7, - 0xe7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa3, 0x6c, 0x23, 0xa6, 0x05, 0x00, 0x00, + // 1075 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0x5e, 0xdb, 0x89, 0x13, 0x9f, 0xfc, 0xd4, 0x3b, 0xda, 0x16, 0x53, 0xd1, 0x6d, 0xb0, 0x54, + 0x88, 0x10, 0xca, 0xaa, 0x85, 0x0b, 0x2a, 0x0a, 0xd2, 0x6e, 0xc9, 0xfe, 0x88, 0x26, 0x51, 0x27, + 0x59, 0x41, 0xb9, 0x89, 0x66, 0x93, 0xd9, 0xc4, 0xaa, 0xff, 0xf0, 0x4c, 0xaa, 0x0d, 0xef, 0xc1, + 0x1d, 0x2f, 0xc1, 0x3d, 0x12, 0xb7, 0xbd, 0xe4, 0x09, 0x10, 0xda, 0x2b, 0x1e, 0x03, 0xcd, 0xb1, + 0x1d, 0x3b, 0xdd, 0x82, 0x54, 0xee, 0xe6, 0x7c, 0xe7, 0x3b, 0x33, 0x9f, 0xe7, 0xfc, 0x8c, 0xa1, + 0x21, 0xd7, 0x31, 0x17, 0xbd, 0x38, 0x89, 0x64, 0x44, 0x20, 0x4e, 0xa2, 0x80, 0xcb, 0x25, 0x5f, + 0x89, 0xbb, 0x7b, 0x8b, 0x68, 0x11, 0x21, 0x7c, 0xa0, 0x56, 0x29, 0xc3, 0xfd, 0x45, 0x87, 0xf6, + 0x80, 0xcb, 0xc4, 0x9b, 0x0d, 0xb8, 0x64, 0x73, 0x26, 0x19, 0x79, 0x0c, 0x15, 0xb5, 0x87, 0xa3, + 0x75, 0xb4, 0x6e, 0xfb, 0xd1, 0x83, 
0x5e, 0xb1, 0x47, 0x6f, 0x9b, 0x99, 0x99, 0x93, 0x75, 0xcc, + 0x29, 0x86, 0x90, 0x4f, 0x81, 0x04, 0x88, 0x4d, 0x2f, 0x59, 0xe0, 0xf9, 0xeb, 0x69, 0xc8, 0x02, + 0xee, 0xe8, 0x1d, 0xad, 0x6b, 0x51, 0x3b, 0xf5, 0x1c, 0xa3, 0x63, 0xc8, 0x02, 0x4e, 0x08, 0x54, + 0x96, 0xdc, 0x8f, 0x9d, 0x0a, 0xfa, 0x71, 0xad, 0xb0, 0x55, 0xe8, 0x49, 0xa7, 0x9a, 0x62, 0x6a, + 0xed, 0xae, 0x01, 0x8a, 0x93, 0x48, 0x03, 0x6a, 0xe7, 0xc3, 0x6f, 0x87, 0xa3, 0xef, 0x86, 0xf6, + 0x8e, 0x32, 0x9e, 0x8e, 0xce, 0x87, 0x93, 0x3e, 0xb5, 0x35, 0x62, 0x41, 0xf5, 0xe4, 0xf0, 0xfc, + 0xa4, 0x6f, 0xeb, 0xa4, 0x05, 0xd6, 0xe9, 0xd9, 0x78, 0x32, 0x3a, 0xa1, 0x87, 0x03, 0xdb, 0x20, + 0x04, 0xda, 0xe8, 0x29, 0xb0, 0x8a, 0x0a, 0x1d, 0x9f, 0x0f, 0x06, 0x87, 0xf4, 0x85, 0x5d, 0x25, + 0x75, 0xa8, 0x9c, 0x0d, 0x8f, 0x47, 0xb6, 0x49, 0x9a, 0x50, 0x1f, 0x4f, 0x0e, 0x27, 0xfd, 0x71, + 0x7f, 0x62, 0xd7, 0xdc, 0x27, 0x60, 0x8e, 0x59, 0x10, 0xfb, 0x9c, 0xec, 0x41, 0xf5, 0x15, 0xf3, + 0x57, 0xe9, 0xb5, 0x68, 0x34, 0x35, 0xc8, 0x07, 0x60, 0x49, 0x2f, 0xe0, 0x42, 0xb2, 0x20, 0xc6, + 0xef, 0x34, 0x68, 0x01, 0xb8, 0x11, 0xd4, 0xfb, 0x57, 0x3c, 0x88, 0x7d, 0x96, 0x90, 0x03, 0x30, + 0x7d, 0x76, 0xc1, 0x7d, 0xe1, 0x68, 0x1d, 0xa3, 0xdb, 0x78, 0xb4, 0x5b, 0xbe, 0xd7, 0x67, 0xca, + 0x73, 0x54, 0x79, 0xfd, 0xe7, 0xfd, 0x1d, 0x9a, 0xd1, 0x8a, 0x03, 0xf5, 0x7f, 0x3d, 0xd0, 0x78, + 0xf3, 0xc0, 0xdf, 0xab, 0x60, 0x9d, 0x7a, 0x42, 0x46, 0x8b, 0x84, 0x05, 0xe4, 0x1e, 0x58, 0xb3, + 0x68, 0x15, 0xca, 0xa9, 0x17, 0x4a, 0x94, 0x5d, 0x39, 0xdd, 0xa1, 0x75, 0x84, 0xce, 0x42, 0x49, + 0x3e, 0x84, 0x46, 0xea, 0xbe, 0xf4, 0x23, 0x26, 0xd3, 0x63, 0x4e, 0x77, 0x28, 0x20, 0x78, 0xac, + 0x30, 0x62, 0x83, 0x21, 0x56, 0x01, 0x9e, 0xa3, 0x51, 0xb5, 0x24, 0x77, 0xc0, 0x14, 0xb3, 0x25, + 0x0f, 0x18, 0x66, 0x6d, 0x97, 0x66, 0x16, 0x79, 0x00, 0xed, 0x9f, 0x78, 0x12, 0x4d, 0xe5, 0x32, + 0xe1, 0x62, 0x19, 0xf9, 0x73, 0xcc, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20, 0xf9, 0x28, 0xa3, + 0x15, 0xba, 0x4c, 0xd4, 0xa5, 0xd1, 0xa6, 0xc2, 0x9f, 0xe6, 0xda, 0x3e, 0x01, 0xbb, 0xc4, 0x4b, + 0x05, 0xd6, 0x50, 0xa0, 0x46, 0xdb, 0x1b, 0x66, 0x2a, 0xf2, 0x2b, 0x68, 0x87, 0x7c, 0xc1, 0xa4, + 0xf7, 0x8a, 0x4f, 0x45, 0xcc, 0x42, 0xe1, 0xd4, 0xf1, 0x86, 0xef, 0x94, 0x6f, 0xf8, 0x68, 0x35, + 0x7b, 0xc9, 0xe5, 0x38, 0x66, 0x21, 0x6d, 0xe5, 0x6c, 0x65, 0x09, 0xf2, 0x31, 0xdc, 0xda, 0x84, + 0xcf, 0xb9, 0x2f, 0x99, 0x70, 0xac, 0x8e, 0xd1, 0x25, 0x74, 0xb3, 0xeb, 0x37, 0x88, 0x6e, 0x11, + 0x51, 0x97, 0x70, 0xa0, 0x63, 0x74, 0xb5, 0x82, 0x88, 0xa2, 0x84, 0x12, 0x14, 0x47, 0xc2, 0x2b, + 0x09, 0x6a, 0xfc, 0xb7, 0xa0, 0x9c, 0xbd, 0x11, 0xb4, 0x09, 0xcf, 0x04, 0x35, 0x53, 0x41, 0x39, + 0x5c, 0x08, 0xda, 0x10, 0x33, 0x41, 0xad, 0x54, 0x50, 0x0e, 0x67, 0x82, 0xbe, 0x06, 0x48, 0xb8, + 0xe0, 0x72, 0xba, 0x54, 0x37, 0xde, 0xc6, 0xbe, 0xbe, 0x5f, 0x16, 0xb3, 0xa9, 0x99, 0x1e, 0x55, + 0xbc, 0x53, 0x2f, 0x94, 0xd4, 0x4a, 0xf2, 0xe5, 0x76, 0xd1, 0xdd, 0x7a, 0xb3, 0xe8, 0x3e, 0x07, + 0x6b, 0x13, 0xb5, 0xdd, 0x9d, 0x35, 0x30, 0x5e, 0xf4, 0xc7, 0xb6, 0x46, 0x4c, 0xd0, 0x87, 0x23, + 0x5b, 0x2f, 0x3a, 0xd4, 0x38, 0xaa, 0x41, 0x15, 0x35, 0x1f, 0x35, 0x01, 0x8a, 0x54, 0xbb, 0x4f, + 0x00, 0x8a, 0x9b, 0x51, 0xd5, 0x16, 0x5d, 0x5e, 0x0a, 0x9e, 0x96, 0xef, 0x2e, 0xcd, 0x2c, 0x85, + 0xfb, 0x3c, 0x5c, 0xc8, 0x25, 0x56, 0x6d, 0x8b, 0x66, 0x96, 0xfb, 0xb7, 0x06, 0x30, 0xf1, 0x02, + 0x3e, 0xe6, 0x89, 0xc7, 0xc5, 0xbb, 0xf7, 0xdc, 0x23, 0xa8, 0x09, 0x6c, 0x77, 0xe1, 0xe8, 0x18, + 0x41, 0xca, 0x11, 0xe9, 0x24, 0xc8, 0x42, 0x72, 0x22, 0xf9, 0x02, 0x2c, 0x9e, 0x35, 0xb9, 0x70, + 0x0c, 0x8c, 0xda, 0x2b, 0x47, 0xe5, 0x13, 0x20, 0x8b, 0x2b, 
0xc8, 0xe4, 0x4b, 0x80, 0x65, 0x7e, + 0xf1, 0xc2, 0xa9, 0x60, 0xe8, 0xed, 0xb7, 0xa6, 0x25, 0x8b, 0x2d, 0xd1, 0xdd, 0x87, 0x50, 0xc5, + 0x2f, 0x50, 0x13, 0x13, 0xa7, 0xac, 0x96, 0x4e, 0x4c, 0xb5, 0xde, 0x9e, 0x1d, 0x56, 0x36, 0x3b, + 0xdc, 0xc7, 0x60, 0x3e, 0x4b, 0xbf, 0xf3, 0x5d, 0x2f, 0xc6, 0xfd, 0x59, 0x83, 0x26, 0xe2, 0x03, + 0x26, 0x67, 0x4b, 0x9e, 0x90, 0x87, 0x5b, 0x8f, 0xc4, 0xbd, 0x1b, 0xf1, 0x19, 0xaf, 0x57, 0x7a, + 0x1c, 0x72, 0xa1, 0xfa, 0xdb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8, 0xe0, 0xa8, 0x37, 0x41, 0xef, + 0x3f, 0x4f, 0xeb, 0x68, 0xd8, 0x7f, 0x9e, 0xd6, 0x11, 0x55, 0xe3, 0x5d, 0x01, 0xb4, 0x6f, 0x1b, + 0xee, 0xaf, 0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0x7b, 0x50, 0x13, 0x92, 0xc7, 0xd3, + 0x40, 0xa0, 0x2e, 0x83, 0x9a, 0xca, 0x1c, 0x08, 0x75, 0xf4, 0xe5, 0x2a, 0x9c, 0xe5, 0x47, 0xab, + 0x35, 0x79, 0x1f, 0xea, 0x42, 0xb2, 0x44, 0x2a, 0x76, 0x3a, 0x48, 0x6b, 0x68, 0x0f, 0x04, 0xb9, + 0x0d, 0x26, 0x0f, 0xe7, 0x53, 0x4c, 0x8a, 0x72, 0x54, 0x79, 0x38, 0x1f, 0x08, 0x72, 0x17, 0xea, + 0x8b, 0x24, 0x5a, 0xc5, 0x5e, 0xb8, 0x70, 0xaa, 0x1d, 0xa3, 0x6b, 0xd1, 0x8d, 0x4d, 0xda, 0xa0, + 0x5f, 0xac, 0x71, 0x98, 0xd5, 0xa9, 0x7e, 0xb1, 0x56, 0xbb, 0x27, 0x2c, 0x5c, 0x70, 0xb5, 0x49, + 0x2d, 0xdd, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd3, 0xa0, 0xfa, 0x74, 0xb9, 0x0a, 0x5f, 0x92, 0x7d, + 0x68, 0x04, 0x5e, 0x38, 0x55, 0xad, 0x54, 0x68, 0xb6, 0x02, 0x2f, 0x54, 0x35, 0x3c, 0x10, 0xe8, + 0x67, 0x57, 0x1b, 0x7f, 0xf6, 0xbe, 0x04, 0xec, 0x2a, 0xf3, 0xf7, 0xb2, 0x24, 0x18, 0x98, 0x84, + 0xbb, 0xe5, 0x24, 0xe0, 0x01, 0xbd, 0x7e, 0x38, 0x8b, 0xe6, 0x5e, 0xb8, 0x28, 0x32, 0xa0, 0xde, + 0x6d, 0xfc, 0xaa, 0x26, 0xc5, 0xb5, 0x7b, 0x00, 0xf5, 0x9c, 0x75, 0xa3, 0x79, 0xbf, 0x1f, 0xa9, + 0x67, 0x75, 0xeb, 0x2d, 0xd5, 0xdd, 0x1f, 0xa1, 0x85, 0x9b, 0xf3, 0xf9, 0xff, 0xed, 0xb2, 0x03, + 0x30, 0x67, 0x6a, 0x87, 0xbc, 0xc9, 0x76, 0x6f, 0x08, 0xcf, 0x03, 0x52, 0xda, 0xd1, 0xde, 0xeb, + 0xeb, 0x7d, 0xed, 0x8f, 0xeb, 0x7d, 0xed, 0xaf, 0xeb, 0x7d, 0xed, 0x07, 0x53, 0xb1, 0xe3, 0x8b, + 0x0b, 0x13, 0xff, 0x60, 0x3e, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0x36, 0xd7, 0x1e, 0xb4, 0xf2, + 0x08, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -990,7 +1357,7 @@ func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { +func (m *Histogram) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1000,12 +1367,12 @@ func (m *TimeSeries) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1014,10 +1381,49 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := 
math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1025,13 +1431,42 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x5a } } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1039,48 +1474,239 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x42 } } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err } - i-- - dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *Label) Marshal() (dAtA []byte, err error) { +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Label) MarshalTo(dAtA []byte) (int, error) { +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } if len(m.Value) > 0 { @@ -1444,6 +2070,125 @@ func (m *Exemplar) Size() (n int) { return n } +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *TimeSeries) Size() (n int) { if m == nil { return 0 @@ -1468,6 +2213,12 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1989,7 +2740,7 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error { } return nil } -func (m *TimeSeries) Unmarshal(dAtA []byte) error { +func (m *Histogram) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2012,17 +2763,17 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) } - var msglen int + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2032,26 +2783,643 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) } - iNdEx = postIndex + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 
0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) @@ -2120,6 +3488,40 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index 6ba7074a5d3..e6a1e107c9e 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -54,13 +54,79 @@ message Exemplar { int64 timestamp = 3; } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. + NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. 
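+    // For example, the first histogram sent after a target restart would
+    // typically carry YES, steady-state samples NO, and senders that cannot
+    // track resets leave the hint at UNKNOWN.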
+ } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + // TimeSeries represents samples and labels for a single time series. message TimeSeries { // For a timeseries to be valid, and for the samples and exemplars // to be ingested by the remote system properly, the labels field is required. - repeated Label labels = 1 [(gogoproto.nullable) = false]; - repeated Sample samples = 2 [(gogoproto.nullable) = false]; - repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; } message Label { @@ -103,8 +169,9 @@ message Chunk { // We require this to match chunkenc.Encoding. 
enum Encoding { - UNKNOWN = 0; - XOR = 1; + UNKNOWN = 0; + XOR = 1; + HISTOGRAM = 2; } Encoding type = 3; bytes data = 4; diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index b6842e8f47d..a2b43848932 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -35,12 +35,15 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/stats" ) @@ -197,7 +200,6 @@ func (q *query) Exec(ctx context.Context) *Result { // Exec query. res, warnings, err := q.ng.exec(ctx, q) - return &Result{Err: err, Value: res, Warnings: warnings} } @@ -676,7 +678,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval for i, s := range mat { // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. - vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}} + vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, H: s.Points[0].H, T: start}} } return vector, warnings, nil case parser.ValueTypeScalar: @@ -823,6 +825,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS return start, end } +func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { + var interval time.Duration + for _, node := range path { + switch n := node.(type) { + case *parser.SubqueryExpr: + interval = n.Step + if n.Step == 0 { + interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond + } + } + } + return interval +} + func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. // The evaluation of the VectorSelector inside then evaluates the given range and unsets @@ -833,10 +849,14 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { switch n := node.(type) { case *parser.VectorSelector: start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + interval := ng.getLastSubqueryInterval(path) + if interval == 0 { + interval = s.Interval + } hints := &storage.SelectHints{ Start: start, End: end, - Step: durationMilliseconds(s.Interval), + Step: durationMilliseconds(interval), Range: durationMilliseconds(evalRange), Func: extractFuncFromPath(path), } @@ -962,8 +982,10 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error case errWithWarnings: *errp = err.err *ws = append(*ws, err.warnings...) + case error: + *errp = err default: - *errp = e.(error) + *errp = fmt.Errorf("%v", err) } } @@ -992,7 +1014,7 @@ type EvalNodeHelper struct { // Caches. // DropMetricName and label_*. Dmn map[uint64]labels.Labels - // funcHistogramQuantile. + // funcHistogramQuantile for conventional histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets // label_replace. 
regex *regexp.Regexp @@ -1243,7 +1265,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.AggregateExpr: // Grouping labels must be sorted (expected both by generateGroupingKey() and aggregation()). sortedGrouping := e.Grouping - sort.Strings(sortedGrouping) + slices.Sort(sortedGrouping) // Prepare a function to initialise series helpers with the grouping key. buf := make([]byte, 0, 1024) @@ -1409,7 +1431,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points))) enh.Out = outVec[:0] if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts}) + ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts}) } // Only buffer stepRange milliseconds from the second step on. it.ReduceDelta(stepRange) @@ -1562,10 +1584,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ - _, v, ok := ev.vectorSelectorSingle(it, e, ts) + _, v, h, ok := ev.vectorSelectorSingle(it, e, ts) if ok { if ev.currentSamples < ev.maxSamples { - ss.Points = append(ss.Points, Point{V: v, T: ts}) + ss.Points = append(ss.Points, Point{V: v, H: h, T: ts}) ev.samplesStats.IncrementSamplesAtStep(step, 1) ev.currentSamples++ } else { @@ -1675,6 +1697,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { mat[i].Points = append(mat[i].Points, Point{ T: ts, V: mat[i].Points[0].V, + H: mat[i].Points[0].H, }) ev.currentSamples++ if ev.currentSamples > ev.maxSamples { @@ -1700,11 +1723,11 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect for i, s := range node.Series { it.Reset(s.Iterator()) - t, v, ok := ev.vectorSelectorSingle(it, node, ts) + t, v, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { vec = append(vec, Sample{ Metric: node.Series[i].Labels(), - Point: Point{V: v, T: t}, + Point: Point{V: v, H: h, T: t}, }) ev.currentSamples++ @@ -1719,33 +1742,39 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect return vec, ws } -// vectorSelectorSingle evaluates a instant vector for the iterator of one time series. -func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) { +// vectorSelectorSingle evaluates an instant vector for the iterator of one time series. 
+func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) ( + int64, float64, *histogram.FloatHistogram, bool, +) { refTime := ts - durationMilliseconds(node.Offset) var t int64 var v float64 + var h *histogram.FloatHistogram - ok := it.Seek(refTime) - if !ok { + valueType := it.Seek(refTime) + switch valueType { + case chunkenc.ValNone: if it.Err() != nil { ev.error(it.Err()) } - } - - if ok { + case chunkenc.ValFloat: t, v = it.At() + case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: + t, h = it.AtFloatHistogram() + default: + panic(fmt.Errorf("unknown value type %v", valueType)) } - - if !ok || t > refTime { - t, v, ok = it.PeekPrev() + if valueType == chunkenc.ValNone || t > refTime { + var ok bool + t, v, _, h, ok = it.PeekPrev() if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { - return 0, 0, false + return 0, 0, nil, false } } - if value.IsStaleNaN(v) { - return 0, 0, false + if value.IsStaleNaN(v) || (h != nil && value.IsStaleNaN(h.Sum)) { + return 0, 0, nil, false } - return t, v, true + return t, v, h, true } var pointPool = sync.Pool{} @@ -1830,30 +1859,59 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m out = out[:0] } - ok := it.Seek(maxt) - if !ok { + soughtValueType := it.Seek(maxt) + if soughtValueType == chunkenc.ValNone { if it.Err() != nil { ev.error(it.Err()) } } buf := it.Buffer() - for buf.Next() { - t, v := buf.At() - if value.IsStaleNaN(v) { - continue +loop: + for { + switch buf.Next() { + case chunkenc.ValNone: + break loop + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + t, h := buf.AtFloatHistogram() + if value.IsStaleNaN(h.Sum) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples++ + out = append(out, Point{T: t, H: h}) + } + case chunkenc.ValFloat: + t, v := buf.At() + if value.IsStaleNaN(v) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples++ + out = append(out, Point{T: t, V: v}) + } } - // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + } + // The sought sample might also be in the range. + switch soughtValueType { + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + t, h := it.AtFloatHistogram() + if t == maxt && !value.IsStaleNaN(h.Sum) { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } + out = append(out, Point{T: t, H: h}) ev.currentSamples++ - out = append(out, Point{T: t, V: v}) } - } - // The seeked sample might also be in the range. - if ok { + case chunkenc.ValFloat: t, v := it.At() if t == maxt && !value.IsStaleNaN(v) { if ev.currentSamples >= ev.maxSamples { @@ -2011,10 +2069,12 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // Account for potentially swapped sidedness. 
vl, vr := ls.V, rs.V + hl, hr := ls.H, rs.H if matching.Card == parser.CardOneToMany { vl, vr = vr, vl + hl, hr = hr, hl } - value, keep := vectorElemBinop(op, vl, vr) + value, histogramValue, keep := vectorElemBinop(op, vl, vr, hl, hr) if returnBool { if keep { value = 1.0 @@ -2049,23 +2109,26 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * insertedSigs[insertSig] = struct{}{} } - enh.Out = append(enh.Out, Sample{ - Metric: metric, - Point: Point{V: value}, - }) + if (hl != nil && hr != nil) || (hl == nil && hr == nil) { + // Both lhs and rhs are of same type. + enh.Out = append(enh.Out, Sample{ + Metric: metric, + Point: Point{V: value, H: histogramValue}, + }) + } } return enh.Out } func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string { if on { - sort.Strings(names) + slices.Sort(names) return func(lset labels.Labels) string { return string(lset.BytesWithLabels(b, names...)) } } names = append([]string{labels.MetricName}, names...) - sort.Strings(names) + slices.Sort(names) return func(lset labels.Labels) string { return string(lset.BytesWithoutLabels(b, names...)) } @@ -2130,7 +2193,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala if swap { lv, rv = rv, lv } - value, keep := vectorElemBinop(op, lv, rv) + value, _, keep := vectorElemBinop(op, lv, rv, nil, nil) // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { @@ -2193,45 +2256,56 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { switch op { case parser.ADD: - return lhs + rhs, true + if hlhs != nil && hrhs != nil { + // The histogram being added must have the larger schema + // code (i.e. the higher resolution). 
+ if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Add(hrhs), true + } + return 0, hrhs.Copy().Add(hlhs), true + } + return lhs + rhs, nil, true case parser.SUB: - return lhs - rhs, true + return lhs - rhs, nil, true case parser.MUL: - return lhs * rhs, true + return lhs * rhs, nil, true case parser.DIV: - return lhs / rhs, true + return lhs / rhs, nil, true case parser.POW: - return math.Pow(lhs, rhs), true + return math.Pow(lhs, rhs), nil, true case parser.MOD: - return math.Mod(lhs, rhs), true + return math.Mod(lhs, rhs), nil, true case parser.EQLC: - return lhs, lhs == rhs + return lhs, nil, lhs == rhs case parser.NEQ: - return lhs, lhs != rhs + return lhs, nil, lhs != rhs case parser.GTR: - return lhs, lhs > rhs + return lhs, nil, lhs > rhs case parser.LSS: - return lhs, lhs < rhs + return lhs, nil, lhs < rhs case parser.GTE: - return lhs, lhs >= rhs + return lhs, nil, lhs >= rhs case parser.LTE: - return lhs, lhs <= rhs + return lhs, nil, lhs <= rhs case parser.ATAN2: - return math.Atan2(lhs, rhs), true + return math.Atan2(lhs, rhs), nil, true } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } type groupedAggregation struct { - labels labels.Labels - value float64 - mean float64 - groupCount int - heap vectorByValueHeap - reverseHeap vectorByReverseValueHeap + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + labels labels.Labels + value float64 + histogramValue *histogram.FloatHistogram + mean float64 + groupCount int + heap vectorByValueHeap + reverseHeap vectorByReverseValueHeap } // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels @@ -2267,7 +2341,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // operator is less frequently used than other aggregations, we're fine having to // re-compute the grouping key on each step for this case. grouping = append(grouping, valueLabel) - sort.Strings(grouping) + slices.Sort(grouping) recomputeGroupingKey = true } } @@ -2311,6 +2385,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without mean: s.V, groupCount: 1, } + if s.H == nil { + newAgg.hasFloat = true + } else if op == parser.SUM { + newAgg.histogramValue = s.H.Copy() + newAgg.hasHistogram = true + } result[groupingKey] = newAgg orderedResult = append(orderedResult, newAgg) @@ -2345,7 +2425,26 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without switch op { case parser.SUM: - group.value += s.V + if s.H != nil { + group.hasHistogram = true + if group.histogramValue != nil { + // The histogram being added must have + // an equal or larger schema. + if s.H.Schema >= group.histogramValue.Schema { + group.histogramValue.Add(s.H) + } else { + h := s.H.Copy() + h.Add(group.histogramValue) + group.histogramValue = h + } + } + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. + } else { + group.hasFloat = true + group.value += s.V + } case parser.AVG: group.groupCount++ @@ -2479,13 +2578,18 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.QUANTILE: aggr.value = quantile(q, aggr.heap) + case parser.SUM: + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + continue + } default: // For other aggregations, we already have the right value. 
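(Illustrative only, not part of the vendored change: a minimal sketch of the schema rule applied above when adding two native histograms. The operand with the lower schema, i.e. the coarser resolution, is copied and used as the base, so the sum keeps the coarser schema. The helper name is hypothetical.)

	func addFloatHistograms(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
		// The histogram passed to Add must have the larger (or equal) schema,
		// mirroring vectorElemBinop and the SUM aggregation above.
		if b.Schema >= a.Schema {
			return a.Copy().Add(b)
		}
		return b.Copy().Add(a)
	}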
} enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - Point: Point{V: aggr.value}, + Point: Point{V: aggr.value, H: aggr.histogramValue}, }) } return enh.Out diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index e25f3f119e1..8e9e6f96548 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -24,6 +24,7 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" ) @@ -66,9 +67,11 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] - rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) - rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + samples = vals[0].(Matrix)[0] + rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + resultValue float64 + resultHistogram *histogram.FloatHistogram ) // No sense in trying to compute a rate without at least two points. Drop @@ -77,14 +80,32 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } - resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V - if isCounter { - var lastValue float64 - for _, sample := range samples.Points { - if sample.V < lastValue { - resultValue += lastValue + if samples.Points[0].H != nil { + resultHistogram = histogramRate(samples.Points, isCounter) + if resultHistogram == nil { + // Points are a mix of floats and histograms, or the histograms + // are not compatible with each other. + // TODO(beorn7): find a way of communicating the exact reason + return enh.Out + } + } else { + resultValue = samples.Points[len(samples.Points)-1].V - samples.Points[0].V + prevValue := samples.Points[0].V + // We have to iterate through everything even in the non-counter + // case because we have to check that everything is a float. + // TODO(beorn7): Find a way to check that earlier, e.g. by + // handing in a []FloatPoint and a []HistogramPoint separately. + for _, currPoint := range samples.Points[1:] { + if currPoint.H != nil { + return nil // Range contains a mix of histograms and floats. + } + if !isCounter { + continue + } + if currPoint.V < prevValue { + resultValue += prevValue } - lastValue = sample.V + prevValue = currPoint.V } } @@ -95,6 +116,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + // TODO(beorn7): Do this for histograms, too. if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { // Counters cannot be negative. If we have any slope at // all (i.e. 
resultValue went up), we can extrapolate @@ -126,16 +148,69 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod } else { extrapolateToInterval += averageDurationBetweenSamples / 2 } - resultValue = resultValue * (extrapolateToInterval / sampledInterval) + factor := extrapolateToInterval / sampledInterval if isRate { - resultValue = resultValue / ms.Range.Seconds() + factor /= ms.Range.Seconds() + } + if resultHistogram == nil { + resultValue *= factor + } else { + resultHistogram.Scale(factor) } return append(enh.Out, Sample{ - Point: Point{V: resultValue}, + Point: Point{V: resultValue, H: resultHistogram}, }) } +// histogramRate is a helper function for extrapolatedRate. It requires +// points[0] to be a histogram. It returns nil if any other Point in points is +// not a histogram. +func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { + prev := points[0].H // We already know that this is a histogram. + last := points[len(points)-1].H + if last == nil { + return nil // Range contains a mix of histograms and floats. + } + minSchema := prev.Schema + if last.Schema < minSchema { + minSchema = last.Schema + } + + // First iteration to find out two things: + // - What's the smallest relevant schema? + // - Are all data points histograms? + // TODO(beorn7): Find a way to check that earlier, e.g. by handing in a + // []FloatPoint and a []HistogramPoint separately. + for _, currPoint := range points[1 : len(points)-1] { + curr := currPoint.H + if curr == nil { + return nil // Range contains a mix of histograms and floats. + } + if !isCounter { + continue + } + if curr.Schema < minSchema { + minSchema = curr.Schema + } + } + + h := last.CopyToSchema(minSchema) + h.Sub(prev) + + if isCounter { + // Second iteration to deal with counter resets. + for _, currPoint := range points[1:] { + curr := currPoint.H + if curr.DetectReset(prev) { + h.Add(prev) + } + prev = curr + } + } + return h.Compact(0) +} + // === delta(Matrix parser.ValueTypeMatrix) Vector === func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return extrapolatedRate(vals, args, enh, false, false) @@ -793,6 +868,59 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo }) } +// === histogram_count(Vector parser.ValueTypeVector) Vector === +func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: sample.H.Count}, + }) + } + return enh.Out +} + +// === histogram_sum(Vector parser.ValueTypeVector) Vector === +func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: sample.H.Sum}, + }) + } + return enh.Out +} + +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + lower := vals[0].(Vector)[0].V + upper := vals[1].(Vector)[0].V + inVec := vals[2].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
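(Illustrative only, not part of the vendored change: the essence of the native-histogram rate computed by histogramRate and extrapolatedRate above, with counter-reset handling and range extrapolation omitted. The function name and parameters are hypothetical.)

	func naiveHistogramRate(first, last *histogram.FloatHistogram, rangeSeconds float64) *histogram.FloatHistogram {
		// Convert to the smaller of the two schemas before subtracting.
		minSchema := first.Schema
		if last.Schema < minSchema {
			minSchema = last.Schema
		}
		rate := last.CopyToSchema(minSchema)
		rate.Sub(first)
		rate.Scale(1 / rangeSeconds) // per-second rate
		return rate
	}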
+ if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: histogramFraction(lower, upper, sample.H)}, + }) + } + return enh.Out +} + // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { q := vals[0].(Vector)[0].V @@ -805,26 +933,57 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev v.buckets = v.buckets[:0] } } - for _, el := range inVec { + + var histogramSamples []Sample + + for _, sample := range inVec { + // We are only looking for conventional buckets here. Remember + // the histograms for later treatment. + if sample.H != nil { + histogramSamples = append(histogramSamples, sample) + continue + } + upperBound, err := strconv.ParseFloat( - el.Metric.Get(model.BucketLabel), 64, + sample.Metric.Get(model.BucketLabel), 64, ) if err != nil { // Oops, no bucket label or malformed label value. Skip. // TODO(beorn7): Issue a warning somehow. continue } - enh.lblBuf = el.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) + enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)] if !ok { - el.Metric = labels.NewBuilder(el.Metric). + sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). Labels(nil) - mb = &metricWithBuckets{el.Metric, nil} + mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } - mb.buckets = append(mb.buckets, bucket{upperBound, el.V}) + mb.buckets = append(mb.buckets, bucket{upperBound, sample.V}) + + } + + // Now deal with the histograms. + for _, sample := range histogramSamples { + // We have to reconstruct the exact same signature as above for + // a conventional histogram, just ignoring any le label. + enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) + if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { + // At this data point, we have conventional histogram + // buckets and a native histogram with the same name and + // labels. Do not evaluate anything. + // TODO(beorn7): Issue a warning somehow. 
+ delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) + continue + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: histogramQuantile(q, sample.H)}, + }) } for _, mb := range enh.signatureToMetricWithBuckets { @@ -1103,7 +1262,10 @@ var FunctionCalls = map[string]FunctionCall{ "deriv": funcDeriv, "exp": funcExp, "floor": funcFloor, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, "hour": funcHour, "idelta": funcIdelta, diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index 92afff8b238..450021328be 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -163,6 +163,21 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_count": { + Name: "histogram_count", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_sum": { + Name: "histogram_sum", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_fraction": { + Name: "histogram_fraction", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_quantile": { Name: "histogram_quantile", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go index a3abf68dd02..1561a2ce880 100644 --- a/vendor/github.com/prometheus/prometheus/promql/quantile.go +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -17,6 +17,7 @@ import ( "math" "sort" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) @@ -119,6 +120,176 @@ func bucketQuantile(q float64, buckets buckets) float64 { return bucketStart + (bucketEnd-bucketStart)*(rank/count) } +// histogramQuantile calculates the quantile 'q' based on the given histogram. +// +// The quantile value is interpolated assuming a linear distribution within a +// bucket. +// TODO(beorn7): Find an interpolation method that is a better fit for +// exponential buckets (and think about configurable interpolation). +// +// A natural lower bound of 0 is assumed if the histogram has only positive +// buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has +// only negative buckets. +// TODO(beorn7): Come to terms if we want that. +// +// There are a number of special cases (once we have a way to report errors +// happening during evaluations of AST functions, we should report those +// explicitly): +// +// If the histogram has 0 observations, NaN is returned. +// +// If q<0, -Inf is returned. +// +// If q>1, +Inf is returned. +// +// If q is NaN, NaN is returned. 
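(Illustrative only, not part of the vendored change: a worked example of the linear interpolation performed below. If the target rank lands in a bucket spanning (1, 2] that holds 4 observations, and 1 unit of rank remains once that bucket is reached, the estimated quantile is 1 + (2-1)*(1/4) = 1.25.)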
+func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + + if h.Count == 0 || math.IsNaN(q) { + return math.NaN() + } + + var ( + bucket histogram.Bucket[float64] + count float64 + it = h.AllBucketIterator() + rank = q * h.Count + ) + for it.Next() { + bucket = it.At() + count += bucket.Count + if count >= rank { + break + } + } + if bucket.Lower < 0 && bucket.Upper > 0 { + if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 { + // The result is in the zero bucket and the histogram has only + // positive buckets. So we consider 0 to be the lower bound. + bucket.Lower = 0 + } else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 { + // The result is in the zero bucket and the histogram has only + // negative buckets. So we consider 0 to be the upper bound. + bucket.Upper = 0 + } + } + // Due to numerical inaccuracies, we could end up with a higher count + // than h.Count. Thus, make sure count is never higher than h.Count. + if count > h.Count { + count = h.Count + } + // We could have hit the highest bucket without even reaching the rank + // (this should only happen if the histogram contains observations of + // the value NaN), in which case we simply return the upper limit of the + // highest explicit bucket. + if count < rank { + return bucket.Upper + } + + rank -= count - bucket.Count + // TODO(codesome): Use a better estimation than linear. + return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) +} + +// histogramFraction calculates the fraction of observations between the +// provided lower and upper bounds, based on the provided histogram. +// +// histogramFraction is in a certain way the inverse of histogramQuantile. If +// histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h) +// returns 0.9. +// +// The same notes (and TODOs) with regard to interpolation and assumptions about +// the zero bucket boundaries apply as for histogramQuantile. +// +// Whether either boundary is inclusive or exclusive doesn’t actually matter as +// long as interpolation has to be performed anyway. In the case of a boundary +// coinciding with a bucket boundary, the inclusive or exclusive nature of the +// boundary determines the exact behavior of the threshold. With the current +// implementation, that means that lower is exclusive for positive values and +// inclusive for negative values, while upper is inclusive for positive values +// and exclusive for negative values. +// +// Special cases: +// +// If the histogram has 0 observations, NaN is returned. +// +// Use a lower bound of -Inf to get the fraction of all observations below the +// upper bound. +// +// Use an upper bound of +Inf to get the fraction of all observations above the +// lower bound. +// +// If lower or upper is NaN, NaN is returned. +// +// If lower >= upper and the histogram has at least 1 observation, zero is returned. +func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float64 { + if h.Count == 0 || math.IsNaN(lower) || math.IsNaN(upper) { + return math.NaN() + } + if lower >= upper { + return 0 + } + + var ( + rank, lowerRank, upperRank float64 + lowerSet, upperSet bool + it = h.AllBucketIterator() + ) + for it.Next() { + b := it.At() + if b.Lower < 0 && b.Upper > 0 { + if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 { + // This is the zero bucket and the histogram has only + // positive buckets. 
So we consider 0 to be the lower + // bound. + b.Lower = 0 + } else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 { + // This is in the zero bucket and the histogram has only + // negative buckets. So we consider 0 to be the upper + // bound. + b.Upper = 0 + } + } + if !lowerSet && b.Lower >= lower { + lowerRank = rank + lowerSet = true + } + if !upperSet && b.Lower >= upper { + upperRank = rank + upperSet = true + } + if lowerSet && upperSet { + break + } + if !lowerSet && b.Lower < lower && b.Upper > lower { + lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower) + lowerSet = true + } + if !upperSet && b.Lower < upper && b.Upper > upper { + upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower) + upperSet = true + } + if lowerSet && upperSet { + break + } + rank += b.Count + } + if !lowerSet || lowerRank > h.Count { + lowerRank = h.Count + } + if !upperSet || upperRank > h.Count { + upperRank = h.Count + } + + return (upperRank - lowerRank) / h.Count +} + // coalesceBuckets merges buckets with the same upper bound. // // The input buckets must be sorted. diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index c3d3bffdb97..507a5e6f158 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -20,6 +20,7 @@ import ( "strconv" "strings" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -63,8 +64,8 @@ func (s Scalar) MarshalJSON() ([]byte, error) { // Series is a stream of data points belonging to a metric. type Series struct { - Metric labels.Labels `json:"metric"` - Points []Point `json:"values"` + Metric labels.Labels + Points []Point } func (s Series) String() string { @@ -75,15 +76,48 @@ func (s Series) String() string { return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) } +// MarshalJSON is mirrored in web/api/v1/api.go for efficiency reasons. +// This implementation is still provided for debug purposes and usage +// without jsoniter. +func (s Series) MarshalJSON() ([]byte, error) { + // Note that this is rather inefficient because it re-creates the whole + // series, just separated by Histogram Points and Value Points. For API + // purposes, there is a more efficcient jsoniter implementation in + // web/api/v1/api.go. + series := struct { + M labels.Labels `json:"metric"` + V []Point `json:"values,omitempty"` + H []Point `json:"histograms,omitempty"` + }{ + M: s.Metric, + } + for _, p := range s.Points { + if p.H == nil { + series.V = append(series.V, p) + continue + } + series.H = append(series.H, p) + } + return json.Marshal(series) +} + // Point represents a single data point for a given timestamp. +// If H is not nil, then this is a histogram point and only (T, H) is valid. +// If H is nil, then only (T, V) is valid. type Point struct { T int64 V float64 + H *histogram.FloatHistogram } func (p Point) String() string { - v := strconv.FormatFloat(p.V, 'f', -1, 64) - return fmt.Sprintf("%v @[%v]", v, p.T) + var s string + if p.H != nil { + s = p.H.String() + } else { + s = strconv.FormatFloat(p.V, 'f', -1, 64) + } + return fmt.Sprintf("%s @[%v]", s, p.T) } // MarshalJSON implements json.Marshaler. @@ -96,8 +130,45 @@ func (p Point) String() string { // slightly different results in terms of formatting and rounding of the // timestamp. 
func (p Point) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(p.V, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) + if p.H == nil { + v := strconv.FormatFloat(p.V, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) + } + h := struct { + Count string `json:"count"` + Sum string `json:"sum"` + Buckets [][]interface{} `json:"buckets,omitempty"` + }{ + Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64), + Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64), + } + it := p.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. + } + } + bucketToMarshal := []interface{}{ + boundaries, + strconv.FormatFloat(bucket.Lower, 'f', -1, 64), + strconv.FormatFloat(bucket.Upper, 'f', -1, 64), + strconv.FormatFloat(bucket.Count, 'f', -1, 64), + } + h.Buckets = append(h.Buckets, bucketToMarshal) + } + return json.Marshal([...]interface{}{float64(p.T) / 1000, h}) } // Sample is a single sample belonging to a metric. @@ -111,15 +182,27 @@ func (s Sample) String() string { return fmt.Sprintf("%s => %s", s.Metric, s.Point) } +// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because Point +// wouldn't be marshaled with jsoniter in all cases otherwise. func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { + if s.Point.H == nil { + v := struct { + M labels.Labels `json:"metric"` + V Point `json:"value"` + }{ + M: s.Metric, + V: s.Point, + } + return json.Marshal(v) + } + h := struct { M labels.Labels `json:"metric"` - V Point `json:"value"` + H Point `json:"histogram"` }{ M: s.Metric, - V: s.Point, + H: s.Point, } - return json.Marshal(v) + return json.Marshal(h) } // Vector is basically only an alias for model.Samples, but the @@ -296,19 +379,23 @@ func newStorageSeriesIterator(series Series) *storageSeriesIterator { } } -func (ssi *storageSeriesIterator) Seek(t int64) bool { +func (ssi *storageSeriesIterator) Seek(t int64) chunkenc.ValueType { i := ssi.curr if i < 0 { i = 0 } for ; i < len(ssi.points); i++ { - if ssi.points[i].T >= t { + p := ssi.points[i] + if p.T >= t { ssi.curr = i - return true + if p.H != nil { + return chunkenc.ValFloatHistogram + } + return chunkenc.ValFloat } } ssi.curr = len(ssi.points) - 1 - return false + return chunkenc.ValNone } func (ssi *storageSeriesIterator) At() (t int64, v float64) { @@ -316,9 +403,30 @@ func (ssi *storageSeriesIterator) At() (t int64, v float64) { return p.T, p.V } -func (ssi *storageSeriesIterator) Next() bool { +func (ssi *storageSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + panic(errors.New("storageSeriesIterator: AtHistogram not supported")) +} + +func (ssi *storageSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + p := ssi.points[ssi.curr] + return p.T, p.H +} + +func (ssi *storageSeriesIterator) AtT() int64 { + p := ssi.points[ssi.curr] + return p.T +} + +func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { ssi.curr++ - return ssi.curr < len(ssi.points) + if ssi.curr >= len(ssi.points) { + return chunkenc.ValNone + } + p := ssi.points[ssi.curr] + if p.H != nil { + return 
chunkenc.ValFloatHistogram + } + return chunkenc.ValFloat } func (ssi *storageSeriesIterator) Err() error { diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 1ed46959640..ced61ea4925 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/strutil" ) @@ -201,7 +202,7 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc { return v, nil case promql.Scalar: return promql.Vector{promql.Sample{ - Point: promql.Point(v), + Point: promql.Point{T: v.T, V: v.V}, Metric: labels.Labels{}, }}, nil default: @@ -798,7 +799,7 @@ func (g *Group) RestoreForState(ts time.Time) { var t int64 var v float64 it := s.Iterator() - for it.Next() { + for it.Next() == chunkenc.ValFloat { t, v = it.At() } if it.Err() != nil { diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index d8a0ce72f9c..3c77dac397b 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -132,6 +132,9 @@ type Options struct { // Option to enable the experimental in-memory metadata storage and append // metadata to the WAL. EnableMetadataStorage bool + // Option to enable protobuf negotiation with the client. Note that the client can already + // send protobuf without needing to enable this. + EnableProtobufNegotiation bool // Option to increase the interval used by scrape manager to throttle target groups updates. 
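(Illustrative only, not part of the vendored change: enabling the new negotiation option when building a scrape manager; this assumes the existing NewManager(options, logger, appendable) constructor and that logger and appendable are defined elsewhere.)

	opts := &scrape.Options{
		// Prefer the protobuf exposition format in the Accept header so that
		// targets can expose native histograms; plain-text targets keep working.
		EnableProtobufNegotiation: true,
	}
	mgr := scrape.NewManager(opts, logger, appendable)
	_ = mgr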
DiscoveryReloadInterval model.Duration diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index a84e8cbea01..04375ab564b 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -40,6 +40,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" @@ -242,6 +243,8 @@ type scrapePool struct { newLoop func(scrapeLoopOptions) loop noDefaultPort bool + + enableProtobufNegotiation bool } type labelLimits struct { @@ -283,15 +286,16 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed ctx, cancel := context.WithCancel(context.Background()) sp := &scrapePool{ - cancel: cancel, - appendable: app, - config: cfg, - client: client, - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{}, - logger: logger, - httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, + cancel: cancel, + appendable: app, + config: cfg, + client: client, + activeTargets: map[uint64]*Target{}, + loops: map[uint64]loop{}, + logger: logger, + httpOpts: options.HTTPClientOptions, + noDefaultPort: options.NoDefaultPort, + enableProtobufNegotiation: options.EnableProtobufNegotiation, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. @@ -432,8 +436,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { t := sp.activeTargets[fp] interval, timeout, err := t.intervalAndTimeout(interval, timeout) + acceptHeader := scrapeAcceptHeader + if sp.enableProtobufNegotiation { + acceptHeader = scrapeAcceptHeaderWithProtobuf + } var ( - s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit} + s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader} newLoop = sp.newLoop(scrapeLoopOptions{ target: t, scraper: s, @@ -536,8 +544,11 @@ func (sp *scrapePool) sync(targets []*Target) { // for every target. 
var err error interval, timeout, err = t.intervalAndTimeout(interval, timeout) - - s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit} + acceptHeader := scrapeAcceptHeader + if sp.enableProtobufNegotiation { + acceptHeader = scrapeAcceptHeaderWithProtobuf + } + s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader} l := sp.newLoop(scrapeLoopOptions{ target: t, scraper: s, @@ -756,11 +767,15 @@ type targetScraper struct { buf *bufio.Reader bodySizeLimit int64 + acceptHeader string } var errBodySizeLimit = errors.New("body size limit exceeded") -const acceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` +const ( + scrapeAcceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` + scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` +) var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) @@ -770,7 +785,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error) if err != nil { return "", err } - req.Header.Add("Accept", acceptHeader) + req.Header.Add("Accept", s.acceptHeader) req.Header.Add("Accept-Encoding", "gzip") req.Header.Set("User-Agent", UserAgent) req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64)) @@ -1510,8 +1525,12 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, loop: for { var ( - et textparse.Entry - sampleAdded bool + et textparse.Entry + sampleAdded, isHistogram bool + met []byte + parsedTimestamp *int64 + val float64 + h *histogram.Histogram ) if et, err = p.Next(); err != nil { if err == io.EOF { @@ -1531,17 +1550,24 @@ loop: continue case textparse.EntryComment: continue + case textparse.EntryHistogram: + isHistogram = true default: } total++ t := defTime - met, tp, v := p.Series() + if isHistogram { + met, parsedTimestamp, h, _ = p.Histogram() + // TODO: ingest float histograms in tsdb. + } else { + met, parsedTimestamp, val = p.Series() + } if !sl.honorTimestamps { - tp = nil + parsedTimestamp = nil } - if tp != nil { - t = *tp + if parsedTimestamp != nil { + t = *parsedTimestamp } // Zero metadata out for current iteration until it's resolved. @@ -1594,8 +1620,14 @@ loop: updateMetadata(lset, true) } - ref, err = app.Append(ref, lset, t, v) - sampleAdded, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs) + if isHistogram { + if h != nil { + ref, err = app.AppendHistogram(ref, lset, t, h) + } + } else { + ref, err = app.Append(ref, lset, t, val) + } + sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &appErrs) if err != nil { if err != storage.ErrNotFound { level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) @@ -1604,7 +1636,7 @@ loop: } if !ok { - if tp == nil { + if parsedTimestamp == nil { // Bypass staleness logic if there is an explicit timestamp. 
sl.cache.trackStaleness(hash, lset) } diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index 0b38dc07794..dc9e9bca321 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -14,8 +14,10 @@ package storage import ( + "fmt" "math" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -25,8 +27,8 @@ type BufferedSeriesIterator struct { buf *sampleRing delta int64 - lastTime int64 - ok bool + lastTime int64 + valueType chunkenc.ValueType } // NewBuffer returns a new iterator that buffers the values within the time range @@ -39,6 +41,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator { // NewBufferIterator returns a new iterator that buffers the values within the // time range of the current element and the duration of delta before. func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { + // TODO(codesome): based on encoding, allocate different buffer. bit := &BufferedSeriesIterator{ buf: newSampleRing(delta, 16), delta: delta, @@ -53,10 +56,9 @@ func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterato func (b *BufferedSeriesIterator) Reset(it chunkenc.Iterator) { b.it = it b.lastTime = math.MinInt64 - b.ok = true b.buf.reset() b.buf.delta = b.delta - it.Next() + b.valueType = it.Next() } // ReduceDelta lowers the buffered time delta, for the current SeriesIterator only. @@ -66,8 +68,9 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { // PeekBack returns the nth previous element of the iterator. If there is none buffered, // ok is false. -func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, ok bool) { - return b.buf.nthLast(n) +func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, h *histogram.Histogram, ok bool) { + s, ok := b.buf.nthLast(n) + return s.t, s.v, s.h, ok } // Buffer returns an iterator over the buffered data. Invalidates previously @@ -77,63 +80,96 @@ func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator { } // Seek advances the iterator to the element at time t or greater. -func (b *BufferedSeriesIterator) Seek(t int64) bool { +func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType { t0 := t - b.buf.delta // If the delta would cause us to seek backwards, preserve the buffer // and just continue regular advancement while filling the buffer on the way. - if b.ok && t0 > b.lastTime { + if b.valueType != chunkenc.ValNone && t0 > b.lastTime { b.buf.reset() - b.ok = b.it.Seek(t0) - if !b.ok { - return false + b.valueType = b.it.Seek(t0) + switch b.valueType { + case chunkenc.ValNone: + return chunkenc.ValNone + case chunkenc.ValFloat: + b.lastTime, _ = b.At() + case chunkenc.ValHistogram: + b.lastTime, _ = b.AtHistogram() + case chunkenc.ValFloatHistogram: + b.lastTime, _ = b.AtFloatHistogram() + default: + panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) } - b.lastTime, _ = b.At() } if b.lastTime >= t { - return true + return b.valueType } - for b.Next() { - if b.lastTime >= t { - return true + for { + if b.valueType = b.Next(); b.valueType == chunkenc.ValNone || b.lastTime >= t { + return b.valueType } } - - return false } // Next advances the iterator to the next element. 
-func (b *BufferedSeriesIterator) Next() bool { - if !b.ok { - return false - } - +func (b *BufferedSeriesIterator) Next() chunkenc.ValueType { // Add current element to buffer before advancing. - b.buf.add(b.it.At()) - - b.ok = b.it.Next() - if b.ok { - b.lastTime, _ = b.At() + switch b.valueType { + case chunkenc.ValNone: + return chunkenc.ValNone + case chunkenc.ValFloat: + t, v := b.it.At() + b.buf.add(sample{t: t, v: v}) + case chunkenc.ValHistogram: + t, h := b.it.AtHistogram() + b.buf.add(sample{t: t, h: h}) + case chunkenc.ValFloatHistogram: + t, fh := b.it.AtFloatHistogram() + b.buf.add(sample{t: t, fh: fh}) + default: + panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) } - return b.ok + b.valueType = b.it.Next() + if b.valueType != chunkenc.ValNone { + b.lastTime = b.AtT() + } + return b.valueType } -// At returns the current element of the iterator. +// At returns the current float element of the iterator. func (b *BufferedSeriesIterator) At() (int64, float64) { return b.it.At() } +// AtHistogram returns the current histogram element of the iterator. +func (b *BufferedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + return b.it.AtHistogram() +} + +// AtFloatHistogram returns the current float-histogram element of the iterator. +func (b *BufferedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + return b.it.AtFloatHistogram() +} + +// AtT returns the current timestamp of the iterator. +func (b *BufferedSeriesIterator) AtT() int64 { + return b.it.AtT() +} + // Err returns the last encountered error. func (b *BufferedSeriesIterator) Err() error { return b.it.Err() } +// TODO(beorn7): Consider having different sample types for different value types. type sample struct { - t int64 - v float64 + t int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram } func (s sample) T() int64 { @@ -144,6 +180,25 @@ func (s sample) V() float64 { return s.v } +func (s sample) H() *histogram.Histogram { + return s.h +} + +func (s sample) FH() *histogram.FloatHistogram { + return s.fh +} + +func (s sample) Type() chunkenc.ValueType { + switch { + case s.h != nil: + return chunkenc.ValHistogram + case s.fh != nil: + return chunkenc.ValFloatHistogram + default: + return chunkenc.ValFloat + } +} + type sampleRing struct { delta int64 @@ -176,17 +231,36 @@ func (r *sampleRing) iterator() chunkenc.Iterator { } type sampleRingIterator struct { - r *sampleRing - i int + r *sampleRing + i int + t int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram } -func (it *sampleRingIterator) Next() bool { +func (it *sampleRingIterator) Next() chunkenc.ValueType { it.i++ - return it.i < it.r.l + if it.i >= it.r.l { + return chunkenc.ValNone + } + s := it.r.at(it.i) + it.t = s.t + switch { + case s.h != nil: + it.h = s.h + return chunkenc.ValHistogram + case s.fh != nil: + it.fh = s.fh + return chunkenc.ValFloatHistogram + default: + it.v = s.v + return chunkenc.ValFloat + } } -func (it *sampleRingIterator) Seek(int64) bool { - return false +func (it *sampleRingIterator) Seek(int64) chunkenc.ValueType { + return chunkenc.ValNone } func (it *sampleRingIterator) Err() error { @@ -194,18 +268,32 @@ func (it *sampleRingIterator) Err() error { } func (it *sampleRingIterator) At() (int64, float64) { - return it.r.at(it.i) + return it.t, it.v +} + +func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { + return it.t, it.h +} + +func (it *sampleRingIterator) AtFloatHistogram() (int64, 
*histogram.FloatHistogram) { + if it.fh == nil { + return it.t, it.h.ToFloat() + } + return it.t, it.fh +} + +func (it *sampleRingIterator) AtT() int64 { + return it.t } -func (r *sampleRing) at(i int) (int64, float64) { +func (r *sampleRing) at(i int) sample { j := (r.f + i) % len(r.buf) - s := r.buf[j] - return s.t, s.v + return r.buf[j] } // add adds a sample to the ring buffer and frees all samples that fall // out of the delta range. -func (r *sampleRing) add(t int64, v float64) { +func (r *sampleRing) add(s sample) { l := len(r.buf) // Grow the ring buffer if it fits no more elements. if l == r.l { @@ -224,11 +312,11 @@ func (r *sampleRing) add(t int64, v float64) { } } - r.buf[r.i] = sample{t: t, v: v} + r.buf[r.i] = s r.l++ // Free head of the buffer of samples that just fell out of the range. - tmin := t - r.delta + tmin := s.t - r.delta for r.buf[r.f].t < tmin { r.f++ if r.f >= l { @@ -264,12 +352,11 @@ func (r *sampleRing) reduceDelta(delta int64) bool { } // nthLast returns the nth most recent element added to the ring. -func (r *sampleRing) nthLast(n int) (int64, float64, bool) { +func (r *sampleRing) nthLast(n int) (sample, bool) { if n > r.l { - return 0, 0, false + return sample{}, false } - t, v := r.at(r.l - n) - return t, v, true + return r.at(r.l - n), true } func (r *sampleRing) samples() []sample { diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index a51526f06e1..cdaf8194c6e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -173,6 +174,20 @@ func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exempl return ref, nil } +func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) { + ref, err := f.primary.AppendHistogram(ref, l, t, h) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendHistogram(ref, l, t, h); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index d73ec72203a..9df4b75f5be 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -35,11 +36,16 @@ var ( // ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed. ErrTooOldSample = errors.New("too old sample") // ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value. 
- ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") - ErrOutOfOrderExemplar = errors.New("out of order exemplar") - ErrDuplicateExemplar = errors.New("duplicate exemplar") - ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) - ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") + ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") + ErrOutOfOrderExemplar = errors.New("out of order exemplar") + ErrDuplicateExemplar = errors.New("duplicate exemplar") + ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) + ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") + ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -207,6 +213,9 @@ func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier, // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // // Operations on the Appender interface are not goroutine-safe. +// +// The type of samples (float64, histogram, etc) appended for a given series must remain same within an Appender. +// The behaviour is undefined if samples of different types are appended to the same series in a single Commit(). type Appender interface { // Append adds a sample pair for the given series. // An optional series reference can be provided to accelerate calls. @@ -227,7 +236,9 @@ type Appender interface { // Rollback rolls back all modifications made in the appender so far. // Appender has to be discarded after rollback. Rollback() error + ExemplarAppender + HistogramAppender MetadataUpdater } @@ -257,6 +268,22 @@ type ExemplarAppender interface { AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) } +// HistogramAppender provides an interface for appending histograms to the storage. +type HistogramAppender interface { + // AppendHistogram adds a histogram for the given series labels. An + // optional reference number can be provided to accelerate calls. A + // reference number is returned which can be used to add further + // histograms in the same or later transactions. Returned reference + // numbers are ephemeral and may be rejected in calls to Append() at any + // point. Adding the sample via Append() returns a new reference number. + // If the reference is 0, it must not be used for caching. + // + // For efficiency reasons, the histogram is passed as a + // pointer. AppendHistogram won't mutate the histogram, but in turn + // depends on the caller to not mutate it either. + AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) +} + // MetadataUpdater provides an interface for associating metadata to stored series. 
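(Illustrative only, not part of the vendored change: appending a native histogram through the HistogramAppender contract defined above; db stands in for any storage.Appendable, and ctx, lbls, ts, and h are assumed to exist.)

	app := db.Appender(ctx)
	ref, err := app.AppendHistogram(0, lbls, ts, h)
	if err != nil {
		_ = app.Rollback()
		return err
	}
	// ref can be reused to append further histograms for the same series.
	_ = ref
	if err := app.Commit(); err != nil {
		return err
	}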
type MetadataUpdater interface { // UpdateMetadata updates a metadata entry for the given series and labels. diff --git a/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go b/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go index d72338b78c5..88eee0756b8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go +++ b/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go @@ -16,6 +16,7 @@ package storage import ( "math" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -24,12 +25,18 @@ type MemoizedSeriesIterator struct { it chunkenc.Iterator delta int64 - lastTime int64 - ok bool + lastTime int64 + valueType chunkenc.ValueType // Keep track of the previously returned value. - prevTime int64 - prevValue float64 + prevTime int64 + prevValue float64 + prevHistogram *histogram.Histogram + prevFloatHistogram *histogram.FloatHistogram + // TODO(beorn7): MemoizedSeriesIterator is currently only used by the + // PromQL engine, which only works with FloatHistograms. For better + // performance, we could change MemoizedSeriesIterator to also only + // handle FloatHistograms. } // NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator. @@ -53,70 +60,93 @@ func NewMemoizedIterator(it chunkenc.Iterator, delta int64) *MemoizedSeriesItera func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) { b.it = it b.lastTime = math.MinInt64 - b.ok = true b.prevTime = math.MinInt64 - it.Next() + b.valueType = it.Next() } // PeekPrev returns the previous element of the iterator. If there is none buffered, // ok is false. -func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, ok bool) { +func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool) { if b.prevTime == math.MinInt64 { - return 0, 0, false + return 0, 0, nil, nil, false } - return b.prevTime, b.prevValue, true + return b.prevTime, b.prevValue, b.prevHistogram, b.prevFloatHistogram, true } // Seek advances the iterator to the element at time t or greater. -func (b *MemoizedSeriesIterator) Seek(t int64) bool { +func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType { t0 := t - b.delta - if b.ok && t0 > b.lastTime { + if b.valueType != chunkenc.ValNone && t0 > b.lastTime { // Reset the previously stored element because the seek advanced // more than the delta. b.prevTime = math.MinInt64 - b.ok = b.it.Seek(t0) - if !b.ok { - return false + b.valueType = b.it.Seek(t0) + if b.valueType == chunkenc.ValNone { + return chunkenc.ValNone } - b.lastTime, _ = b.it.At() + b.lastTime = b.it.AtT() } - if b.lastTime >= t { - return true + return b.valueType } - for b.Next() { + for b.Next() != chunkenc.ValNone { if b.lastTime >= t { - return true + return b.valueType } } - return false + return chunkenc.ValNone } // Next advances the iterator to the next element. -func (b *MemoizedSeriesIterator) Next() bool { - if !b.ok { - return false - } - +func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { // Keep track of the previous element. 
- b.prevTime, b.prevValue = b.it.At() - - b.ok = b.it.Next() - if b.ok { - b.lastTime, _ = b.it.At() + switch b.valueType { + case chunkenc.ValNone: + return chunkenc.ValNone + case chunkenc.ValFloat: + b.prevTime, b.prevValue = b.it.At() + b.prevHistogram = nil + b.prevFloatHistogram = nil + case chunkenc.ValHistogram: + b.prevValue = 0 + b.prevTime, b.prevHistogram = b.it.AtHistogram() + _, b.prevFloatHistogram = b.it.AtFloatHistogram() + case chunkenc.ValFloatHistogram: + b.prevValue = 0 + b.prevHistogram = nil + b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram() } - return b.ok + b.valueType = b.it.Next() + if b.valueType != chunkenc.ValNone { + b.lastTime = b.it.AtT() + } + return b.valueType } -// At returns the current element of the iterator. +// At returns the current float element of the iterator. func (b *MemoizedSeriesIterator) At() (int64, float64) { return b.it.At() } +// AtHistogram returns the current histogram element of the iterator. +func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + return b.it.AtHistogram() +} + +// AtFloatHistogram returns the current float-histogram element of the iterator. +func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + return b.it.AtFloatHistogram() +} + +// AtT returns the current timestamp of the iterator. +func (b *MemoizedSeriesIterator) AtT() int64 { + return b.it.AtT() +} + // Err returns the last encountered error. func (b *MemoizedSeriesIterator) Err() error { return b.it.Err() diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 2f175d3e7ec..258e4e3120f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -18,9 +18,11 @@ import ( "container/heap" "fmt" "math" - "sort" "sync" + "golang.org/x/exp/slices" + + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -240,7 +242,7 @@ func (q *mergeGenericQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, for name := range labelNamesMap { labelNames = append(labelNames, name) } - sort.Strings(labelNames) + slices.Sort(labelNames) return labelNames, warnings, nil } @@ -441,7 +443,7 @@ type chainSampleIterator struct { h samplesIteratorHeap curr chunkenc.Iterator - lastt int64 + lastT int64 } // NewChainSampleIterator returns a single iterator that iterates over the samples from the given iterators in a sorted @@ -451,60 +453,82 @@ func NewChainSampleIterator(iterators []chunkenc.Iterator) chunkenc.Iterator { return &chainSampleIterator{ iterators: iterators, h: nil, - lastt: math.MinInt64, + lastT: math.MinInt64, } } -func (c *chainSampleIterator) Seek(t int64) bool { - // No-op check - if c.curr != nil && c.lastt >= t { - return true +func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType { + // No-op check. 
+ if c.curr != nil && c.lastT >= t { + return c.curr.Seek(c.lastT) } - c.h = samplesIteratorHeap{} for _, iter := range c.iterators { - if iter.Seek(t) { + if iter.Seek(t) != chunkenc.ValNone { heap.Push(&c.h, iter) } } if len(c.h) > 0 { c.curr = heap.Pop(&c.h).(chunkenc.Iterator) - c.lastt, _ = c.curr.At() - return true + c.lastT = c.curr.AtT() + return c.curr.Seek(c.lastT) } c.curr = nil - return false + return chunkenc.ValNone } func (c *chainSampleIterator) At() (t int64, v float64) { if c.curr == nil { - panic("chainSampleIterator.At() called before first .Next() or after .Next() returned false.") + panic("chainSampleIterator.At called before first .Next or after .Next returned false.") } return c.curr.At() } -func (c *chainSampleIterator) Next() bool { +func (c *chainSampleIterator) AtHistogram() (int64, *histogram.Histogram) { + if c.curr == nil { + panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.") + } + return c.curr.AtHistogram() +} + +func (c *chainSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + if c.curr == nil { + panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.") + } + return c.curr.AtFloatHistogram() +} + +func (c *chainSampleIterator) AtT() int64 { + if c.curr == nil { + panic("chainSampleIterator.AtT called before first .Next or after .Next returned false.") + } + return c.curr.AtT() +} + +func (c *chainSampleIterator) Next() chunkenc.ValueType { if c.h == nil { c.h = samplesIteratorHeap{} // We call c.curr.Next() as the first thing below. // So, we don't call Next() on it here. c.curr = c.iterators[0] for _, iter := range c.iterators[1:] { - if iter.Next() { + if iter.Next() != chunkenc.ValNone { heap.Push(&c.h, iter) } } } if c.curr == nil { - return false + return chunkenc.ValNone } - var currt int64 + var currT int64 + var currValueType chunkenc.ValueType for { - if c.curr.Next() { - currt, _ = c.curr.At() - if currt == c.lastt { + currValueType = c.curr.Next() + if currValueType != chunkenc.ValNone { + currT = c.curr.AtT() + if currT == c.lastT { // Ignoring sample for the same timestamp. continue } @@ -515,7 +539,8 @@ func (c *chainSampleIterator) Next() bool { } // Check current iterator with the top of the heap. - if nextt, _ := c.h[0].At(); currt < nextt { + nextT := c.h[0].AtT() + if currT < nextT { // Current iterator has smaller timestamp than the heap. break } @@ -524,18 +549,19 @@ func (c *chainSampleIterator) Next() bool { } else if len(c.h) == 0 { // No iterator left to iterate. 
c.curr = nil - return false + return chunkenc.ValNone } c.curr = heap.Pop(&c.h).(chunkenc.Iterator) - currt, _ = c.curr.At() - if currt != c.lastt { + currT = c.curr.AtT() + currValueType = c.curr.Seek(currT) + if currT != c.lastT { break } } - c.lastt = currt - return true + c.lastT = currT + return currValueType } func (c *chainSampleIterator) Err() error { @@ -552,9 +578,7 @@ func (h samplesIteratorHeap) Len() int { return len(h) } func (h samplesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h samplesIteratorHeap) Less(i, j int) bool { - at, _ := h[i].At() - bt, _ := h[j].At() - return at < bt + return h[i].AtT() < h[j].AtT() } func (h *samplesIteratorHeap) Push(x interface{}) { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 5d78107bb96..9b6f5a5ab18 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/prompb" @@ -118,7 +119,8 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, iter := series.Iterator() samples := []prompb.Sample{} - for iter.Next() { + for iter.Next() == chunkenc.ValFloat { + // TODO(beorn7): Add Histogram support. numSamples++ if sampleLimit > 0 && numSamples > sampleLimit { return nil, ss.Warnings(), HTTPError{ @@ -355,37 +357,65 @@ func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator { } // Seek implements storage.SeriesIterator. -func (c *concreteSeriesIterator) Seek(t int64) bool { +func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { if c.cur == -1 { c.cur = 0 } if c.cur >= len(c.series.samples) { - return false + return chunkenc.ValNone } // No-op check. if s := c.series.samples[c.cur]; s.Timestamp >= t { - return true + return chunkenc.ValFloat } // Do binary search between current position and end. c.cur += sort.Search(len(c.series.samples)-c.cur, func(n int) bool { return c.series.samples[n+c.cur].Timestamp >= t }) - return c.cur < len(c.series.samples) + if c.cur < len(c.series.samples) { + return chunkenc.ValFloat + } + return chunkenc.ValNone + // TODO(beorn7): Add histogram support. } -// At implements storage.SeriesIterator. +// At implements chunkenc.Iterator. func (c *concreteSeriesIterator) At() (t int64, v float64) { s := c.series.samples[c.cur] return s.Timestamp, s.Value } -// Next implements storage.SeriesIterator. -func (c *concreteSeriesIterator) Next() bool { +// AtHistogram always returns (0, nil) because there is no support for histogram +// values yet. +// TODO(beorn7): Fix that for histogram support in remote storage. +func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + return 0, nil +} + +// AtFloatHistogram always returns (0, nil) because there is no support for histogram +// values yet. +// TODO(beorn7): Fix that for histogram support in remote storage. +func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + return 0, nil +} + +// AtT implements chunkenc.Iterator. +func (c *concreteSeriesIterator) AtT() int64 { + s := c.series.samples[c.cur] + return s.Timestamp +} + +// Next implements chunkenc.Iterator. 
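Note: NewChainSampleIterator above merges several iterators over the same series by timestamp and emits each timestamp only once (duplicates across inputs are skipped via lastT). A minimal sketch, assuming two hypothetical chunkenc.Iterators `a` and `b` for the same series:

    merged := storage.NewChainSampleIterator([]chunkenc.Iterator{a, b})
    for vt := merged.Next(); vt != chunkenc.ValNone; vt = merged.Next() {
        // AtT works for every value type; a timestamp present in both a and b appears once.
        _ = merged.AtT()
    }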
+func (c *concreteSeriesIterator) Next() chunkenc.ValueType { c.cur++ - return c.cur < len(c.series.samples) + if c.cur < len(c.series.samples) { + return chunkenc.ValFloat + } + return chunkenc.ValNone + // TODO(beorn7): Add histogram support. } -// Err implements storage.SeriesIterator. +// Err implements chunkenc.Iterator. func (c *concreteSeriesIterator) Err() error { return nil } @@ -472,6 +502,56 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar { } } +// HistogramProtoToHistogram extracts a (normal integer) Histogram from the +// provided proto message. The caller has to make sure that the proto message +// represents an interger histogram and not a float histogram. +func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { + return &histogram.Histogram{ + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: hp.GetZeroCountInt(), + Count: hp.GetCountInt(), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: hp.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: hp.GetNegativeDeltas(), + } +} + +func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []*prompb.BucketSpan { + spans := make([]*prompb.BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = &prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { metric := make(model.Metric, len(labelPairs)) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 420a2d11ae2..e701cb94ba1 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -32,13 +32,14 @@ import ( "go.uber.org/atomic" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" ) const ( @@ -54,30 +55,35 @@ const ( type queueManagerMetrics struct { reg prometheus.Registerer - samplesTotal prometheus.Counter - exemplarsTotal prometheus.Counter - metadataTotal prometheus.Counter - failedSamplesTotal prometheus.Counter - failedExemplarsTotal prometheus.Counter - failedMetadataTotal prometheus.Counter - 
retriedSamplesTotal prometheus.Counter - retriedExemplarsTotal prometheus.Counter - retriedMetadataTotal prometheus.Counter - droppedSamplesTotal prometheus.Counter - droppedExemplarsTotal prometheus.Counter - enqueueRetriesTotal prometheus.Counter - sentBatchDuration prometheus.Histogram - highestSentTimestamp *maxTimestamp - pendingSamples prometheus.Gauge - pendingExemplars prometheus.Gauge - shardCapacity prometheus.Gauge - numShards prometheus.Gauge - maxNumShards prometheus.Gauge - minNumShards prometheus.Gauge - desiredNumShards prometheus.Gauge - sentBytesTotal prometheus.Counter - metadataBytesTotal prometheus.Counter - maxSamplesPerSend prometheus.Gauge + samplesTotal prometheus.Counter + exemplarsTotal prometheus.Counter + histogramsTotal prometheus.Counter + metadataTotal prometheus.Counter + failedSamplesTotal prometheus.Counter + failedExemplarsTotal prometheus.Counter + failedHistogramsTotal prometheus.Counter + failedMetadataTotal prometheus.Counter + retriedSamplesTotal prometheus.Counter + retriedExemplarsTotal prometheus.Counter + retriedHistogramsTotal prometheus.Counter + retriedMetadataTotal prometheus.Counter + droppedSamplesTotal prometheus.Counter + droppedExemplarsTotal prometheus.Counter + droppedHistogramsTotal prometheus.Counter + enqueueRetriesTotal prometheus.Counter + sentBatchDuration prometheus.Histogram + highestSentTimestamp *maxTimestamp + pendingSamples prometheus.Gauge + pendingExemplars prometheus.Gauge + pendingHistograms prometheus.Gauge + shardCapacity prometheus.Gauge + numShards prometheus.Gauge + maxNumShards prometheus.Gauge + minNumShards prometheus.Gauge + desiredNumShards prometheus.Gauge + sentBytesTotal prometheus.Counter + metadataBytesTotal prometheus.Counter + maxSamplesPerSend prometheus.Gauge } func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics { @@ -103,6 +109,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of exemplars sent to remote storage.", ConstLabels: constLabels, }) + m.histogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "histograms_total", + Help: "Total number of histograms sent to remote storage.", + ConstLabels: constLabels, + }) m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -124,6 +137,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of exemplars which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) + m.failedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "histograms_failed_total", + Help: "Total number of histograms which failed on send to remote storage, non-recoverable errors.", + ConstLabels: constLabels, + }) m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -145,6 +165,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) + m.retriedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "histograms_retried_total", + Help: "Total number of histograms which failed on send to remote storage but were 
retried because the send error was recoverable.", + ConstLabels: constLabels, + }) m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -166,6 +193,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", ConstLabels: constLabels, }) + m.droppedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "histograms_dropped_total", + Help: "Total number of histograms which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", + ConstLabels: constLabels, + }) m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -204,6 +238,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "The number of exemplars pending in the queues shards to be sent to the remote storage.", ConstLabels: constLabels, }) + m.pendingHistograms = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "histograms_pending", + Help: "The number of histograms pending in the queues shards to be sent to the remote storage.", + ConstLabels: constLabels, + }) m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, @@ -269,20 +310,25 @@ func (m *queueManagerMetrics) register() { m.reg.MustRegister( m.samplesTotal, m.exemplarsTotal, + m.histogramsTotal, m.metadataTotal, m.failedSamplesTotal, m.failedExemplarsTotal, + m.failedHistogramsTotal, m.failedMetadataTotal, m.retriedSamplesTotal, m.retriedExemplarsTotal, + m.retriedHistogramsTotal, m.retriedMetadataTotal, m.droppedSamplesTotal, m.droppedExemplarsTotal, + m.droppedHistogramsTotal, m.enqueueRetriesTotal, m.sentBatchDuration, m.highestSentTimestamp, m.pendingSamples, m.pendingExemplars, + m.pendingHistograms, m.shardCapacity, m.numShards, m.maxNumShards, @@ -299,20 +345,25 @@ func (m *queueManagerMetrics) unregister() { if m.reg != nil { m.reg.Unregister(m.samplesTotal) m.reg.Unregister(m.exemplarsTotal) + m.reg.Unregister(m.histogramsTotal) m.reg.Unregister(m.metadataTotal) m.reg.Unregister(m.failedSamplesTotal) m.reg.Unregister(m.failedExemplarsTotal) + m.reg.Unregister(m.failedHistogramsTotal) m.reg.Unregister(m.failedMetadataTotal) m.reg.Unregister(m.retriedSamplesTotal) m.reg.Unregister(m.retriedExemplarsTotal) + m.reg.Unregister(m.retriedHistogramsTotal) m.reg.Unregister(m.retriedMetadataTotal) m.reg.Unregister(m.droppedSamplesTotal) m.reg.Unregister(m.droppedExemplarsTotal) + m.reg.Unregister(m.droppedHistogramsTotal) m.reg.Unregister(m.enqueueRetriesTotal) m.reg.Unregister(m.sentBatchDuration) m.reg.Unregister(m.highestSentTimestamp) m.reg.Unregister(m.pendingSamples) m.reg.Unregister(m.pendingExemplars) + m.reg.Unregister(m.pendingHistograms) m.reg.Unregister(m.shardCapacity) m.reg.Unregister(m.numShards) m.reg.Unregister(m.maxNumShards) @@ -341,15 +392,16 @@ type WriteClient interface { type QueueManager struct { lastSendTimestamp atomic.Int64 - logger log.Logger - flushDeadline time.Duration - cfg config.QueueConfig - mcfg config.MetadataConfig - externalLabels labels.Labels - relabelConfigs []*relabel.Config - sendExemplars 
bool - watcher *wal.Watcher - metadataWatcher *MetadataWatcher + logger log.Logger + flushDeadline time.Duration + cfg config.QueueConfig + mcfg config.MetadataConfig + externalLabels labels.Labels + relabelConfigs []*relabel.Config + sendExemplars bool + sendNativeHistograms bool + watcher *wlog.Watcher + metadataWatcher *MetadataWatcher clientMtx sync.RWMutex storeClient WriteClient @@ -381,8 +433,8 @@ type QueueManager struct { // the WAL directory will be constructed as /wal. func NewQueueManager( metrics *queueManagerMetrics, - watcherMetrics *wal.WatcherMetrics, - readerMetrics *wal.LiveReaderMetrics, + watcherMetrics *wlog.WatcherMetrics, + readerMetrics *wlog.LiveReaderMetrics, logger log.Logger, dir string, samplesIn *ewmaRate, @@ -396,6 +448,7 @@ func NewQueueManager( highestRecvTimestamp *maxTimestamp, sm ReadyScrapeManager, enableExemplarRemoteWrite bool, + enableNativeHistogramRemoteWrite bool, ) *QueueManager { if logger == nil { logger = log.NewNopLogger() @@ -403,14 +456,15 @@ func NewQueueManager( logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) t := &QueueManager{ - logger: logger, - flushDeadline: flushDeadline, - cfg: cfg, - mcfg: mCfg, - externalLabels: externalLabels, - relabelConfigs: relabelConfigs, - storeClient: client, - sendExemplars: enableExemplarRemoteWrite, + logger: logger, + flushDeadline: flushDeadline, + cfg: cfg, + mcfg: mCfg, + externalLabels: externalLabels, + relabelConfigs: relabelConfigs, + storeClient: client, + sendExemplars: enableExemplarRemoteWrite, + sendNativeHistograms: enableNativeHistogramRemoteWrite, seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), @@ -430,7 +484,7 @@ func NewQueueManager( highestRecvTimestamp: highestRecvTimestamp, } - t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite) + t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite) if t.mcfg.Send { t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline) } @@ -538,11 +592,11 @@ outer: return false default: } - if t.shards.enqueue(s.Ref, sampleOrExemplar{ + if t.shards.enqueue(s.Ref, timeSeries{ seriesLabels: lbls, timestamp: s.T, value: s.V, - isSample: true, + sType: tSample, }) { continue outer } @@ -588,11 +642,59 @@ outer: return false default: } - if t.shards.enqueue(e.Ref, sampleOrExemplar{ + if t.shards.enqueue(e.Ref, timeSeries{ seriesLabels: lbls, timestamp: e.T, value: e.V, exemplarLabels: e.Labels, + sType: tExemplar, + }) { + continue outer + } + + t.metrics.enqueueRetriesTotal.Inc() + time.Sleep(time.Duration(backoff)) + backoff = backoff * 2 + if backoff > t.cfg.MaxBackoff { + backoff = t.cfg.MaxBackoff + } + } + } + return true +} + +func (t *QueueManager) AppendHistograms(histograms []record.RefHistogramSample) bool { + if !t.sendNativeHistograms { + return true + } + +outer: + for _, h := range histograms { + t.seriesMtx.Lock() + lbls, ok := t.seriesLabels[h.Ref] + if !ok { + t.metrics.droppedHistogramsTotal.Inc() + t.dataDropped.incr(1) + if _, ok := t.droppedSeries[h.Ref]; !ok { + level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + } + t.seriesMtx.Unlock() + continue + } + t.seriesMtx.Unlock() + + backoff := model.Duration(5 * time.Millisecond) + for { + select { + case 
<-t.quit: + return false + default: + } + if t.shards.enqueue(h.Ref, timeSeries{ + seriesLabels: lbls, + timestamp: h.T, + histogram: h.H, + sType: tHistogram, }) { continue outer } @@ -921,8 +1023,9 @@ type shards struct { qm *QueueManager queues []*queue // So we can accurately track how many of each are lost during shard shutdowns. - enqueuedSamples atomic.Int64 - enqueuedExemplars atomic.Int64 + enqueuedSamples atomic.Int64 + enqueuedExemplars atomic.Int64 + enqueuedHistograms atomic.Int64 // Emulate a wait group with a channel and an atomic int, as you // cannot select on a wait group. @@ -934,9 +1037,10 @@ type shards struct { // Hard shutdown context is used to terminate outgoing HTTP connections // after giving them a chance to terminate. - hardShutdown context.CancelFunc - samplesDroppedOnHardShutdown atomic.Uint32 - exemplarsDroppedOnHardShutdown atomic.Uint32 + hardShutdown context.CancelFunc + samplesDroppedOnHardShutdown atomic.Uint32 + exemplarsDroppedOnHardShutdown atomic.Uint32 + histogramsDroppedOnHardShutdown atomic.Uint32 } // start the shards; must be called before any call to enqueue. @@ -961,8 +1065,10 @@ func (s *shards) start(n int) { s.done = make(chan struct{}) s.enqueuedSamples.Store(0) s.enqueuedExemplars.Store(0) + s.enqueuedHistograms.Store(0) s.samplesDroppedOnHardShutdown.Store(0) s.exemplarsDroppedOnHardShutdown.Store(0) + s.histogramsDroppedOnHardShutdown.Store(0) for i := 0; i < n; i++ { go s.runShard(hardShutdownCtx, i, newQueues[i]) } @@ -1008,7 +1114,7 @@ func (s *shards) stop() { // retry. A shard is full when its configured capacity has been reached, // specifically, when s.queues[shard] has filled its batchQueue channel and the // partial batch has also been filled. -func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool { +func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool { s.mtx.RLock() defer s.mtx.RUnlock() @@ -1021,12 +1127,16 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool { if !appended { return false } - if data.isSample { + switch data.sType { + case tSample: s.qm.metrics.pendingSamples.Inc() s.enqueuedSamples.Inc() - } else { + case tExemplar: s.qm.metrics.pendingExemplars.Inc() s.enqueuedExemplars.Inc() + case tHistogram: + s.qm.metrics.pendingHistograms.Inc() + s.enqueuedHistograms.Inc() } return true } @@ -1035,24 +1145,34 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool { type queue struct { // batchMtx covers operations appending to or publishing the partial batch. batchMtx sync.Mutex - batch []sampleOrExemplar - batchQueue chan []sampleOrExemplar + batch []timeSeries + batchQueue chan []timeSeries // Since we know there are a limited number of batches out, using a stack // is easy and safe so a sync.Pool is not necessary. // poolMtx covers adding and removing batches from the batchPool. poolMtx sync.Mutex - batchPool [][]sampleOrExemplar + batchPool [][]timeSeries } -type sampleOrExemplar struct { +type timeSeries struct { seriesLabels labels.Labels value float64 + histogram *histogram.Histogram timestamp int64 exemplarLabels labels.Labels - isSample bool + // The type of series: sample, exemplar, or histogram. 
+ sType seriesType } +type seriesType int + +const ( + tSample seriesType = iota + tExemplar + tHistogram +) + func newQueue(batchSize, capacity int) *queue { batches := capacity / batchSize // Always create an unbuffered channel even if capacity is configured to be @@ -1061,17 +1181,17 @@ func newQueue(batchSize, capacity int) *queue { batches = 1 } return &queue{ - batch: make([]sampleOrExemplar, 0, batchSize), - batchQueue: make(chan []sampleOrExemplar, batches), + batch: make([]timeSeries, 0, batchSize), + batchQueue: make(chan []timeSeries, batches), // batchPool should have capacity for everything in the channel + 1 for // the batch being processed. - batchPool: make([][]sampleOrExemplar, 0, batches+1), + batchPool: make([][]timeSeries, 0, batches+1), } } -// Append the sampleOrExemplar to the buffered batch. Returns false if it +// Append the timeSeries to the buffered batch. Returns false if it // cannot be added and must be retried. -func (q *queue) Append(datum sampleOrExemplar) bool { +func (q *queue) Append(datum timeSeries) bool { q.batchMtx.Lock() defer q.batchMtx.Unlock() q.batch = append(q.batch, datum) @@ -1089,12 +1209,12 @@ func (q *queue) Append(datum sampleOrExemplar) bool { return true } -func (q *queue) Chan() <-chan []sampleOrExemplar { +func (q *queue) Chan() <-chan []timeSeries { return q.batchQueue } // Batch returns the current batch and allocates a new batch. -func (q *queue) Batch() []sampleOrExemplar { +func (q *queue) Batch() []timeSeries { q.batchMtx.Lock() defer q.batchMtx.Unlock() @@ -1109,7 +1229,7 @@ func (q *queue) Batch() []sampleOrExemplar { } // ReturnForReuse adds the batch buffer back to the internal pool. -func (q *queue) ReturnForReuse(batch []sampleOrExemplar) { +func (q *queue) ReturnForReuse(batch []timeSeries) { q.poolMtx.Lock() defer q.poolMtx.Unlock() if len(q.batchPool) < cap(q.batchPool) { @@ -1149,7 +1269,7 @@ func (q *queue) tryEnqueueingBatch(done <-chan struct{}) bool { } } -func (q *queue) newBatch(capacity int) []sampleOrExemplar { +func (q *queue) newBatch(capacity int) []timeSeries { q.poolMtx.Lock() defer q.poolMtx.Unlock() batches := len(q.batchPool) @@ -1158,7 +1278,7 @@ func (q *queue) newBatch(capacity int) []sampleOrExemplar { q.batchPool = q.batchPool[:batches-1] return batch } - return make([]sampleOrExemplar, 0, capacity) + return make([]timeSeries, 0, capacity) } func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { @@ -1209,22 +1329,26 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { // Remove them from pending and mark them as failed. 
droppedSamples := int(s.enqueuedSamples.Load()) droppedExemplars := int(s.enqueuedExemplars.Load()) + droppedHistograms := int(s.enqueuedHistograms.Load()) s.qm.metrics.pendingSamples.Sub(float64(droppedSamples)) s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars)) + s.qm.metrics.pendingHistograms.Sub(float64(droppedHistograms)) s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples)) s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars)) + s.qm.metrics.failedHistogramsTotal.Add(float64(droppedHistograms)) s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples)) s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars)) + s.histogramsDroppedOnHardShutdown.Add(uint32(droppedHistograms)) return case batch, ok := <-batchQueue: if !ok { return } - nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData) + nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData) queue.ReturnForReuse(batch) - n := nPendingSamples + nPendingExemplars - s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, pBuf, &buf) + n := nPendingSamples + nPendingExemplars + nPendingHistograms + s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) stop() timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) @@ -1232,10 +1356,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { case <-timer.C: batch := queue.Batch() if len(batch) > 0 { - nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData) - n := nPendingSamples + nPendingExemplars + nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData) + n := nPendingSamples + nPendingExemplars + nPendingHistograms level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum) - s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, pBuf, &buf) + s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) } queue.ReturnForReuse(batch) timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) @@ -1243,43 +1367,51 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { } } -func (s *shards) populateTimeSeries(batch []sampleOrExemplar, pendingData []prompb.TimeSeries) (int, int) { - var nPendingSamples, nPendingExemplars int +func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) { + var nPendingSamples, nPendingExemplars, nPendingHistograms int for nPending, d := range batch { pendingData[nPending].Samples = pendingData[nPending].Samples[:0] if s.qm.sendExemplars { pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0] } + if s.qm.sendNativeHistograms { + pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0] + } + // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll // stop reading from the queue. This makes it safe to reference pendingSamples by index. 
- if d.isSample { - pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + switch d.sType { + case tSample: pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{ Value: d.value, Timestamp: d.timestamp, }) nPendingSamples++ - } else { - pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + case tExemplar: pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{ Labels: labelsToLabelsProto(d.exemplarLabels, nil), Value: d.value, Timestamp: d.timestamp, }) nPendingExemplars++ + case tHistogram: + pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram)) + nPendingHistograms++ } } - return nPendingSamples, nPendingExemplars + return nPendingSamples, nPendingExemplars, nPendingHistograms } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf) + err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf) if err != nil { level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) + s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount)) } // These counters are used to calculate the dynamic sharding, and as such @@ -1287,16 +1419,18 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s s.qm.dataOut.incr(int64(len(samples))) s.qm.dataOutDuration.incr(int64(time.Since(begin))) s.qm.lastSendTimestamp.Store(time.Now().Unix()) - // Pending samples/exemplars also should be subtracted as an error means + // Pending samples/exemplars/histograms also should be subtracted as an error means // they will not be retried. s.qm.metrics.pendingSamples.Sub(float64(sampleCount)) s.qm.metrics.pendingExemplars.Sub(float64(exemplarCount)) + s.qm.metrics.pendingHistograms.Sub(float64(histogramCount)) s.enqueuedSamples.Sub(int64(sampleCount)) s.enqueuedExemplars.Sub(int64(exemplarCount)) + s.enqueuedHistograms.Sub(int64(histogramCount)) } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error { // Build the WriteRequest with no metadata. 
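Note: populateTimeSeries above routes each queued timeSeries entry by sType; a histogram entry ends up as a prompb.Histogram built with the HistogramToHistogramProto helper from this package. A minimal sketch with hypothetical labels, a *histogram.Histogram `h`, and a millisecond timestamp `t`:

    series := prompb.TimeSeries{
        Labels:     []prompb.Label{{Name: "__name__", Value: "demo_latency_seconds"}},
        Histograms: []prompb.Histogram{HistogramToHistogramProto(t, h)},
    }
    _ = series // batched into a prompb.WriteRequest by buildWriteRequest below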
req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) if err != nil { @@ -1326,10 +1460,14 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti if exemplarCount > 0 { span.SetAttributes(attribute.Int("exemplars", exemplarCount)) } + if histogramCount > 0 { + span.SetAttributes(attribute.Int("histograms", histogramCount)) + } begin := time.Now() s.qm.metrics.samplesTotal.Add(float64(sampleCount)) s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) + s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) err := s.qm.client().Store(ctx, *buf) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) @@ -1344,6 +1482,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti onRetry := func() { s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount)) s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount)) + s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount)) } err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry) @@ -1420,6 +1559,9 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest { highest = ts.Exemplars[0].Timestamp } + if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest { + highest = ts.Histograms[0].Timestamp + } } req := &prompb.WriteRequest{ diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index be401e95d4a..2e38fb8e64e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -26,10 +26,11 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" ) var ( @@ -45,6 +46,12 @@ var ( Name: "exemplars_in_total", Help: "Exemplars in to remote storage, compare to exemplars out for queue managers.", }) + histogramsIn = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "histograms_in_total", + Help: "HistogramSamples in to remote storage, compare to histograms out for queue managers.", + }) ) // WriteStorage represents all the remote write storage. 
@@ -53,8 +60,8 @@ type WriteStorage struct { reg prometheus.Registerer mtx sync.Mutex - watcherMetrics *wal.WatcherMetrics - liveReaderMetrics *wal.LiveReaderMetrics + watcherMetrics *wlog.WatcherMetrics + liveReaderMetrics *wlog.LiveReaderMetrics externalLabels labels.Labels dir string queues map[string]*QueueManager @@ -75,8 +82,8 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f } rws := &WriteStorage{ queues: make(map[string]*QueueManager), - watcherMetrics: wal.NewWatcherMetrics(reg), - liveReaderMetrics: wal.NewLiveReaderMetrics(reg), + watcherMetrics: wlog.NewWatcherMetrics(reg), + liveReaderMetrics: wlog.NewLiveReaderMetrics(reg), logger: logger, reg: reg, flushDeadline: flushDeadline, @@ -188,6 +195,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { rws.highestTimestamp, rws.scraper, rwConf.SendExemplars, + rwConf.SendNativeHistograms, ) // Keep track of which queues are new so we know which to start. newHashes = append(newHashes, hash) @@ -251,6 +259,7 @@ type timestampTracker struct { writeStorage *WriteStorage samples int64 exemplars int64 + histograms int64 highestTimestamp int64 highestRecvTimestamp *maxTimestamp } @@ -269,6 +278,14 @@ func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, h *histogram.Histogram) (storage.SeriesRef, error) { + t.histograms++ + if ts > t.highestTimestamp { + t.highestTimestamp = ts + } + return 0, nil +} + func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. // UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now. @@ -277,10 +294,11 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { - t.writeStorage.samplesIn.incr(t.samples + t.exemplars) + t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) samplesIn.Add(float64(t.samples)) exemplarsIn.Add(float64(t.exemplars)) + histogramsIn.Add(float64(t.histograms)) t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000)) return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 64936222393..ad1f0f3ae1b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -117,6 +117,20 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } } + + for _, hp := range ts.Histograms { + hs := HistogramProtoToHistogram(hp) + _, err = app.AppendHistogram(0, labels, hp.Timestamp, hs) + if err != nil { + unwrappedErr := errors.Unwrap(err) + // Althogh AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is + // a note indicating its inclusion in the future. 
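Note: histogram forwarding stays off per queue unless rwConf.SendNativeHistograms is set; when it is false, QueueManager.AppendHistograms returns early and sends nothing. A minimal, hedged sketch of enabling it programmatically (the field name comes from the ApplyConfig change above; other config fields are elided and assumed to carry their defaults):

    rwCfg := config.DefaultRemoteWriteConfig
    rwCfg.SendNativeHistograms = true // without this, histograms read from the WAL are ignored by this queue
    conf := &config.Config{RemoteWriteConfigs: []*config.RemoteWriteConfig{&rwCfg}}
    _ = conf // handed to WriteStorage.ApplyConfig, which passes the flag into NewQueueManager above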
+ if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { + level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) + } + return err + } + } } if outOfOrderExemplarErrs > 0 { diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go index 2dba837156e..3259dd4d06c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/series.go +++ b/vendor/github.com/prometheus/prometheus/storage/series.go @@ -14,9 +14,11 @@ package storage import ( + "fmt" "math" "sort" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -90,21 +92,39 @@ func (it *listSeriesIterator) At() (int64, float64) { return s.T(), s.V() } -func (it *listSeriesIterator) Next() bool { +func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + s := it.samples.Get(it.idx) + return s.T(), s.H() +} + +func (it *listSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + s := it.samples.Get(it.idx) + return s.T(), s.FH() +} + +func (it *listSeriesIterator) AtT() int64 { + s := it.samples.Get(it.idx) + return s.T() +} + +func (it *listSeriesIterator) Next() chunkenc.ValueType { it.idx++ - return it.idx < it.samples.Len() + if it.idx >= it.samples.Len() { + return chunkenc.ValNone + } + return it.samples.Get(it.idx).Type() } -func (it *listSeriesIterator) Seek(t int64) bool { +func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType { if it.idx == -1 { it.idx = 0 } if it.idx >= it.samples.Len() { - return false + return chunkenc.ValNone } // No-op check. if s := it.samples.Get(it.idx); s.T() >= t { - return true + return s.Type() } // Do binary search between current position and end. it.idx += sort.Search(it.samples.Len()-it.idx, func(i int) bool { @@ -112,7 +132,10 @@ func (it *listSeriesIterator) Seek(t int64) bool { return s.T() >= t }) - return it.idx < it.samples.Len() + if it.idx >= it.samples.Len() { + return chunkenc.ValNone + } + return it.samples.Get(it.idx).Type() } func (it *listSeriesIterator) Err() error { return nil } @@ -230,27 +253,32 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries { } func (s *seriesToChunkEncoder) Iterator() chunks.Iterator { - chk := chunkenc.NewXORChunk() - app, err := chk.Appender() - if err != nil { - return errChunksIterator{err: err} - } + var ( + chk chunkenc.Chunk + app chunkenc.Appender + err error + ) mint := int64(math.MaxInt64) maxt := int64(math.MinInt64) chks := []chunks.Meta{} - i := 0 seriesIter := s.Series.Iterator() - for seriesIter.Next() { - // Create a new chunk if too many samples in the current one. - if i >= seriesToChunkEncoderSplit { - chks = append(chks, chunks.Meta{ - MinTime: mint, - MaxTime: maxt, - Chunk: chk, - }) - chk = chunkenc.NewXORChunk() + lastType := chunkenc.ValNone + for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { + if typ != lastType || i >= seriesToChunkEncoderSplit { + // Create a new chunk if the sample type changed or too many samples in the current one. 
+ if chk != nil { + chks = append(chks, chunks.Meta{ + MinTime: mint, + MaxTime: maxt, + Chunk: chk, + }) + } + chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding()) + if err != nil { + return errChunksIterator{err: err} + } app, err = chk.Appender() if err != nil { return errChunksIterator{err: err} @@ -259,9 +287,23 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator { // maxt is immediately overwritten below which is why setting it here won't make a difference. i = 0 } + lastType = typ - t, v := seriesIter.At() - app.Append(t, v) + var ( + t int64 + v float64 + h *histogram.Histogram + ) + switch typ { + case chunkenc.ValFloat: + t, v = seriesIter.At() + app.Append(t, v) + case chunkenc.ValHistogram: + t, h = seriesIter.AtHistogram() + app.AppendHistogram(t, h) + default: + return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())} + } maxt = t if mint == math.MaxInt64 { @@ -273,11 +315,13 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator { return errChunksIterator{err: err} } - chks = append(chks, chunks.Meta{ - MinTime: mint, - MaxTime: maxt, - Chunk: chk, - }) + if chk != nil { + chks = append(chks, chunks.Meta{ + MinTime: mint, + MaxTime: maxt, + Chunk: chk, + }) + } return NewListChunkSeriesIterator(chks...) } @@ -293,21 +337,34 @@ func (e errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different // sample implementations. if nil, sample type from this package will be used. -func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64) tsdbutil.Sample) ([]tsdbutil.Sample, error) { +func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { if newSampleFn == nil { - newSampleFn = func(t int64, v float64) tsdbutil.Sample { return sample{t, v} } + newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { + return sample{t, v, h, fh} + } } var result []tsdbutil.Sample - for iter.Next() { - t, v := iter.At() - // NaNs can't be compared normally, so substitute for another value. - if math.IsNaN(v) { - v = -42 + for { + switch iter.Next() { + case chunkenc.ValNone: + return result, iter.Err() + case chunkenc.ValFloat: + t, v := iter.At() + // NaNs can't be compared normally, so substitute for another value. + if math.IsNaN(v) { + v = -42 + } + result = append(result, newSampleFn(t, v, nil, nil)) + case chunkenc.ValHistogram: + t, h := iter.AtHistogram() + result = append(result, newSampleFn(t, 0, h, nil)) + case chunkenc.ValFloatHistogram: + t, fh := iter.AtFloatHistogram() + result = append(result, newSampleFn(t, 0, nil, fh)) + } - result = append(result, newSampleFn(t, v)) } - return result, iter.Err() } // ExpandChunks iterates over all chunks in the iterator, buffering all in slice. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/README.md b/vendor/github.com/prometheus/prometheus/tsdb/README.md index ad9354586cd..80770e8dd49 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/README.md +++ b/vendor/github.com/prometheus/prometheus/tsdb/README.md @@ -13,7 +13,7 @@ which handles storage and querying of all Prometheus v2 data. ## External resources -* A writeup of the original design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/). 
+* A writeup of the original design can be found [here](https://web.archive.org/web/20210803115658/https://fabxc.org/tsdb/). * Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/). * Compression is based on the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 8fd1066ba2d..b06f0940a15 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -19,13 +19,13 @@ import ( "io" "os" "path/filepath" - "sort" "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -198,7 +198,7 @@ func (bm *BlockMetaCompaction) SetOutOfOrder() { return } bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder) - sort.Strings(bm.Hints) + slices.Sort(bm.Hints) } func (bm *BlockMetaCompaction) FromOutOfOrder() bool { @@ -463,7 +463,7 @@ func (r blockIndexReader) SortedLabelValues(name string, matchers ...*labels.Mat } else { st, err = r.LabelValues(name, matchers...) if err == nil { - sort.Strings(st) + slices.Sort(st) } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 4db30799752..0d017e095f4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -71,6 +71,7 @@ func (w *BlockWriter) initHead() error { opts := DefaultHeadOptions() opts.ChunkRange = w.blockSize opts.ChunkDirRoot = w.chunkDir + opts.EnableNativeHistograms.Store(true) h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats()) if err != nil { return errors.Wrap(err, "tsdb.NewHead") diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index c5f8036a715..0b7117ce9ee 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -18,27 +18,32 @@ import ( "sync" "github.com/pkg/errors" + + "github.com/prometheus/prometheus/model/histogram" ) // Encoding is the identifier for a chunk encoding. type Encoding uint8 +// The different available chunk encodings. +const ( + EncNone Encoding = iota + EncXOR + EncHistogram +) + func (e Encoding) String() string { switch e { case EncNone: return "none" case EncXOR: return "XOR" + case EncHistogram: + return "histogram" } return "" } -// The different available chunk encodings. -const ( - EncNone Encoding = iota - EncXOR -) - // Chunk encodings for out-of-order chunks. // These encodings must be only used by the Head block for its internal bookkeeping. const ( @@ -50,8 +55,9 @@ func IsOutOfOrderChunk(e Encoding) bool { return (e & OutOfOrderMask) != 0 } +// IsValidEncoding returns true for supported encodings. func IsValidEncoding(e Encoding) bool { - return e == EncXOR || e == EncOOOXOR + return e == EncXOR || e == EncOOOXOR || e == EncHistogram } // Chunk holds a sequence of sample pairs that can be iterated over and appended to. @@ -84,26 +90,80 @@ type Chunk interface { // Appender adds sample pairs to a chunk. 
type Appender interface { Append(int64, float64) + AppendHistogram(t int64, h *histogram.Histogram) } // Iterator is a simple iterator that can only get the next value. // Iterator iterates over the samples of a time series, in timestamp-increasing order. type Iterator interface { - // Next advances the iterator by one. - Next() bool - // Seek advances the iterator forward to the first sample with the timestamp equal or greater than t. - // If current sample found by previous `Next` or `Seek` operation already has this property, Seek has no effect. - // Seek returns true, if such sample exists, false otherwise. - // Iterator is exhausted when the Seek returns false. - Seek(t int64) bool - // At returns the current timestamp/value pair. - // Before the iterator has advanced At behaviour is unspecified. + // Next advances the iterator by one and returns the type of the value + // at the new position (or ValNone if the iterator is exhausted). + Next() ValueType + // Seek advances the iterator forward to the first sample with a + // timestamp equal or greater than t. If the current sample found by a + // previous `Next` or `Seek` operation already has this property, Seek + // has no effect. If a sample has been found, Seek returns the type of + // its value. Otherwise, it returns ValNone, after with the iterator is + // exhausted. + Seek(t int64) ValueType + // At returns the current timestamp/value pair if the value is a float. + // Before the iterator has advanced, the behaviour is unspecified. At() (int64, float64) - // Err returns the current error. It should be used only after iterator is - // exhausted, that is `Next` or `Seek` returns false. + // AtHistogram returns the current timestamp/value pair if the value is + // a histogram with integer counts. Before the iterator has advanced, + // the behaviour is unspecified. + AtHistogram() (int64, *histogram.Histogram) + // AtFloatHistogram returns the current timestamp/value pair if the + // value is a histogram with floating-point counts. It also works if the + // value is a histogram with integer counts, in which case a + // FloatHistogram copy of the histogram is returned. Before the iterator + // has advanced, the behaviour is unspecified. + AtFloatHistogram() (int64, *histogram.FloatHistogram) + // AtT returns the current timestamp. + // Before the iterator has advanced, the behaviour is unspecified. + AtT() int64 + // Err returns the current error. It should be used only after the + // iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone. Err() error } +// ValueType defines the type of a value an Iterator points to. +type ValueType uint8 + +// Possible values for ValueType. +const ( + ValNone ValueType = iota // No value at the current position. + ValFloat // A simple float, retrieved with At. + ValHistogram // A histogram, retrieve with AtHistogram, but AtFloatHistogram works, too. + ValFloatHistogram // A floating-point histogram, retrieve with AtFloatHistogram. +) + +func (v ValueType) String() string { + switch v { + case ValNone: + return "none" + case ValFloat: + return "float" + case ValHistogram: + return "histogram" + case ValFloatHistogram: + return "floathistogram" + default: + return "unknown" + } +} + +func (v ValueType) ChunkEncoding() Encoding { + switch v { + case ValFloat: + return EncXOR + case ValHistogram: + return EncHistogram + default: + return EncNone + } +} + // MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values. 
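Note: the Iterator contract above replaces the old boolean Next/Seek with a ValueType result. A minimal consumer sketch, assuming some chunkenc.Iterator `it`, that handles all three value kinds:

    for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
        switch vt {
        case chunkenc.ValFloat:
            t, v := it.At()
            _, _ = t, v
        case chunkenc.ValHistogram:
            t, h := it.AtHistogram()
            _, _ = t, h
        case chunkenc.ValFloatHistogram:
            // AtFloatHistogram also works for ValHistogram samples, converting on the fly.
            t, fh := it.AtFloatHistogram()
            _, _ = t, fh
        }
    }
    if err := it.Err(); err != nil {
        // only meaningful once Next or Seek has returned ValNone
    }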
func MockSeriesIterator(timestamps []int64, values []float64) Iterator { return &mockSeriesIterator{ @@ -119,18 +179,29 @@ type mockSeriesIterator struct { currIndex int } -func (it *mockSeriesIterator) Seek(int64) bool { return false } +func (it *mockSeriesIterator) Seek(int64) ValueType { return ValNone } + func (it *mockSeriesIterator) At() (int64, float64) { return it.timeStamps[it.currIndex], it.values[it.currIndex] } -func (it *mockSeriesIterator) Next() bool { +func (it *mockSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { return math.MinInt64, nil } + +func (it *mockSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + return math.MinInt64, nil +} + +func (it *mockSeriesIterator) AtT() int64 { + return it.timeStamps[it.currIndex] +} + +func (it *mockSeriesIterator) Next() ValueType { if it.currIndex < len(it.timeStamps)-1 { it.currIndex++ - return true + return ValFloat } - return false + return ValNone } func (it *mockSeriesIterator) Err() error { return nil } @@ -141,10 +212,13 @@ func NewNopIterator() Iterator { type nopIterator struct{} -func (nopIterator) Seek(int64) bool { return false } -func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 } -func (nopIterator) Next() bool { return false } -func (nopIterator) Err() error { return nil } +func (nopIterator) Next() ValueType { return ValNone } +func (nopIterator) Seek(int64) ValueType { return ValNone } +func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 } +func (nopIterator) AtHistogram() (int64, *histogram.Histogram) { return math.MinInt64, nil } +func (nopIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { return math.MinInt64, nil } +func (nopIterator) AtT() int64 { return math.MinInt64 } +func (nopIterator) Err() error { return nil } // Pool is used to create and reuse chunk references to avoid allocations. type Pool interface { @@ -154,7 +228,8 @@ type Pool interface { // pool is a memory pool of chunk objects. type pool struct { - xor sync.Pool + xor sync.Pool + histogram sync.Pool } // NewPool returns a new pool. @@ -165,6 +240,11 @@ func NewPool() Pool { return &XORChunk{b: bstream{}} }, }, + histogram: sync.Pool{ + New: func() interface{} { + return &HistogramChunk{b: bstream{}} + }, + }, } } @@ -175,6 +255,11 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { c.b.stream = b c.b.count = 0 return c, nil + case EncHistogram: + c := p.histogram.Get().(*HistogramChunk) + c.b.stream = b + c.b.count = 0 + return c, nil } return nil, errors.Errorf("invalid chunk encoding %q", e) } @@ -192,6 +277,17 @@ func (p *pool) Put(c Chunk) error { xc.b.stream = nil xc.b.count = 0 p.xor.Put(c) + case EncHistogram: + sh, ok := c.(*HistogramChunk) + // This may happen often with wrapped chunks. Nothing we can really do about + // it but returning an error would cause a lot of allocations again. Thus, + // we just skip it. + if !ok { + return nil + } + sh.b.stream = nil + sh.b.count = 0 + p.histogram.Put(c) default: return errors.Errorf("invalid chunk encoding %q", c.Encoding()) } @@ -205,6 +301,19 @@ func FromData(e Encoding, d []byte) (Chunk, error) { switch e { case EncXOR, EncOOOXOR: return &XORChunk{b: bstream{count: 0, stream: d}}, nil + case EncHistogram: + return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil + } + return nil, errors.Errorf("invalid chunk encoding %q", e) +} + +// NewEmptyChunk returns an empty chunk for the given encoding. 
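Note: the pool above now recycles histogram chunks as well. A minimal sketch of the round-trip with the new encoding, assuming `raw` holds bytes previously produced by a HistogramChunk and that Chunk exposes the usual Iterator(Iterator) method:

    p := chunkenc.NewPool()
    c, err := p.Get(chunkenc.EncHistogram, raw)
    if err != nil {
        return err
    }
    it := c.Iterator(nil)
    for it.Next() == chunkenc.ValHistogram {
        t, h := it.AtHistogram()
        _, _ = t, h
    }
    // Returning the chunk lets the pool reuse the backing object.
    if err := p.Put(c); err != nil {
        return err
    }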
+func NewEmptyChunk(e Encoding) (Chunk, error) { + switch e { + case EncXOR: + return NewXORChunk(), nil + case EncHistogram: + return NewHistogramChunk(), nil } return nil, errors.Errorf("invalid chunk encoding %q", e) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go new file mode 100644 index 00000000000..5aad382b2b2 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -0,0 +1,857 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunkenc + +import ( + "encoding/binary" + "math" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/value" +) + +// HistogramChunk holds encoded sample data for a sparse, high-resolution +// histogram. +// +// Each sample has multiple "fields", stored in the following way (raw = store +// number directly, delta = store delta to the previous number, dod = store +// delta of the delta to the previous number, xor = what we do for regular +// sample values): +// +// field → ts count zeroCount sum []posbuckets []negbuckets +// sample 1 raw raw raw raw []raw []raw +// sample 2 delta delta delta xor []delta []delta +// sample >2 dod dod dod xor []dod []dod +type HistogramChunk struct { + b bstream +} + +// NewHistogramChunk returns a new chunk with histogram encoding of the given +// size. +func NewHistogramChunk() *HistogramChunk { + b := make([]byte, 3, 128) + return &HistogramChunk{b: bstream{stream: b, count: 0}} +} + +// Encoding returns the encoding type. +func (c *HistogramChunk) Encoding() Encoding { + return EncHistogram +} + +// Bytes returns the underlying byte slice of the chunk. +func (c *HistogramChunk) Bytes() []byte { + return c.b.bytes() +} + +// NumSamples returns the number of samples in the chunk. +func (c *HistogramChunk) NumSamples() int { + return int(binary.BigEndian.Uint16(c.Bytes())) +} + +// Layout returns the histogram layout. Only call this on chunks that have at +// least one sample. +func (c *HistogramChunk) Layout() ( + schema int32, zeroThreshold float64, + negativeSpans, positiveSpans []histogram.Span, + err error, +) { + if c.NumSamples() == 0 { + panic("HistoChunk.Layout() called on an empty chunk") + } + b := newBReader(c.Bytes()[2:]) + return readHistogramChunkLayout(&b) +} + +// CounterResetHeader defines the first 2 bits of the chunk header. +type CounterResetHeader byte + +const ( + // CounterReset means there was definitely a counter reset that resulted in this chunk. + CounterReset CounterResetHeader = 0b10000000 + // NotCounterReset means there was definitely no counter reset when cutting this chunk. + NotCounterReset CounterResetHeader = 0b01000000 + // GaugeType means this chunk contains a gauge histogram, where counter resets do not happen. + GaugeType CounterResetHeader = 0b11000000 + // UnknownCounterReset means we cannot say if this chunk was created due to a counter reset or not. 
+ // An explicit counter reset detection needs to happen during query time. + UnknownCounterReset CounterResetHeader = 0b00000000 +) + +// SetCounterResetHeader sets the counter reset header. +func (c *HistogramChunk) SetCounterResetHeader(h CounterResetHeader) { + switch h { + case CounterReset, NotCounterReset, GaugeType, UnknownCounterReset: + bytes := c.Bytes() + bytes[2] = (bytes[2] & 0b00111111) | byte(h) + default: + panic("invalid CounterResetHeader type") + } +} + +// GetCounterResetHeader returns the info about the first 2 bits of the chunk +// header. +func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader { + return CounterResetHeader(c.Bytes()[2] & 0b11000000) +} + +// Compact implements the Chunk interface. +func (c *HistogramChunk) Compact() { + if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold { + buf := make([]byte, l) + copy(buf, c.b.stream) + c.b.stream = buf + } +} + +// Appender implements the Chunk interface. +func (c *HistogramChunk) Appender() (Appender, error) { + it := c.iterator(nil) + + // To get an appender, we must know the state it would have if we had + // appended all existing data from scratch. We iterate through the end + // and populate via the iterator's state. + for it.Next() == ValHistogram { + } + if err := it.Err(); err != nil { + return nil, err + } + + a := &HistogramAppender{ + b: &c.b, + + schema: it.schema, + zThreshold: it.zThreshold, + pSpans: it.pSpans, + nSpans: it.nSpans, + t: it.t, + cnt: it.cnt, + zCnt: it.zCnt, + tDelta: it.tDelta, + cntDelta: it.cntDelta, + zCntDelta: it.zCntDelta, + pBuckets: it.pBuckets, + nBuckets: it.nBuckets, + pBucketsDelta: it.pBucketsDelta, + nBucketsDelta: it.nBucketsDelta, + + sum: it.sum, + leading: it.leading, + trailing: it.trailing, + } + if it.numTotal == 0 { + a.leading = 0xff + } + return a, nil +} + +func countSpans(spans []histogram.Span) int { + var cnt int + for _, s := range spans { + cnt += int(s.Length) + } + return cnt +} + +func newHistogramIterator(b []byte) *histogramIterator { + it := &histogramIterator{ + br: newBReader(b), + numTotal: binary.BigEndian.Uint16(b), + t: math.MinInt64, + } + // The first 3 bytes contain chunk headers. + // We skip that for actual samples. + _, _ = it.br.readBits(24) + return it +} + +func (c *HistogramChunk) iterator(it Iterator) *histogramIterator { + // This comment is copied from XORChunk.iterator: + // Should iterators guarantee to act on a copy of the data so it doesn't lock append? + // When using striped locks to guard access to chunks, probably yes. + // Could only copy data if the chunk is not completed yet. + if histogramIter, ok := it.(*histogramIterator); ok { + histogramIter.Reset(c.b.bytes()) + return histogramIter + } + return newHistogramIterator(c.b.bytes()) +} + +// Iterator implements the Chunk interface. +func (c *HistogramChunk) Iterator(it Iterator) Iterator { + return c.iterator(it) +} + +// HistogramAppender is an Appender implementation for sparse histograms. +type HistogramAppender struct { + b *bstream + + // Layout: + schema int32 + zThreshold float64 + pSpans, nSpans []histogram.Span + + // Although we intend to start new chunks on counter resets, we still + // have to handle negative deltas for gauge histograms. Therefore, even + // deltas are signed types here (even for tDelta to not treat that one + // specially). + t int64 + cnt, zCnt uint64 + tDelta, cntDelta, zCntDelta int64 + pBuckets, nBuckets []int64 + pBucketsDelta, nBucketsDelta []int64 + + // The sum is Gorilla xor encoded. 
+ sum float64 + leading uint8 + trailing uint8 +} + +// Append implements Appender. This implementation panics because normal float +// samples must never be appended to a histogram chunk. +func (a *HistogramAppender) Append(int64, float64) { + panic("appended a float sample to a histogram chunk") +} + +// Appendable returns whether the chunk can be appended to, and if so +// whether any recoding needs to happen using the provided interjections +// (in case of any new buckets, positive or negative range, respectively). +// +// The chunk is not appendable in the following cases: +// +// • The schema has changed. +// +// • The threshold for the zero bucket has changed. +// +// • Any buckets have disappeared. +// +// • There was a counter reset in the count of observations or in any bucket, +// including the zero bucket. +// +// • The last sample in the chunk was stale while the current sample is not stale. +// +// The method returns an additional boolean set to true if it is not appendable +// because of a counter reset. If the given sample is stale, it is always ok to +// append. If counterReset is true, okToAppend is always false. +func (a *HistogramAppender) Appendable(h *histogram.Histogram) ( + positiveInterjections, negativeInterjections []Interjection, + okToAppend, counterReset bool, +) { + if value.IsStaleNaN(h.Sum) { + // This is a stale sample whose buckets and spans don't matter. + okToAppend = true + return + } + if value.IsStaleNaN(a.sum) { + // If the last sample was stale, then we can only accept stale + // samples in this chunk. + return + } + + if h.Count < a.cnt { + // There has been a counter reset. + counterReset = true + return + } + + if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold { + return + } + + if h.ZeroCount < a.zCnt { + // There has been a counter reset since ZeroThreshold didn't change. + counterReset = true + return + } + + var ok bool + positiveInterjections, ok = compareSpans(a.pSpans, h.PositiveSpans) + if !ok { + counterReset = true + return + } + negativeInterjections, ok = compareSpans(a.nSpans, h.NegativeSpans) + if !ok { + counterReset = true + return + } + + if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) || + counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) { + counterReset, positiveInterjections, negativeInterjections = true, nil, nil + return + } + + okToAppend = true + return +} + +// counterResetInAnyBucket returns true if there was a counter reset for any +// bucket. This should be called only when the bucket layout is the same or new +// buckets were added. It does not handle the case of buckets missing. +func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans []histogram.Span) bool { + if len(oldSpans) == 0 || len(oldBuckets) == 0 { + return false + } + + oldSpanSliceIdx, newSpanSliceIdx := 0, 0 // Index for the span slices. + oldInsideSpanIdx, newInsideSpanIdx := uint32(0), uint32(0) // Index inside a span. + oldIdx, newIdx := oldSpans[0].Offset, newSpans[0].Offset + + oldBucketSliceIdx, newBucketSliceIdx := 0, 0 // Index inside bucket slice. + oldVal, newVal := oldBuckets[0], newBuckets[0] + + // Since we assume that new spans won't have missing buckets, there will never be a case + // where the old index will not find a matching new index. + for { + if oldIdx == newIdx { + if newVal < oldVal { + return true + } + } + + if oldIdx <= newIdx { + // Moving ahead old bucket and span by 1 index. 
+ if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 { + // Current span is over. + oldSpanSliceIdx++ + oldInsideSpanIdx = 0 + if oldSpanSliceIdx >= len(oldSpans) { + // All old spans are over. + break + } + oldIdx += 1 + oldSpans[oldSpanSliceIdx].Offset + } else { + oldInsideSpanIdx++ + oldIdx++ + } + oldBucketSliceIdx++ + oldVal += oldBuckets[oldBucketSliceIdx] + } + + if oldIdx > newIdx { + // Moving ahead new bucket and span by 1 index. + if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 { + // Current span is over. + newSpanSliceIdx++ + newInsideSpanIdx = 0 + if newSpanSliceIdx >= len(newSpans) { + // All new spans are over. + // This should not happen, old spans above should catch this first. + panic("new spans over before old spans in counterReset") + } + newIdx += 1 + newSpans[newSpanSliceIdx].Offset + } else { + newInsideSpanIdx++ + newIdx++ + } + newBucketSliceIdx++ + newVal += newBuckets[newBucketSliceIdx] + } + } + + return false +} + +// AppendHistogram appends a histogram to the chunk. The caller must ensure that +// the histogram is properly structured, e.g. the number of buckets used +// corresponds to the number conveyed by the span structures. First call +// Appendable() and act accordingly! +func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) { + var tDelta, cntDelta, zCntDelta int64 + num := binary.BigEndian.Uint16(a.b.bytes()) + + if value.IsStaleNaN(h.Sum) { + // Emptying out other fields to write no buckets, and an empty + // layout in case of first histogram in the chunk. + h = &histogram.Histogram{Sum: h.Sum} + } + + if num == 0 { + // The first append gets the privilege to dictate the layout + // but it's also responsible for encoding it into the chunk! + writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans) + a.schema = h.Schema + a.zThreshold = h.ZeroThreshold + + if len(h.PositiveSpans) > 0 { + a.pSpans = make([]histogram.Span, len(h.PositiveSpans)) + copy(a.pSpans, h.PositiveSpans) + } else { + a.pSpans = nil + } + if len(h.NegativeSpans) > 0 { + a.nSpans = make([]histogram.Span, len(h.NegativeSpans)) + copy(a.nSpans, h.NegativeSpans) + } else { + a.nSpans = nil + } + + numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans) + if numPBuckets > 0 { + a.pBuckets = make([]int64, numPBuckets) + a.pBucketsDelta = make([]int64, numPBuckets) + } else { + a.pBuckets = nil + a.pBucketsDelta = nil + } + if numNBuckets > 0 { + a.nBuckets = make([]int64, numNBuckets) + a.nBucketsDelta = make([]int64, numNBuckets) + } else { + a.nBuckets = nil + a.nBucketsDelta = nil + } + + // Now store the actual data. + putVarbitInt(a.b, t) + putVarbitUint(a.b, h.Count) + putVarbitUint(a.b, h.ZeroCount) + a.b.writeBits(math.Float64bits(h.Sum), 64) + for _, b := range h.PositiveBuckets { + putVarbitInt(a.b, b) + } + for _, b := range h.NegativeBuckets { + putVarbitInt(a.b, b) + } + } else { + // The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code, + // so we don't need a separate single delta logic for the 2nd sample. 
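Editorial aside, not part of the vendored diff: the Appendable contract documented above is easiest to read as a decision tree. The sketch below (hypothetical helper name appendOrCut, assuming the chunkenc and model/histogram imports) shows the intended call order: consult Appendable first, then either append in place, Recode to the wider bucket layout, or cut a fresh chunk and record the counter reset on its header. It mirrors the documented contract only; it is not the actual head-appender code.

// Sketch of the documented calling pattern around Appendable/Recode/AppendHistogram.
func appendOrCut(
	cur chunkenc.Chunk,
	app *chunkenc.HistogramAppender,
	t int64,
	h *histogram.Histogram,
) (chunkenc.Chunk, chunkenc.Appender, error) {
	posIns, negIns, okToAppend, counterReset := app.Appendable(h)
	switch {
	case okToAppend && len(posIns) == 0 && len(negIns) == 0:
		// Bucket layout is compatible: append in place.
		app.AppendHistogram(t, h)
		return cur, app, nil
	case okToAppend:
		// New buckets appeared: recode the existing chunk to the wider
		// layout, then append through the returned appender.
		recoded, recodedApp := app.Recode(posIns, negIns, h.PositiveSpans, h.NegativeSpans)
		recodedApp.AppendHistogram(t, h)
		return recoded, recodedApp, nil
	default:
		// Schema change, vanished buckets or counter reset: cut a new chunk.
		next := chunkenc.NewHistogramChunk()
		if counterReset {
			next.SetCounterResetHeader(chunkenc.CounterReset)
		}
		nextApp, err := next.Appender()
		if err != nil {
			return nil, nil, err
		}
		nextApp.AppendHistogram(t, h)
		return next, nextApp, nil
	}
}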
+ + tDelta = t - a.t + cntDelta = int64(h.Count) - int64(a.cnt) + zCntDelta = int64(h.ZeroCount) - int64(a.zCnt) + + tDod := tDelta - a.tDelta + cntDod := cntDelta - a.cntDelta + zCntDod := zCntDelta - a.zCntDelta + + if value.IsStaleNaN(h.Sum) { + cntDod, zCntDod = 0, 0 + } + + putVarbitInt(a.b, tDod) + putVarbitInt(a.b, cntDod) + putVarbitInt(a.b, zCntDod) + + a.writeSumDelta(h.Sum) + + for i, b := range h.PositiveBuckets { + delta := b - a.pBuckets[i] + dod := delta - a.pBucketsDelta[i] + putVarbitInt(a.b, dod) + a.pBucketsDelta[i] = delta + } + for i, b := range h.NegativeBuckets { + delta := b - a.nBuckets[i] + dod := delta - a.nBucketsDelta[i] + putVarbitInt(a.b, dod) + a.nBucketsDelta[i] = delta + } + } + + binary.BigEndian.PutUint16(a.b.bytes(), num+1) + + a.t = t + a.cnt = h.Count + a.zCnt = h.ZeroCount + a.tDelta = tDelta + a.cntDelta = cntDelta + a.zCntDelta = zCntDelta + + copy(a.pBuckets, h.PositiveBuckets) + copy(a.nBuckets, h.NegativeBuckets) + // Note that the bucket deltas were already updated above. + a.sum = h.Sum +} + +// Recode converts the current chunk to accommodate an expansion of the set of +// (positive and/or negative) buckets used, according to the provided +// interjections, resulting in the honoring of the provided new positive and +// negative spans. To continue appending, use the returned Appender rather than +// the receiver of this method. +func (a *HistogramAppender) Recode( + positiveInterjections, negativeInterjections []Interjection, + positiveSpans, negativeSpans []histogram.Span, +) (Chunk, Appender) { + // TODO(beorn7): This currently just decodes everything and then encodes + // it again with the new span layout. This can probably be done in-place + // by editing the chunk. But let's first see how expensive it is in the + // big picture. Also, in-place editing might create concurrency issues. + byts := a.b.bytes() + it := newHistogramIterator(byts) + hc := NewHistogramChunk() + app, err := hc.Appender() + if err != nil { + panic(err) + } + numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans) + + for it.Next() == ValHistogram { + tOld, hOld := it.AtHistogram() + + // We have to newly allocate slices for the modified buckets + // here because they are kept by the appender until the next + // append. + // TODO(beorn7): We might be able to optimize this. + var positiveBuckets, negativeBuckets []int64 + if numPositiveBuckets > 0 { + positiveBuckets = make([]int64, numPositiveBuckets) + } + if numNegativeBuckets > 0 { + negativeBuckets = make([]int64, numNegativeBuckets) + } + + // Save the modified histogram to the new chunk. + hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans + if len(positiveInterjections) > 0 { + hOld.PositiveBuckets = interject(hOld.PositiveBuckets, positiveBuckets, positiveInterjections) + } + if len(negativeInterjections) > 0 { + hOld.NegativeBuckets = interject(hOld.NegativeBuckets, negativeBuckets, negativeInterjections) + } + app.AppendHistogram(tOld, hOld) + } + + hc.SetCounterResetHeader(CounterResetHeader(byts[2] & 0b11000000)) + return hc, app +} + +func (a *HistogramAppender) writeSumDelta(v float64) { + xorWrite(a.b, v, a.sum, &a.leading, &a.trailing) +} + +type histogramIterator struct { + br bstreamReader + numTotal uint16 + numRead uint16 + + // Layout: + schema int32 + zThreshold float64 + pSpans, nSpans []histogram.Span + + // For the fields that are tracked as deltas and ultimately dod's. 
+ t int64 + cnt, zCnt uint64 + tDelta, cntDelta, zCntDelta int64 + pBuckets, nBuckets []int64 // Delta between buckets. + pFloatBuckets, nFloatBuckets []float64 // Absolute counts. + pBucketsDelta, nBucketsDelta []int64 + + // The sum is Gorilla xor encoded. + sum float64 + leading uint8 + trailing uint8 + + // Track calls to retrieve methods. Once they have been called, we + // cannot recycle the bucket slices anymore because we have returned + // them in the histogram. + atHistogramCalled, atFloatHistogramCalled bool + + err error +} + +func (it *histogramIterator) Seek(t int64) ValueType { + if it.err != nil { + return ValNone + } + + for t > it.t || it.numRead == 0 { + if it.Next() == ValNone { + return ValNone + } + } + return ValHistogram +} + +func (it *histogramIterator) At() (int64, float64) { + panic("cannot call histogramIterator.At") +} + +func (it *histogramIterator) AtHistogram() (int64, *histogram.Histogram) { + if value.IsStaleNaN(it.sum) { + return it.t, &histogram.Histogram{Sum: it.sum} + } + it.atHistogramCalled = true + return it.t, &histogram.Histogram{ + Count: it.cnt, + ZeroCount: it.zCnt, + Sum: it.sum, + ZeroThreshold: it.zThreshold, + Schema: it.schema, + PositiveSpans: it.pSpans, + NegativeSpans: it.nSpans, + PositiveBuckets: it.pBuckets, + NegativeBuckets: it.nBuckets, + } +} + +func (it *histogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + if value.IsStaleNaN(it.sum) { + return it.t, &histogram.FloatHistogram{Sum: it.sum} + } + it.atFloatHistogramCalled = true + return it.t, &histogram.FloatHistogram{ + Count: float64(it.cnt), + ZeroCount: float64(it.zCnt), + Sum: it.sum, + ZeroThreshold: it.zThreshold, + Schema: it.schema, + PositiveSpans: it.pSpans, + NegativeSpans: it.nSpans, + PositiveBuckets: it.pFloatBuckets, + NegativeBuckets: it.nFloatBuckets, + } +} + +func (it *histogramIterator) AtT() int64 { + return it.t +} + +func (it *histogramIterator) Err() error { + return it.err +} + +func (it *histogramIterator) Reset(b []byte) { + // The first 3 bytes contain chunk headers. + // We skip that for actual samples. + it.br = newBReader(b[3:]) + it.numTotal = binary.BigEndian.Uint16(b) + it.numRead = 0 + + it.t, it.cnt, it.zCnt = 0, 0, 0 + it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0 + + // Recycle slices that have not been returned yet. Otherwise, start from + // scratch. + if it.atHistogramCalled { + it.atHistogramCalled = false + it.pBuckets, it.nBuckets = nil, nil + } else { + it.pBuckets = it.pBuckets[:0] + it.nBuckets = it.nBuckets[:0] + } + if it.atFloatHistogramCalled { + it.atFloatHistogramCalled = false + it.pFloatBuckets, it.nFloatBuckets = nil, nil + } else { + it.pFloatBuckets = it.pFloatBuckets[:0] + it.nFloatBuckets = it.nFloatBuckets[:0] + } + + it.pBucketsDelta = it.pBucketsDelta[:0] + it.nBucketsDelta = it.nBucketsDelta[:0] + + it.sum = 0 + it.leading = 0 + it.trailing = 0 + it.err = nil +} + +func (it *histogramIterator) Next() ValueType { + if it.err != nil || it.numRead == it.numTotal { + return ValNone + } + + if it.numRead == 0 { + // The first read is responsible for reading the chunk layout + // and for initializing fields that depend on it. We give + // counter reset info at chunk level, hence we discard it here. 
+ schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.schema = schema + it.zThreshold = zeroThreshold + it.pSpans, it.nSpans = posSpans, negSpans + numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans) + // The code below recycles existing slices in case this iterator + // was reset and already has slices of a sufficient capacity. + if numPBuckets > 0 { + it.pBuckets = append(it.pBuckets, make([]int64, numPBuckets)...) + it.pBucketsDelta = append(it.pBucketsDelta, make([]int64, numPBuckets)...) + it.pFloatBuckets = append(it.pFloatBuckets, make([]float64, numPBuckets)...) + } + if numNBuckets > 0 { + it.nBuckets = append(it.nBuckets, make([]int64, numNBuckets)...) + it.nBucketsDelta = append(it.nBucketsDelta, make([]int64, numNBuckets)...) + it.nFloatBuckets = append(it.nFloatBuckets, make([]float64, numNBuckets)...) + } + + // Now read the actual data. + t, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.t = t + + cnt, err := readVarbitUint(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.cnt = cnt + + zcnt, err := readVarbitUint(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.zCnt = zcnt + + sum, err := it.br.readBits(64) + if err != nil { + it.err = err + return ValNone + } + it.sum = math.Float64frombits(sum) + + var current int64 + for i := range it.pBuckets { + v, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.pBuckets[i] = v + current += it.pBuckets[i] + it.pFloatBuckets[i] = float64(current) + } + current = 0 + for i := range it.nBuckets { + v, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.nBuckets[i] = v + current += it.nBuckets[i] + it.nFloatBuckets[i] = float64(current) + } + + it.numRead++ + return ValHistogram + } + + // The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code, + // so we don't need a separate single delta logic for the 2nd sample. + + // Recycle bucket slices that have not been returned yet. Otherwise, + // copy them. + if it.atHistogramCalled { + it.atHistogramCalled = false + if len(it.pBuckets) > 0 { + newBuckets := make([]int64, len(it.pBuckets)) + copy(newBuckets, it.pBuckets) + it.pBuckets = newBuckets + } else { + it.pBuckets = nil + } + if len(it.nBuckets) > 0 { + newBuckets := make([]int64, len(it.nBuckets)) + copy(newBuckets, it.nBuckets) + it.nBuckets = newBuckets + } else { + it.nBuckets = nil + } + } + // FloatBuckets are set from scratch, so simply create empty ones. 
+ if it.atFloatHistogramCalled { + it.atFloatHistogramCalled = false + if len(it.pFloatBuckets) > 0 { + it.pFloatBuckets = make([]float64, len(it.pFloatBuckets)) + } else { + it.pFloatBuckets = nil + } + if len(it.nFloatBuckets) > 0 { + it.nFloatBuckets = make([]float64, len(it.nFloatBuckets)) + } else { + it.nFloatBuckets = nil + } + } + + tDod, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.tDelta = it.tDelta + tDod + it.t += it.tDelta + + cntDod, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.cntDelta = it.cntDelta + cntDod + it.cnt = uint64(int64(it.cnt) + it.cntDelta) + + zcntDod, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.zCntDelta = it.zCntDelta + zcntDod + it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta) + + ok := it.readSum() + if !ok { + return ValNone + } + + if value.IsStaleNaN(it.sum) { + it.numRead++ + return ValHistogram + } + + var current int64 + for i := range it.pBuckets { + dod, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.pBucketsDelta[i] += dod + it.pBuckets[i] += it.pBucketsDelta[i] + current += it.pBuckets[i] + it.pFloatBuckets[i] = float64(current) + } + + current = 0 + for i := range it.nBuckets { + dod, err := readVarbitInt(&it.br) + if err != nil { + it.err = err + return ValNone + } + it.nBucketsDelta[i] += dod + it.nBuckets[i] += it.nBucketsDelta[i] + current += it.nBuckets[i] + it.nFloatBuckets[i] = float64(current) + } + + it.numRead++ + return ValHistogram +} + +func (it *histogramIterator) readSum() bool { + err := xorRead(&it.br, &it.sum, &it.leading, &it.trailing) + if err != nil { + it.err = err + return false + } + return true +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go new file mode 100644 index 00000000000..7a4407305ca --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go @@ -0,0 +1,334 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package chunkenc + +import ( + "math" + + "github.com/prometheus/prometheus/model/histogram" +) + +func writeHistogramChunkLayout(b *bstream, schema int32, zeroThreshold float64, positiveSpans, negativeSpans []histogram.Span) { + putZeroThreshold(b, zeroThreshold) + putVarbitInt(b, int64(schema)) + putHistogramChunkLayoutSpans(b, positiveSpans) + putHistogramChunkLayoutSpans(b, negativeSpans) +} + +func readHistogramChunkLayout(b *bstreamReader) ( + schema int32, zeroThreshold float64, + positiveSpans, negativeSpans []histogram.Span, + err error, +) { + zeroThreshold, err = readZeroThreshold(b) + if err != nil { + return + } + + v, err := readVarbitInt(b) + if err != nil { + return + } + schema = int32(v) + + positiveSpans, err = readHistogramChunkLayoutSpans(b) + if err != nil { + return + } + + negativeSpans, err = readHistogramChunkLayoutSpans(b) + if err != nil { + return + } + + return +} + +func putHistogramChunkLayoutSpans(b *bstream, spans []histogram.Span) { + putVarbitUint(b, uint64(len(spans))) + for _, s := range spans { + putVarbitUint(b, uint64(s.Length)) + putVarbitInt(b, int64(s.Offset)) + } +} + +func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) { + var spans []histogram.Span + num, err := readVarbitUint(b) + if err != nil { + return nil, err + } + for i := 0; i < int(num); i++ { + + length, err := readVarbitUint(b) + if err != nil { + return nil, err + } + + offset, err := readVarbitInt(b) + if err != nil { + return nil, err + } + + spans = append(spans, histogram.Span{ + Length: uint32(length), + Offset: int32(offset), + }) + } + return spans, nil +} + +// putZeroThreshold writes the zero threshold to the bstream. It stores typical +// values in just one byte, but needs 9 bytes for other values. In detail: +// +// * If the threshold is 0, store a single zero byte. +// +// - If the threshold is a power of 2 between (and including) 2^-243 and 2^10, +// take the exponent from the IEEE 754 representation of the threshold, which +// covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242 +// in IEEE 754 representation, and 2^10 is 0.5*2^11.) Add 243 to the exponent +// and store the result (which will be between 1 and 254) as a single +// byte. Note that small powers of two are preferred values for the zero +// threshold. The default value for the zero threshold is 2^-128 (or +// 0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a +// single byte (with value 116). +// +// - In all other cases, store 255 as a single byte, followed by the 8 bytes of +// the threshold as a float64, i.e. taking 9 bytes in total. +func putZeroThreshold(b *bstream, threshold float64) { + if threshold == 0 { + b.writeByte(0) + return + } + frac, exp := math.Frexp(threshold) + if frac != 0.5 || exp < -242 || exp > 11 { + b.writeByte(255) + b.writeBits(math.Float64bits(threshold), 64) + return + } + b.writeByte(byte(exp + 243)) +} + +// readZeroThreshold reads the zero threshold written with putZeroThreshold. +func readZeroThreshold(br *bstreamReader) (float64, error) { + b, err := br.ReadByte() + if err != nil { + return 0, err + } + switch b { + case 0: + return 0, nil + case 255: + v, err := br.readBits(64) + if err != nil { + return 0, err + } + return math.Float64frombits(v), nil + default: + return math.Ldexp(0.5, int(b)-243), nil + } +} + +type bucketIterator struct { + spans []histogram.Span + span int // Span position of last yielded bucket. + bucket int // Bucket position within span of last yielded bucket. 
+ idx int // Bucket index (globally across all spans) of last yielded bucket. +} + +func newBucketIterator(spans []histogram.Span) *bucketIterator { + b := bucketIterator{ + spans: spans, + span: 0, + bucket: -1, + idx: -1, + } + if len(spans) > 0 { + b.idx += int(spans[0].Offset) + } + return &b +} + +func (b *bucketIterator) Next() (int, bool) { + // We're already out of bounds. + if b.span >= len(b.spans) { + return 0, false + } +try: + if b.bucket < int(b.spans[b.span].Length-1) { // Try to move within same span. + b.bucket++ + b.idx++ + return b.idx, true + } else if b.span < len(b.spans)-1 { // Try to move from one span to the next. + b.span++ + b.idx += int(b.spans[b.span].Offset + 1) + b.bucket = 0 + if b.spans[b.span].Length == 0 { + // Pathological case that should never happen. We can't use this span, let's try again. + goto try + } + return b.idx, true + } + // We're out of options. + return 0, false +} + +// An Interjection describes how many new buckets have to be introduced before +// processing the pos'th delta from the original slice. +type Interjection struct { + pos int + num int +} + +// compareSpans returns the interjections to convert a slice of deltas to a new +// slice representing an expanded set of buckets, or false if incompatible +// (e.g. if buckets were removed). +// +// Example: +// +// Let's say the old buckets look like this: +// +// span syntax: [offset, length] +// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1] +// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15] +// raw values 6 3 3 2 4 5 1 +// deltas 6 -3 0 -1 2 1 -4 +// +// But now we introduce a new bucket layout. (Carefully chosen example where we +// have a span appended, one unchanged[*], one prepended, and two merge - in +// that order.) +// +// [*] unchanged in terms of which bucket indices they represent. but to achieve +// that, their offset needs to change if "disrupted" by spans changing ahead of +// them +// +// \/ this one is "unchanged" +// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ] +// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15] +// raw values 6 3 0 3 0 0 2 4 5 0 1 +// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1 +// delta mods: / \ / \ / \ +// +// Note that whenever any new buckets are introduced, the subsequent "old" +// bucket needs to readjust its delta to the new base of 0. Thus, for the caller +// who wants to transform the set of original deltas to a new set of deltas to +// match a new span layout that adds buckets, we simply need to generate a list +// of interjections. +// +// Note: Within compareSpans we don't have to worry about the changes to the +// spans themselves, thanks to the iterators we get to work with the more useful +// bucket indices (which of course directly correspond to the buckets we have to +// adjust). +func compareSpans(a, b []histogram.Span) ([]Interjection, bool) { + ai := newBucketIterator(a) + bi := newBucketIterator(b) + + var interjections []Interjection + + // When inter.num becomes > 0, this becomes a valid interjection that + // should be yielded when we finish a streak of new buckets. + var inter Interjection + + av, aOK := ai.Next() + bv, bOK := bi.Next() +loop: + for { + switch { + case aOK && bOK: + switch { + case av == bv: // Both have an identical value. move on! + // Finish WIP interjection and reset. + if inter.num > 0 { + interjections = append(interjections, inter) + } + inter.num = 0 + av, aOK = ai.Next() + bv, bOK = bi.Next() + inter.pos++ + case av < bv: // b misses a value that is in a. 
+ return interjections, false + case av > bv: // a misses a value that is in b. Forward b and recompare. + inter.num++ + bv, bOK = bi.Next() + } + case aOK && !bOK: // b misses a value that is in a. + return interjections, false + case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. + inter.num++ + bv, bOK = bi.Next() + default: // Both iterators ran out. We're done. + if inter.num > 0 { + interjections = append(interjections, inter) + } + break loop + } + } + + return interjections, true +} + +// interject merges 'in' with the provided interjections and writes them into +// 'out', which must already have the appropriate length. +func interject(in, out []int64, interjections []Interjection) []int64 { + var ( + j int // Position in out. + v int64 // The last value seen. + interj int // The next interjection to process. + ) + for i, d := range in { + if interj < len(interjections) && i == interjections[interj].pos { + + // We have an interjection! + // Add interjection.num new delta values such that their + // bucket values equate 0. + out[j] = int64(-v) + j++ + for x := 1; x < interjections[interj].num; x++ { + out[j] = 0 + j++ + } + interj++ + + // Now save the value from the input. The delta value we + // should save is the original delta value + the last + // value of the point before the interjection (to undo + // the delta that was introduced by the interjection). + out[j] = d + v + j++ + v = d + v + continue + } + + // If there was no interjection, the original delta is still + // valid. + out[j] = d + j++ + v += d + } + switch interj { + case len(interjections): + // All interjections processed. Nothing more to do. + case len(interjections) - 1: + // One more interjection to process at the end. + out[j] = int64(-v) + j++ + for x := 1; x < interjections[interj].num; x++ { + out[j] = 0 + j++ + } + default: + panic("unprocessed interjections left") + } + return out +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go new file mode 100644 index 00000000000..b3b14cf4170 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go @@ -0,0 +1,232 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunkenc + +import ( + "math/bits" + + "github.com/pkg/errors" +) + +// putVarbitInt writes an int64 using varbit encoding with a bit bucketing +// optimized for the dod's observed in histogram buckets, plus a few additional +// buckets for large numbers. +// +// For optimal space utilization, each branch didn't need to support any values +// of any of the prior branches. So we could expand the range of each branch. Do +// more with fewer bits. It would come at the price of more expensive encoding +// and decoding (cutting out and later adding back that center-piece we +// skip). With the distributions of values we see in practice, we would reduce +// the size by around 1%. 
A more detailed study would be needed for precise +// values, but it's appears quite certain that we would end up far below 10%, +// which would maybe convince us to invest the increased coding/decoding cost. +func putVarbitInt(b *bstream, val int64) { + switch { + case val == 0: // Precisely 0, needs 1 bit. + b.writeBit(zero) + case bitRange(val, 3): // -3 <= val <= 4, needs 5 bits. + b.writeBits(0b10, 2) + b.writeBits(uint64(val), 3) + case bitRange(val, 6): // -31 <= val <= 32, 9 bits. + b.writeBits(0b110, 3) + b.writeBits(uint64(val), 6) + case bitRange(val, 9): // -255 <= val <= 256, 13 bits. + b.writeBits(0b1110, 4) + b.writeBits(uint64(val), 9) + case bitRange(val, 12): // -2047 <= val <= 2048, 17 bits. + b.writeBits(0b11110, 5) + b.writeBits(uint64(val), 12) + case bitRange(val, 18): // -131071 <= val <= 131072, 3 bytes. + b.writeBits(0b111110, 6) + b.writeBits(uint64(val), 18) + case bitRange(val, 25): // -16777215 <= val <= 16777216, 4 bytes. + b.writeBits(0b1111110, 7) + b.writeBits(uint64(val), 25) + case bitRange(val, 56): // -36028797018963967 <= val <= 36028797018963968, 8 bytes. + b.writeBits(0b11111110, 8) + b.writeBits(uint64(val), 56) + default: + b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes. + b.writeBits(uint64(val), 64) + } +} + +// readVarbitInt reads an int64 encoced with putVarbitInt. +func readVarbitInt(b *bstreamReader) (int64, error) { + var d byte + for i := 0; i < 8; i++ { + d <<= 1 + bit, err := b.readBitFast() + if err != nil { + bit, err = b.readBit() + } + if err != nil { + return 0, err + } + if bit == zero { + break + } + d |= 1 + } + + var val int64 + var sz uint8 + + switch d { + case 0b0: + // val == 0 + case 0b10: + sz = 3 + case 0b110: + sz = 6 + case 0b1110: + sz = 9 + case 0b11110: + sz = 12 + case 0b111110: + sz = 18 + case 0b1111110: + sz = 25 + case 0b11111110: + sz = 56 + case 0b11111111: + // Do not use fast because it's very unlikely it will succeed. + bits, err := b.readBits(64) + if err != nil { + return 0, err + } + + val = int64(bits) + default: + return 0, errors.Errorf("invalid bit pattern %b", d) + } + + if sz != 0 { + bits, err := b.readBitsFast(sz) + if err != nil { + bits, err = b.readBits(sz) + } + if err != nil { + return 0, err + } + if bits > (1 << (sz - 1)) { + // Or something. + bits = bits - (1 << sz) + } + val = int64(bits) + } + + return val, nil +} + +func bitRangeUint(x uint64, nbits int) bool { + return bits.LeadingZeros64(x) >= 64-nbits +} + +// putVarbitUint writes a uint64 using varbit encoding. It uses the same bit +// buckets as putVarbitInt. +func putVarbitUint(b *bstream, val uint64) { + switch { + case val == 0: // Precisely 0, needs 1 bit. + b.writeBit(zero) + case bitRangeUint(val, 3): // val <= 7, needs 5 bits. + b.writeBits(0b10, 2) + b.writeBits(val, 3) + case bitRangeUint(val, 6): // val <= 63, 9 bits. + b.writeBits(0b110, 3) + b.writeBits(val, 6) + case bitRangeUint(val, 9): // val <= 511, 13 bits. + b.writeBits(0b1110, 4) + b.writeBits(val, 9) + case bitRangeUint(val, 12): // val <= 4095, 17 bits. + b.writeBits(0b11110, 5) + b.writeBits(val, 12) + case bitRangeUint(val, 18): // val <= 262143, 3 bytes. + b.writeBits(0b111110, 6) + b.writeBits(val, 18) + case bitRangeUint(val, 25): // val <= 33554431, 4 bytes. + b.writeBits(0b1111110, 7) + b.writeBits(val, 25) + case bitRangeUint(val, 56): // val <= 72057594037927935, 8 bytes. + b.writeBits(0b11111110, 8) + b.writeBits(val, 56) + default: + b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes. 
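// Editorial aside, not part of the vendored file: a test-style sketch of the
// varbit round trip described above, touching one value per bit bucket. It
// assumes it lives inside package chunkenc (putVarbitInt, readVarbitInt,
// bstream and newBReader are unexported), that "testing" is imported, and that
// the zero-value bstream is writable, which the existing writeBit/writeByte
// helpers allow by appending bytes on demand. TestVarbitIntRoundTripSketch is
// a hypothetical test name.
func TestVarbitIntRoundTripSketch(t *testing.T) {
	var b bstream
	in := []int64{0, 4, -31, 256, 2048, 131072, 1 << 40}
	for _, v := range in {
		putVarbitInt(&b, v)
	}
	r := newBReader(b.bytes())
	for _, want := range in {
		got, err := readVarbitInt(&r)
		if err != nil {
			t.Fatal(err)
		}
		if got != want {
			t.Fatalf("decoded %d, want %d", got, want)
		}
	}
}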
+ b.writeBits(val, 64) + } +} + +// readVarbitUint reads a uint64 encoced with putVarbitUint. +func readVarbitUint(b *bstreamReader) (uint64, error) { + var d byte + for i := 0; i < 8; i++ { + d <<= 1 + bit, err := b.readBitFast() + if err != nil { + bit, err = b.readBit() + } + if err != nil { + return 0, err + } + if bit == zero { + break + } + d |= 1 + } + + var ( + bits uint64 + sz uint8 + err error + ) + + switch d { + case 0b0: + // val == 0 + case 0b10: + sz = 3 + case 0b110: + sz = 6 + case 0b1110: + sz = 9 + case 0b11110: + sz = 12 + case 0b111110: + sz = 18 + case 0b1111110: + sz = 25 + case 0b11111110: + sz = 56 + case 0b11111111: + // Do not use fast because it's very unlikely it will succeed. + bits, err = b.readBits(64) + if err != nil { + return 0, err + } + default: + return 0, errors.Errorf("invalid bit pattern %b", d) + } + + if sz != 0 { + bits, err = b.readBitsFast(sz) + if err != nil { + bits, err = b.readBits(sz) + } + if err != nil { + return 0, err + } + } + + return bits, nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go index 3429c2c606a..10d650d596b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -47,6 +47,8 @@ import ( "encoding/binary" "math" "math/bits" + + "github.com/prometheus/prometheus/model/histogram" ) const ( @@ -79,6 +81,7 @@ func (c *XORChunk) NumSamples() int { return int(binary.BigEndian.Uint16(c.Bytes())) } +// Compact implements the Chunk interface. func (c *XORChunk) Compact() { if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold { buf := make([]byte, l) @@ -96,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) { // To get an appender we must know the state it would have if we had // appended all existing data from scratch. // We iterate through the end and populate via the iterator's state. - for it.Next() { + for it.Next() != ValNone { } if err := it.Err(); err != nil { return nil, err @@ -110,7 +113,7 @@ func (c *XORChunk) Appender() (Appender, error) { leading: it.leading, trailing: it.trailing, } - if binary.BigEndian.Uint16(a.b.bytes()) == 0 { + if it.numTotal == 0 { a.leading = 0xff } return a, nil @@ -149,6 +152,10 @@ type xorAppender struct { trailing uint8 } +func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) { + panic("appended a histogram to an xor chunk") +} + func (a *xorAppender) Append(t int64, v float64) { var tDelta uint64 num := binary.BigEndian.Uint16(a.b.bytes()) @@ -176,6 +183,12 @@ func (a *xorAppender) Append(t int64, v float64) { // Gorilla has a max resolution of seconds, Prometheus milliseconds. // Thus we use higher value range steps with larger bit size. + // + // TODO(beorn7): This seems to needlessly jump to large bit + // sizes even for very small deviations from zero. Timestamp + // compression can probably benefit from some smaller bit + // buckets. See also what was done for histogram encoding in + // varbit.go. switch { case dod == 0: a.b.writeBit(zero) @@ -209,38 +222,7 @@ func bitRange(x int64, nbits uint8) bool { } func (a *xorAppender) writeVDelta(v float64) { - vDelta := math.Float64bits(v) ^ math.Float64bits(a.v) - - if vDelta == 0 { - a.b.writeBit(zero) - return - } - a.b.writeBit(one) - - leading := uint8(bits.LeadingZeros64(vDelta)) - trailing := uint8(bits.TrailingZeros64(vDelta)) - - // Clamp number of leading zeros to avoid overflow when encoding. 
- if leading >= 32 { - leading = 31 - } - - if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing { - a.b.writeBit(zero) - a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing)) - } else { - a.leading, a.trailing = leading, trailing - - a.b.writeBit(one) - a.b.writeBits(uint64(leading), 5) - - // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have. - // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0). - // So instead we write out a 0 and adjust it back to 64 on unpacking. - sigbits := 64 - leading - trailing - a.b.writeBits(uint64(sigbits), 6) - a.b.writeBits(vDelta>>trailing, int(sigbits)) - } + xorWrite(a.b, v, a.v, &a.leading, &a.trailing) } type xorIterator struct { @@ -258,23 +240,35 @@ type xorIterator struct { err error } -func (it *xorIterator) Seek(t int64) bool { +func (it *xorIterator) Seek(t int64) ValueType { if it.err != nil { - return false + return ValNone } for t > it.t || it.numRead == 0 { - if !it.Next() { - return false + if it.Next() == ValNone { + return ValNone } } - return true + return ValFloat } func (it *xorIterator) At() (int64, float64) { return it.t, it.val } +func (it *xorIterator) AtHistogram() (int64, *histogram.Histogram) { + panic("cannot call xorIterator.AtHistogram") +} + +func (it *xorIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + panic("cannot call xorIterator.AtFloatHistogram") +} + +func (it *xorIterator) AtT() int64 { + return it.t +} + func (it *xorIterator) Err() error { return it.err } @@ -294,33 +288,33 @@ func (it *xorIterator) Reset(b []byte) { it.err = nil } -func (it *xorIterator) Next() bool { +func (it *xorIterator) Next() ValueType { if it.err != nil || it.numRead == it.numTotal { - return false + return ValNone } if it.numRead == 0 { t, err := binary.ReadVarint(&it.br) if err != nil { it.err = err - return false + return ValNone } v, err := it.br.readBits(64) if err != nil { it.err = err - return false + return ValNone } it.t = t it.val = math.Float64frombits(v) it.numRead++ - return true + return ValFloat } if it.numRead == 1 { tDelta, err := binary.ReadUvarint(&it.br) if err != nil { it.err = err - return false + return ValNone } it.tDelta = tDelta it.t = it.t + int64(it.tDelta) @@ -338,7 +332,7 @@ func (it *xorIterator) Next() bool { } if err != nil { it.err = err - return false + return ValNone } if bit == zero { break @@ -361,7 +355,7 @@ func (it *xorIterator) Next() bool { bits, err := it.br.readBits(64) if err != nil { it.err = err - return false + return ValNone } dod = int64(bits) @@ -374,7 +368,7 @@ func (it *xorIterator) Next() bool { } if err != nil { it.err = err - return false + return ValNone } // Account for negative numbers, which come back as high unsigned numbers. 
@@ -391,73 +385,122 @@ func (it *xorIterator) Next() bool { return it.readValue() } -func (it *xorIterator) readValue() bool { - bit, err := it.br.readBitFast() +func (it *xorIterator) readValue() ValueType { + err := xorRead(&it.br, &it.val, &it.leading, &it.trailing) if err != nil { - bit, err = it.br.readBit() + it.err = err + return ValNone + } + it.numRead++ + return ValFloat +} + +func xorWrite(b *bstream, newValue, currentValue float64, leading, trailing *uint8) { + delta := math.Float64bits(newValue) ^ math.Float64bits(currentValue) + + if delta == 0 { + b.writeBit(zero) + return + } + b.writeBit(one) + + newLeading := uint8(bits.LeadingZeros64(delta)) + newTrailing := uint8(bits.TrailingZeros64(delta)) + + // Clamp number of leading zeros to avoid overflow when encoding. + if newLeading >= 32 { + newLeading = 31 } + + if *leading != 0xff && newLeading >= *leading && newTrailing >= *trailing { + // In this case, we stick with the current leading/trailing. + b.writeBit(zero) + b.writeBits(delta>>*trailing, 64-int(*leading)-int(*trailing)) + return + } + + // Update leading/trailing for the caller. + *leading, *trailing = newLeading, newTrailing + + b.writeBit(one) + b.writeBits(uint64(newLeading), 5) + + // Note that if newLeading == newTrailing == 0, then sigbits == 64. But + // that value doesn't actually fit into the 6 bits we have. Luckily, we + // never need to encode 0 significant bits, since that would put us in + // the other case (vdelta == 0). So instead we write out a 0 and adjust + // it back to 64 on unpacking. + sigbits := 64 - newLeading - newTrailing + b.writeBits(uint64(sigbits), 6) + b.writeBits(delta>>newTrailing, int(sigbits)) +} + +func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error { + bit, err := br.readBitFast() if err != nil { - it.err = err - return false + bit, err = br.readBit() + } + if err != nil { + return err + } + if bit == zero { + return nil + } + bit, err = br.readBitFast() + if err != nil { + bit, err = br.readBit() } + if err != nil { + return err + } + + var ( + bits uint64 + newLeading, newTrailing, mbits uint8 + ) if bit == zero { - // it.val = it.val + // Reuse leading/trailing zero bits. 
+ newLeading, newTrailing = *leading, *trailing + mbits = 64 - newLeading - newTrailing } else { - bit, err := it.br.readBitFast() + bits, err = br.readBitsFast(5) if err != nil { - bit, err = it.br.readBit() + bits, err = br.readBits(5) } if err != nil { - it.err = err - return false - } - if bit == zero { - // reuse leading/trailing zero bits - // it.leading, it.trailing = it.leading, it.trailing - } else { - bits, err := it.br.readBitsFast(5) - if err != nil { - bits, err = it.br.readBits(5) - } - if err != nil { - it.err = err - return false - } - it.leading = uint8(bits) - - bits, err = it.br.readBitsFast(6) - if err != nil { - bits, err = it.br.readBits(6) - } - if err != nil { - it.err = err - return false - } - mbits := uint8(bits) - // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder - if mbits == 0 { - mbits = 64 - } - it.trailing = 64 - it.leading - mbits + return err } + newLeading = uint8(bits) - mbits := 64 - it.leading - it.trailing - bits, err := it.br.readBitsFast(mbits) + bits, err = br.readBitsFast(6) if err != nil { - bits, err = it.br.readBits(mbits) + bits, err = br.readBits(6) } if err != nil { - it.err = err - return false + return err } - vbits := math.Float64bits(it.val) - vbits ^= bits << it.trailing - it.val = math.Float64frombits(vbits) + mbits = uint8(bits) + // 0 significant bits here means we overflowed and we actually + // need 64; see comment in xrWrite. + if mbits == 0 { + mbits = 64 + } + newTrailing = 64 - newLeading - mbits + // Update leading/trailing zero bits for the caller. + *leading, *trailing = newLeading, newTrailing } - - it.numRead++ - return true + bits, err = br.readBitsFast(mbits) + if err != nil { + bits, err = br.readBits(mbits) + } + if err != nil { + return err + } + vbits := math.Float64bits(*value) + vbits ^= bits << newTrailing + *value = math.Float64frombits(vbits) + return nil } // OOOXORChunk holds a XORChunk and overrides the Encoding() method. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index b2ad59fd6f4..a0bd735b8be 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -21,7 +21,6 @@ import ( "io" "os" "path/filepath" - "sort" "strconv" "sync" @@ -29,6 +28,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/chunkenc" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -308,7 +308,7 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { } // Check for gaps in the files. 
- sort.Ints(chkFileIndices) + slices.Sort(chkFileIndices) if len(chkFileIndices) == 0 { return nil } @@ -777,7 +777,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu for seg := range cdm.mmappedChunkFiles { segIDs = append(segIDs, seg) } - sort.Ints(segIDs) + slices.Sort(segIDs) for _, segID := range segIDs { mmapFile := cdm.mmappedChunkFiles[segID] fileEnd := mmapFile.byteSlice.Len() @@ -894,7 +894,7 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error { for seq := range cdm.mmappedChunkFiles { chkFileIndices = append(chkFileIndices, seq) } - sort.Ints(chkFileIndices) + slices.Sort(chkFileIndices) var removedFiles []int for _, seq := range chkFileIndices { @@ -934,7 +934,7 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error { // deleteFiles deletes the given file sequences in order of the sequence. // In case of an error, it returns the sorted file sequences that were not deleted from the _disk_. func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) ([]int, error) { - sort.Ints(removedFiles) // To delete them in order. + slices.Sort(removedFiles) // To delete them in order. cdm.readPathMtx.Lock() for _, seq := range removedFiles { if err := cdm.closers[seq].Close(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index d4642b77d02..6f6de1e6335 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -775,7 +775,8 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, chksIter := s.Iterator() chks = chks[:0] for chksIter.Next() { - // We are not iterating in streaming way over chunk as it's more efficient to do bulk write for index and + // We are not iterating in streaming way over chunk as + // it's more efficient to do bulk write for index and // chunk file purposes. chks = append(chks, chksIter.At()) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 854918c369b..def4442f520 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -45,7 +45,7 @@ import ( "github.com/prometheus/prometheus/tsdb/fileutil" _ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minium Go version is met. "github.com/prometheus/prometheus/tsdb/tsdbutil" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" ) const ( @@ -70,7 +70,7 @@ var ErrNotReady = errors.New("TSDB not ready") // millisecond precision timestamps. func DefaultOptions() *Options { return &Options{ - WALSegmentSize: wal.DefaultSegmentSize, + WALSegmentSize: wlog.DefaultSegmentSize, MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), MinBlockDuration: DefaultBlockDuration, @@ -81,6 +81,7 @@ func DefaultOptions() *Options { StripeSize: DefaultStripeSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, IsolationDisabled: defaultIsolationDisabled, + HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, OutOfOrderCapMax: DefaultOutOfOrderCapMax, } } @@ -166,6 +167,9 @@ type Options struct { // Disables isolation between reads and in-flight appends. IsolationDisabled bool + // EnableNativeHistograms enables the ingestion of native histograms. 
+ EnableNativeHistograms bool + // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. // This can change during run-time, so this value from here should only be used // while initialising. @@ -389,14 +393,14 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { if len(blockReaders) > 0 { maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime } - w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal")) + w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal")) if err != nil { return err } - var wbl *wal.WAL - wblDir := filepath.Join(db.dir, wal.WblDirName) + var wbl *wlog.WL + wblDir := filepath.Join(db.dir, wlog.WblDirName) if _, err := os.Stat(wblDir); !os.IsNotExist(err) { - wbl, err = wal.Open(db.logger, wblDir) + wbl, err = wlog.Open(db.logger, wblDir) if err != nil { return err } @@ -473,14 +477,14 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue if err := head.Close(); err != nil { return nil, err } - w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal")) + w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal")) if err != nil { return nil, err } - var wbl *wal.WAL - wblDir := filepath.Join(db.dir, wal.WblDirName) + var wbl *wlog.WL + wblDir := filepath.Join(db.dir, wlog.WblDirName) if _, err := os.Stat(wblDir); !os.IsNotExist(err) { - wbl, err = wal.Open(db.logger, wblDir) + wbl, err = wlog.Open(db.logger, wblDir) if err != nil { return nil, err } @@ -677,7 +681,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } walDir := filepath.Join(dir, "wal") - wblDir := filepath.Join(dir, wal.WblDirName) + wblDir := filepath.Join(dir, wlog.WblDirName) // Migrate old WAL if one exists. if err := MigrateWAL(l, walDir); err != nil { @@ -739,15 +743,15 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } db.compactCancel = cancel - var wlog, wblog *wal.WAL - segmentSize := wal.DefaultSegmentSize + var wal, wbl *wlog.WL + segmentSize := wlog.DefaultSegmentSize // Wal is enabled. if opts.WALSegmentSize >= 0 { // Wal is set to a custom size. if opts.WALSegmentSize > 0 { segmentSize = opts.WALSegmentSize } - wlog, err = wal.NewSize(l, r, walDir, segmentSize, opts.WALCompression) + wal, err = wlog.NewSize(l, r, walDir, segmentSize, opts.WALCompression) if err != nil { return nil, err } @@ -757,7 +761,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return nil, err } if opts.OutOfOrderTimeWindow > 0 || wblSize > 0 { - wblog, err = wal.NewSize(l, r, wblDir, segmentSize, opts.WALCompression) + wbl, err = wlog.NewSize(l, r, wblDir, segmentSize, opts.WALCompression) if err != nil { return nil, err } @@ -775,13 +779,14 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.EnableExemplarStorage = opts.EnableExemplarStorage headOpts.MaxExemplars.Store(opts.MaxExemplars) headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown + headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms) headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow) headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax) if opts.IsolationDisabled { // We only override this flag if isolation is disabled at DB level. We use the default otherwise. 
headOpts.IsolationDisabled = opts.IsolationDisabled } - db.head, err = NewHead(r, l, wlog, wblog, headOpts, stats.Head) + db.head, err = NewHead(r, l, wal, wbl, headOpts, stats.Head) if err != nil { return nil, err } @@ -813,12 +818,12 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs isOOOErr := isErrLoadOOOWal(initErr) if isOOOErr { level.Warn(db.logger).Log("msg", "Encountered OOO WAL read error, attempting repair", "err", initErr) - if err := wblog.Repair(initErr); err != nil { + if err := wbl.Repair(initErr); err != nil { return nil, errors.Wrap(err, "repair corrupted OOO WAL") } } else { level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) - if err := wlog.Repair(initErr); err != nil { + if err := wal.Repair(initErr); err != nil { return nil, errors.Wrap(err, "repair corrupted WAL") } } @@ -947,19 +952,19 @@ func (db *DB) ApplyConfig(conf *config.Config) error { } // Create WBL if it was not present and if OOO is enabled with WAL enabled. - var wblog *wal.WAL + var wblog *wlog.WL var err error if db.head.wbl != nil { // The existing WBL from the disk might have been replayed while OOO was disabled. wblog = db.head.wbl } else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 { - segmentSize := wal.DefaultSegmentSize + segmentSize := wlog.DefaultSegmentSize // Wal is set to a custom size. if db.opts.WALSegmentSize > 0 { segmentSize = db.opts.WALSegmentSize } - oooWalDir := filepath.Join(db.dir, wal.WblDirName) - wblog, err = wal.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression) + oooWalDir := filepath.Join(db.dir, wlog.WblDirName) + wblog, err = wlog.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression) if err != nil { return err } @@ -974,6 +979,16 @@ func (db *DB) ApplyConfig(conf *config.Config) error { return nil } +// EnableNativeHistograms enables the native histogram feature. +func (db *DB) EnableNativeHistograms() { + db.head.EnableNativeHistograms() +} + +// DisableNativeHistograms disables the native histogram feature. +func (db *DB) DisableNativeHistograms() { + db.head.DisableNativeHistograms() +} + // dbAppender wraps the DB's head appender and triggers compactions on commit // if necessary. type dbAppender struct { @@ -1939,7 +1954,9 @@ func (db *DB) CleanTombstones() (err error) { defer db.cmtx.Unlock() start := time.Now() - defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds()) + defer func() { + db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds()) + }() cleanUpCompleted := false // Repeat cleanup until there is no tombstones left. 
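The db.go hunks above add an EnableNativeHistograms switch to tsdb.Options plus DB.EnableNativeHistograms/DisableNativeHistograms methods that flip the flag on the head at runtime. A minimal usage sketch follows; it assumes the public tsdb.Open signature of this Prometheus vintage (dir, logger, registerer, options, stats) and is illustrative only, not taken from the upstream sources.

package main

import (
	kitlog "github.com/go-kit/log"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// Field added by the hunks above: allow native histogram samples to be appended.
	opts.EnableNativeHistograms = true

	// tsdb.Open signature assumed from the upstream package; nil registerer/stats for brevity.
	db, err := tsdb.Open("./data", kitlog.NewNopLogger(), nil, opts, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Runtime toggles added above: with the flag off, AppendHistogram returns
	// storage.ErrNativeHistogramsDisabled.
	db.DisableNativeHistograms()
	db.EnableNativeHistograms()
}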
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go index 1e17c2e37d1..ab97876a362 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go @@ -178,9 +178,10 @@ func NewDecbufRaw(bs ByteSlice, length int) Decbuf { return Decbuf{B: bs.Range(0, length)} } -func (d *Decbuf) Uvarint() int { return int(d.Uvarint64()) } -func (d *Decbuf) Be32int() int { return int(d.Be32()) } -func (d *Decbuf) Be64int64() int64 { return int64(d.Be64()) } +func (d *Decbuf) Uvarint() int { return int(d.Uvarint64()) } +func (d *Decbuf) Uvarint32() uint32 { return uint32(d.Uvarint64()) } +func (d *Decbuf) Be32int() int { return int(d.Be32()) } +func (d *Decbuf) Be64int64() int64 { return int64(d.Be64()) } // Crc32 returns a CRC32 checksum over the remaining bytes. func (d *Decbuf) Crc32(castagnoliTable *crc32.Table) uint32 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 8dd15116391..be4b3b6a95f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -31,6 +31,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" @@ -41,7 +42,7 @@ import ( "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tsdbutil" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" ) var ( @@ -75,12 +76,13 @@ type Head struct { metrics *headMetrics opts *HeadOptions - wal, wbl *wal.WAL + wal, wbl *wlog.WL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage logger log.Logger appendPool sync.Pool exemplarsPool sync.Pool + histogramsPool sync.Pool metadataPool sync.Pool seriesPool sync.Pool bytesPool sync.Pool @@ -130,14 +132,18 @@ type HeadOptions struct { // https://pkg.go.dev/sync/atomic#pkg-note-BUG MaxExemplars atomic.Int64 + OutOfOrderTimeWindow atomic.Int64 + OutOfOrderCapMax atomic.Int64 + + // EnableNativeHistograms enables the ingestion of native histograms. + EnableNativeHistograms atomic.Bool + ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string ChunkPool chunkenc.Pool ChunkWriteBufferSize int ChunkWriteQueueSize int - OutOfOrderTimeWindow atomic.Int64 - OutOfOrderCapMax atomic.Int64 // StripeSize sets the number of entries in the hash map, it must be a power of 2. // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. @@ -186,7 +192,7 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. 
-func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { l = log.NewNopLogger() @@ -299,11 +305,11 @@ type headMetrics struct { chunksCreated prometheus.Counter chunksRemoved prometheus.Counter gcDuration prometheus.Summary - samplesAppended prometheus.Counter + samplesAppended *prometheus.CounterVec outOfOrderSamplesAppended prometheus.Counter - outOfBoundSamples prometheus.Counter - outOfOrderSamples prometheus.Counter - tooOldSamples prometheus.Counter + outOfBoundSamples *prometheus.CounterVec + outOfOrderSamples *prometheus.CounterVec + tooOldSamples *prometheus.CounterVec walTruncateDuration prometheus.Summary walCorruptionsTotal prometheus.Counter dataTotalReplayDuration prometheus.Gauge @@ -318,6 +324,11 @@ type headMetrics struct { oooHistogram prometheus.Histogram } +const ( + sampleMetricTypeFloat = "float" + sampleMetricTypeHistogram = "histogram" +) + func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { m := &headMetrics{ activeAppenders: prometheus.NewGauge(prometheus.GaugeOpts{ @@ -370,26 +381,26 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_data_replay_duration_seconds", Help: "Time taken to replay the data on disk.", }), - samplesAppended: prometheus.NewCounter(prometheus.CounterOpts{ + samplesAppended: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_samples_appended_total", Help: "Total number of appended samples.", - }), + }, []string{"type"}), outOfOrderSamplesAppended: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_out_of_order_samples_appended_total", Help: "Total number of appended out of order samples.", }), - outOfBoundSamples: prometheus.NewCounter(prometheus.CounterOpts{ + outOfBoundSamples: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_tsdb_out_of_bound_samples_total", Help: "Total number of out of bound samples ingestion failed attempts with out of order support disabled.", - }), - outOfOrderSamples: prometheus.NewCounter(prometheus.CounterOpts{ + }, []string{"type"}), + outOfOrderSamples: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_tsdb_out_of_order_samples_total", Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.", - }), - tooOldSamples: prometheus.NewCounter(prometheus.CounterOpts{ + }, []string{"type"}), + tooOldSamples: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_tsdb_too_old_samples_total", Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window.", - }), + }, []string{"type"}), headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_truncations_failed_total", Help: "Total number of head truncations that failed.", @@ -602,13 +613,13 @@ func (h *Head) Init(minValidTime int64) error { checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. - dir, startFrom, err := wal.LastCheckpoint(h.wal.Dir()) + dir, startFrom, err := wlog.LastCheckpoint(h.wal.Dir()) if err != nil && err != record.ErrNotFound { return errors.Wrap(err, "find last checkpoint") } // Find the last segment. 
- _, endAt, e := wal.Segments(h.wal.Dir()) + _, endAt, e := wlog.Segments(h.wal.Dir()) if e != nil { return errors.Wrap(e, "finding WAL segments") } @@ -617,7 +628,7 @@ func (h *Head) Init(minValidTime int64) error { multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{} if err == nil && startFrom >= snapIdx { - sr, err := wal.NewSegmentsReader(dir) + sr, err := wlog.NewSegmentsReader(dir) if err != nil { return errors.Wrap(err, "open checkpoint") } @@ -629,7 +640,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. - if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { + if err := h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { return errors.Wrap(err, "backfill checkpoint") } h.updateWALReplayStatusRead(startFrom) @@ -645,7 +656,7 @@ func (h *Head) Init(minValidTime int64) error { } // Backfill segments from the most recent checkpoint onwards. for i := startFrom; i <= endAt; i++ { - s, err := wal.OpenReadSegment(wal.SegmentName(h.wal.Dir(), i)) + s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i)) if err != nil { return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) } @@ -654,7 +665,7 @@ func (h *Head) Init(minValidTime int64) error { if i == snapIdx { offset = snapOffset } - sr, err := wal.NewSegmentBufReaderWithOffset(offset, s) + sr, err := wlog.NewSegmentBufReaderWithOffset(offset, s) if errors.Cause(err) == io.EOF { // File does not exist. continue @@ -662,7 +673,7 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { return errors.Wrapf(err, "segment reader (offset=%d)", offset) } - err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) + err = h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) } @@ -677,20 +688,20 @@ func (h *Head) Init(minValidTime int64) error { wblReplayStart := time.Now() if h.wbl != nil { // Replay OOO WAL. - startFrom, endAt, e = wal.Segments(h.wbl.Dir()) + startFrom, endAt, e = wlog.Segments(h.wbl.Dir()) if e != nil { return errors.Wrap(e, "finding OOO WAL segments") } h.startWALReplayStatus(startFrom, endAt) for i := startFrom; i <= endAt; i++ { - s, err := wal.OpenReadSegment(wal.SegmentName(h.wbl.Dir(), i)) + s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i)) if err != nil { return errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i)) } - sr := wal.NewSegmentBufReader(s) - err = h.loadWBL(wal.NewReader(sr), multiRef, lastMmapRef) + sr := wlog.NewSegmentBufReader(s) + err = h.loadWBL(wlog.NewReader(sr), multiRef, lastMmapRef) if err := sr.Close(); err != nil { level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) } @@ -840,7 +851,7 @@ func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef return mmappedChunks, oooMmappedChunks, lastRef, nil } -func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) { +func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) { oooTimeWindow := int64(0) if cfg.StorageConfig.TSDBConfig != nil { oooTimeWindow = cfg.StorageConfig.TSDBConfig.OutOfOrderTimeWindow @@ -872,7 +883,7 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) { // SetOutOfOrderTimeWindow updates the out of order related parameters. 
// If the Head already has a WBL set, then the wbl will be ignored. -func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wal.WAL) { +func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wlog.WL) { if oooTimeWindow > 0 && h.wbl == nil { h.wbl = wbl } @@ -880,6 +891,16 @@ func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wal.WAL) { h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow) } +// EnableNativeHistograms enables the native histogram feature. +func (h *Head) EnableNativeHistograms() { + h.opts.EnableNativeHistograms.Store(true) +} + +// DisableNativeHistograms disables the native histogram feature. +func (h *Head) DisableNativeHistograms() { + h.opts.EnableNativeHistograms.Store(false) +} + // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats { h.cardinalityMutex.Lock() @@ -1095,7 +1116,7 @@ func (h *Head) truncateWAL(mint int64) error { start := time.Now() h.lastWALTruncationTime.Store(mint) - first, last, err := wal.Segments(h.wal.Dir()) + first, last, err := wlog.Segments(h.wal.Dir()) if err != nil { return errors.Wrap(err, "get segment range") } @@ -1127,9 +1148,9 @@ func (h *Head) truncateWAL(mint int64) error { return ok } h.metrics.checkpointCreationTotal.Inc() - if _, err = wal.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { + if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { h.metrics.checkpointCreationFail.Inc() - if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok { + if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok { h.metrics.walCorruptionsTotal.Inc() } return errors.Wrap(err, "create checkpoint") @@ -1152,7 +1173,7 @@ func (h *Head) truncateWAL(mint int64) error { h.deletedMtx.Unlock() h.metrics.checkpointDeleteTotal.Inc() - if err := wal.DeleteCheckpoints(h.wal.Dir(), last); err != nil { + if err := wlog.DeleteCheckpoints(h.wal.Dir(), last); err != nil { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher checkpoint exists. 
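The wal to wlog package rename in these head.go hunks is mechanical but touches every call site. A minimal sketch of a downstream caller after the rename, using only identifiers that appear in the hunks above (wlog.Open, wlog.Segments, wlog.LastCheckpoint, record.ErrNotFound); it is an illustrative sketch, not code from the patch.

package main

import (
	"fmt"
	"path/filepath"

	kitlog "github.com/go-kit/log"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

// inspectWAL prints the segment range and last checkpoint of a TSDB WAL directory.
func inspectWAL(dbDir string) error {
	walDir := filepath.Join(dbDir, "wal")

	w, err := wlog.Open(kitlog.NewNopLogger(), walDir)
	if err != nil {
		return err
	}
	defer w.Close()

	first, last, err := wlog.Segments(walDir)
	if err != nil {
		return err
	}

	// Same error-handling shape as Head.Init above: a missing checkpoint is not fatal.
	cpDir, cpIdx, err := wlog.LastCheckpoint(walDir)
	if err != nil && err != record.ErrNotFound {
		return err
	}

	fmt.Printf("WAL segments %d-%d, last checkpoint %q (index %d)\n", first, last, cpDir, cpIdx)
	return nil
}

func main() {
	if err := inspectWAL("./data"); err != nil {
		fmt.Println(err)
	}
}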
@@ -1395,7 +1416,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { h.tombstones.TruncateBefore(mint) if h.wal != nil { - _, last, _ := wal.Segments(h.wal.Dir()) + _, last, _ := wlog.Segments(h.wal.Dir()) h.deletedMtx.Lock() // Keep series records until we're past segment 'last' // because the WAL will still have samples records with @@ -1472,7 +1493,11 @@ func (h *Head) Close() error { h.closedMtx.Lock() defer h.closedMtx.Unlock() h.closed = true + errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close()) + if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown { + errs.Add(h.performChunkSnapshot()) + } if h.wal != nil { errs.Add(h.wal.Close()) } @@ -1765,13 +1790,31 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu } type sample struct { - t int64 - v float64 + t int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram } -func newSample(t int64, v float64) tsdbutil.Sample { return sample{t, v} } -func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { return s.v } +func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { + return sample{t, v, h, fh} +} + +func (s sample) T() int64 { return s.t } +func (s sample) V() float64 { return s.v } +func (s sample) H() *histogram.Histogram { return s.h } +func (s sample) FH() *histogram.FloatHistogram { return s.fh } + +func (s sample) Type() chunkenc.ValueType { + switch { + case s.h != nil: + return chunkenc.ValHistogram + case s.fh != nil: + return chunkenc.ValFloatHistogram + default: + return chunkenc.ValFloat + } +} // memSeries is the in-memory representation of a series. None of its methods // are goroutine safe and it is the caller's responsibility to lock it. @@ -1806,6 +1849,9 @@ type memSeries struct { // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 + // We keep the last histogram value here (in addition to appending it to the chunk) so we can check for duplicates. + lastHistogramValue *histogram.Histogram + // Current appender for the head chunk. Set when a new head chunk is cut. // It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit // (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series). @@ -1814,6 +1860,10 @@ type memSeries struct { // txs is nil if isolation is disabled. txs *txRing + // TODO(beorn7): The only reason we track this is to create a staleness + // marker as either histogram or float sample. Perhaps there is a better way. + isHistogramSeries bool + pendingCommit bool // Whether there are samples waiting to be committed to this series. 
} @@ -1974,3 +2024,22 @@ func (h *Head) updateWALReplayStatusRead(current int) { h.stats.WALReplayStatus.Current = current } + +func GenerateTestHistograms(n int) (r []*histogram.Histogram) { + for i := 0; i < n; i++ { + r = append(r, &histogram.Histogram{ + Count: 5 + uint64(i*4), + ZeroCount: 2 + uint64(i), + ZeroThreshold: 0.001, + Sum: 18.4 * float64(i+1), + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + }) + } + + return r +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index f843aa1ec69..a3b5027ea49 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -22,8 +22,10 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -66,6 +68,16 @@ func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e return a.app.AppendExemplar(ref, l, e) } +func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendHistogram(ref, l, t, h) + } + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendHistogram(ref, l, t, h) +} + func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { if a.app != nil { return a.app.UpdateMetadata(ref, l, m) @@ -143,6 +155,7 @@ func (h *Head) appender() *headAppender { samples: h.getAppendBuffer(), sampleSeries: h.getSeriesBuffer(), exemplars: exemplarsBuf, + histograms: h.getHistogramBuffer(), metadata: h.getMetadataBuffer(), appendID: appendID, cleanupAppendIDsBelow: cleanupAppendIDsBelow, @@ -210,6 +223,19 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { h.exemplarsPool.Put(b[:0]) } +func (h *Head) getHistogramBuffer() []record.RefHistogramSample { + b := h.histogramsPool.Get() + if b == nil { + return make([]record.RefHistogramSample, 0, 512) + } + return b.([]record.RefHistogramSample) +} + +func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) { + //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.histogramsPool.Put(b[:0]) +} + func (h *Head) getMetadataBuffer() []record.RefMetadata { b := h.metadataPool.Get() if b == nil { @@ -261,12 +287,14 @@ type headAppender struct { headMaxt int64 // We track it here to not take the lock for every sample appended. oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample. - series []record.RefSeries // New series held by this appender. - metadata []record.RefMetadata // New metadata held by this appender. - samples []record.RefSample // New samples held by this appender. - exemplars []exemplarWithSeriesRef // New exemplars held by this appender. - sampleSeries []*memSeries // Series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). 
- metadataSeries []*memSeries // Series corresponding to the metadata held by this appender. + series []record.RefSeries // New series held by this appender. + samples []record.RefSample // New float samples held by this appender. + exemplars []exemplarWithSeriesRef // New exemplars held by this appender. + sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). + histograms []record.RefHistogramSample // New histogram samples held by this appender. + histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). + metadata []record.RefMetadata // New metadata held by this appender. + metadataSeries []*memSeries // Series corresponding to the metadata held by this appender. appendID, cleanupAppendIDsBelow uint64 closed bool @@ -276,7 +304,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append. // If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work. if a.oooTimeWindow == 0 && t < a.minValidTime { - a.head.metrics.outOfBoundSamples.Inc() + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc() return 0, storage.ErrOutOfBounds } @@ -306,6 +334,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } } + if value.IsStaleNaN(v) && s.isHistogramSeries { + return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}) + } + s.Lock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. @@ -320,9 +352,9 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 if err != nil { switch err { case storage.ErrOutOfOrderSample: - a.head.metrics.outOfOrderSamples.Inc() + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() case storage.ErrTooOldSample: - a.head.metrics.tooOldSamples.Inc() + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc() } return 0, err } @@ -385,6 +417,28 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi return false, headMaxt - t, storage.ErrOutOfOrderSample } +// appendableHistogram checks whether the given sample is valid for appending to the series. +func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { + c := s.head() + if c == nil { + return nil + } + + if t > c.maxTime { + return nil + } + if t < c.maxTime { + return storage.ErrOutOfOrderSample + } + + // We are allowing exact duplicates as we can encounter them in valid cases + // like federation and erroring out at that time would be extremely noisy. + if !h.Equals(s.lastHistogramValue) { + return storage.ErrDuplicateSampleForTimestamp + } + return nil +} + // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. 
func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { @@ -422,6 +476,74 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, return storage.SeriesRef(s.ref), nil } +func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { + if !a.head.opts.EnableNativeHistograms.Load() { + return 0, storage.ErrNativeHistogramsDisabled + } + + if t < a.minValidTime { + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() + return 0, storage.ErrOutOfBounds + } + + if err := ValidateHistogram(h); err != nil { + return 0, err + } + + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + // Ensure no empty labels have gotten through. + lset = lset.WithoutEmpty() + if len(lset) == 0 { + return 0, errors.Wrap(ErrInvalidSample, "empty labelset") + } + + if l, dup := lset.HasDuplicateLabelNames(); dup { + return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) + } + + var created bool + var err error + s, created, err = a.head.getOrCreate(lset.Hash(), lset) + if err != nil { + return 0, err + } + s.isHistogramSeries = true + if created { + a.series = append(a.series, record.RefSeries{ + Ref: s.ref, + Labels: lset, + }) + } + } + + s.Lock() + if err := s.appendableHistogram(t, h); err != nil { + s.Unlock() + if err == storage.ErrOutOfOrderSample { + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() + } + return 0, err + } + s.pendingCommit = true + s.Unlock() + + if t < a.mint { + a.mint = t + } + if t > a.maxt { + a.maxt = t + } + + a.histograms = append(a.histograms, record.RefHistogramSample{ + Ref: s.ref, + T: t, + H: h, + }) + a.histogramSeries = append(a.histogramSeries, s) + return storage.SeriesRef(s.ref), nil +} + // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. 
func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { @@ -453,6 +575,76 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, return ref, nil } +func ValidateHistogram(h *histogram.Histogram) error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return errors.Wrap(err, "negative side") + } + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return errors.Wrap(err, "positive side") + } + + negativeCount, err := checkHistogramBuckets(h.NegativeBuckets) + if err != nil { + return errors.Wrap(err, "negative side") + } + positiveCount, err := checkHistogramBuckets(h.PositiveBuckets) + if err != nil { + return errors.Wrap(err, "positive side") + } + + if c := negativeCount + positiveCount; c > h.Count { + return errors.Wrap( + storage.ErrHistogramCountNotBigEnough, + fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count), + ) + } + + return nil +} + +func checkHistogramSpans(spans []histogram.Span, numBuckets int) error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return errors.Wrap( + storage.ErrHistogramSpanNegativeOffset, + fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), + ) + } + spanBuckets += int(span.Length) + } + if spanBuckets != numBuckets { + return errors.Wrap( + storage.ErrHistogramSpansBucketsMismatch, + fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), + ) + } + return nil +} + +func checkHistogramBuckets(buckets []int64) (uint64, error) { + if len(buckets) == 0 { + return 0, nil + } + + var count uint64 + var last int64 + + for i := 0; i < len(buckets); i++ { + c := last + buckets[i] + if c < 0 { + return 0, errors.Wrap( + storage.ErrHistogramNegativeBucketCount, + fmt.Sprintf("bucket number %d has observation count of %d", i+1, c), + ) + } + last = c + count += uint64(c) + } + + return count, nil +} + var _ storage.GetRef = &headAppender{} func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { @@ -508,6 +700,13 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log exemplars") } } + if len(a.histograms) > 0 { + rec = enc.HistogramSamples(a.histograms, buf) + buf = rec[:0] + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log histograms") + } + } return nil } @@ -553,6 +752,7 @@ func (a *headAppender) Commit() (err error) { defer a.head.putAppendBuffer(a.samples) defer a.head.putSeriesBuffer(a.sampleSeries) defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.putHistogramBuffer(a.histograms) defer a.head.putMetadataBuffer(a.metadata) defer a.head.iso.closeAppend(a.appendID) @@ -697,6 +897,33 @@ func (a *headAppender) Commit() (err error) { series.Unlock() } + histogramsTotal := len(a.histograms) + histoOOORejected := 0 + for i, s := range a.histograms { + series = a.histogramSeries[i] + series.Lock() + ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange) + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + + if ok { + if s.T < inOrderMint { + inOrderMint = s.T + } + if s.T > inOrderMaxt { + inOrderMaxt = s.T + } + } else { + histogramsTotal-- + histoOOORejected++ + } + if chunkCreated { + a.head.metrics.chunks.Inc() + a.head.metrics.chunksCreated.Inc() + } + } + for i, m := range a.metadata { series 
= a.metadataSeries[i] series.Lock() @@ -704,10 +931,12 @@ func (a *headAppender) Commit() (err error) { series.Unlock() } - a.head.metrics.outOfOrderSamples.Add(float64(oooRejected)) - a.head.metrics.outOfBoundSamples.Add(float64(oobRejected)) - a.head.metrics.tooOldSamples.Add(float64(tooOldRejected)) - a.head.metrics.samplesAppended.Add(float64(samplesAppended)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooRejected)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected)) + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(oobRejected)) + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(tooOldRejected)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(samplesAppended)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsTotal)) a.head.metrics.outOfOrderSamplesAppended.Add(float64(oooAccepted)) a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt) @@ -751,26 +980,126 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange) + if !sampleInOrder { + return sampleInOrder, chunkCreated + } + s.app.Append(t, v) + s.isHistogramSeries = false + + c.maxTime = t + + s.lastValue = v + + if appendID > 0 { + s.txs.add(appendID) + } + + return true, chunkCreated +} + +// appendHistogram adds the histogram. +// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. +func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { + // Head controls the execution of recoding, so that we own the proper + // chunk reference afterwards. We check for Appendable before + // appendPreprocessor because in case it ends up creating a new chunk, + // we need to know if there was also a counter reset or not to set the + // meta properly. + app, _ := s.app.(*chunkenc.HistogramAppender) + var ( + positiveInterjections, negativeInterjections []chunkenc.Interjection + okToAppend, counterReset bool + ) + c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) + if !sampleInOrder { + return sampleInOrder, chunkCreated + } + + if app != nil { + positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h) + } + + if !chunkCreated { + // We have 3 cases here + // - !okToAppend -> We need to cut a new chunk. + // - okToAppend but we have interjections → Existing chunk needs + // recoding before we can append our histogram. + // - okToAppend and no interjections → Chunk is ready to support our histogram. + if !okToAppend || counterReset { + c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) + chunkCreated = true + } else if len(positiveInterjections) > 0 || len(negativeInterjections) > 0 { + // New buckets have appeared. 
We need to recode all + // prior histogram samples within the chunk before we + // can process this one. + chunk, app := app.Recode( + positiveInterjections, negativeInterjections, + h.PositiveSpans, h.NegativeSpans, + ) + c.chunk = chunk + s.app = app + } + } + + if chunkCreated { + hc := s.headChunk.chunk.(*chunkenc.HistogramChunk) + header := chunkenc.UnknownCounterReset + if counterReset { + header = chunkenc.CounterReset + } else if okToAppend { + header = chunkenc.NotCounterReset + } + hc.SetCounterResetHeader(header) + } + + s.app.AppendHistogram(t, h) + s.isHistogramSeries = true + + c.maxTime = t + + s.lastHistogramValue = h + + if appendID > 0 { + s.txs.add(appendID) + } + + return true, chunkCreated +} + +// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks. +// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. +// This should be called only when appending data. +func (s *memSeries) appendPreprocessor( + t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, +) (c *memChunk, sampleInOrder, chunkCreated bool) { // Based on Gorilla white papers this offers near-optimal compression ratio // so anything bigger that this has diminishing returns and increases // the time range within which we have to decompress all samples. const samplesPerChunk = 120 - c := s.head() + c = s.head() if c == nil { if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t { // Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it. - return false, false + return c, false, false } // There is no head chunk in this series yet, create the first chunk for the sample. - c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) chunkCreated = true } // Out of order sample. if c.maxTime >= t { - return false, chunkCreated + return c, false, chunkCreated + } + + if c.chunk.Encoding() != e { + // The chunk encoding expected by this append is different than the head chunk's + // encoding. So we cut a new chunk with the expected encoding. + c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) + chunkCreated = true } numSamples := c.chunk.NumSamples() @@ -794,19 +1123,11 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. 
if t >= s.nextAt || numSamples >= samplesPerChunk*2 { - c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange) + c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) chunkCreated = true } - s.app.Append(t, v) - - c.maxTime = t - s.lastValue = v - - if appendID > 0 && s.txs != nil { - s.txs.add(appendID) - } - return true, chunkCreated + return c, true, chunkCreated } // computeChunkEndTime estimates the end timestamp based the beginning of a @@ -822,15 +1143,26 @@ func computeChunkEndTime(start, cur, max int64) int64 { return start + (max-start)/n } -func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) *memChunk { +func (s *memSeries) cutNewHeadChunk( + mint int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, +) *memChunk { s.mmapCurrentHeadChunk(chunkDiskMapper) s.headChunk = &memChunk{ - chunk: chunkenc.NewXORChunk(), minTime: mint, maxTime: math.MinInt64, } + if chunkenc.IsValidEncoding(e) { + var err error + s.headChunk.chunk, err = chunkenc.NewEmptyChunk(e) + if err != nil { + panic(err) // This should never happen. + } + } else { + s.headChunk.chunk = chunkenc.NewXORChunk() + } + // Set upper bound on when the next chunk must be started. An earlier timestamp // may be chosen dynamically at a later point. s.nextAt = rangeForTimestamp(mint, chunkRange) @@ -874,7 +1206,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap } func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) { - if s.headChunk == nil { + if s.headChunk == nil || s.headChunk.chunk.NumSamples() == 0 { // There is no head chunk, so nothing to m-map here. return } @@ -912,11 +1244,20 @@ func (a *headAppender) Rollback() (err error) { series.pendingCommit = false series.Unlock() } + for i := range a.histograms { + series = a.histogramSeries[i] + series.Lock() + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + } a.head.putAppendBuffer(a.samples) a.head.putExemplarBuffer(a.exemplars) + a.head.putHistogramBuffer(a.histograms) a.head.putMetadataBuffer(a.metadata) a.samples = nil a.exemplars = nil + a.histograms = nil a.metadata = nil // Series are created in the head memory regardless of rollback. Thus we have diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index a97537a1fe2..6a273a0fd8a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -65,7 +66,7 @@ func (h *headIndexReader) Symbols() index.StringIter { func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { values, err := h.LabelValues(name, matchers...) 
if err == nil { - sort.Strings(values) + slices.Sort(values) } return values, err } @@ -95,7 +96,7 @@ func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, err if len(matchers) == 0 { labelNames := h.head.postings.LabelNames() - sort.Strings(labelNames) + slices.Sort(labelNames) return labelNames, nil } @@ -229,7 +230,7 @@ func (h *headIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, err for name := range namesMap { names = append(names, name) } - sort.Strings(names) + slices.Sort(names) return names, nil } @@ -485,7 +486,7 @@ func (o mergedOOOChunks) Bytes() []byte { panic(err) } it := o.Iterator(nil) - for it.Next() { + for it.Next() == chunkenc.ValFloat { t, v := it.At() app.Append(t, v) } @@ -534,7 +535,7 @@ func (b boundedChunk) Bytes() []byte { xor := chunkenc.NewXORChunk() a, _ := xor.Appender() it := b.Iterator(nil) - for it.Next() { + for it.Next() == chunkenc.ValFloat { t, v := it.At() a.Append(t, v) } @@ -563,33 +564,35 @@ type boundedIterator struct { // until its able to find a sample within the bounds minT and maxT. // If there are samples within bounds it will advance one by one amongst them. // If there are no samples within bounds it will return false. -func (b boundedIterator) Next() bool { - for b.Iterator.Next() { +func (b boundedIterator) Next() chunkenc.ValueType { + for b.Iterator.Next() == chunkenc.ValFloat { t, _ := b.Iterator.At() if t < b.minT { continue } else if t > b.maxT { - return false + return chunkenc.ValNone } - return true + return chunkenc.ValFloat } - return false + return chunkenc.ValNone } -func (b boundedIterator) Seek(t int64) bool { +func (b boundedIterator) Seek(t int64) chunkenc.ValueType { if t < b.minT { // We must seek at least up to b.minT if it is asked for something before that. - ok := b.Iterator.Seek(b.minT) - if !ok { - return false + val := b.Iterator.Seek(b.minT) + if !(val == chunkenc.ValFloat) { + return chunkenc.ValNone } t, _ := b.Iterator.At() - return t <= b.maxT + if t <= b.maxT { + return chunkenc.ValFloat + } } if t > b.maxT { // We seek anyway so that the subsequent Next() calls will also return false. b.Iterator.Seek(t) - return false + return chunkenc.ValNone } return b.Iterator.Seek(t) } @@ -683,6 +686,22 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch return makeStopIterator(c.chunk, it, stopAfter) } +// stopIterator wraps an Iterator, but only returns the first +// stopAfter values, if initialized with i=-1. +type stopIterator struct { + chunkenc.Iterator + + i, stopAfter int +} + +func (it *stopIterator) Next() chunkenc.ValueType { + if it.i+1 >= it.stopAfter { + return chunkenc.ValNone + } + it.i++ + return it.Iterator.Next() +} + func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator { // Re-use the Iterator object if it is a stopIterator. if stopIter, ok := it.(*stopIterator); ok { @@ -691,25 +710,10 @@ func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chu stopIter.stopAfter = stopAfter return stopIter } + return &stopIterator{ Iterator: c.Iterator(it), i: -1, stopAfter: stopAfter, } } - -// stopIterator wraps an Iterator, but only returns the first -// stopAfter values, if initialized with i=-1. 
-type stopIterator struct { - chunkenc.Iterator - - i, stopAfter int -} - -func (it *stopIterator) Next() bool { - if it.i+1 >= it.stopAfter { - return false - } - it.i++ - return it.Iterator.Next() -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 0a5ee05995a..b0fa7eb292a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -39,14 +39,15 @@ import ( "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" ) -func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { +func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { // Track number of samples that referenced a series we don't know about // for error reporting. var unknownRefs atomic.Uint64 var unknownExemplarRefs atomic.Uint64 + var unknownHistogramRefs atomic.Uint64 var unknownMetadataRefs atomic.Uint64 // Track number of series records that had overlapping m-map chunks. var mmapOverlappingChunks atomic.Uint64 @@ -58,8 +59,9 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H processors = make([]walSubsetProcessor, n) exemplarsInput chan record.RefExemplar - dec record.Decoder - shards = make([][]record.RefSample, n) + dec record.Decoder + shards = make([][]record.RefSample, n) + histogramShards = make([][]record.RefHistogramSample, n) decoded = make(chan interface{}, 10) decodeErr, seriesCreationErr error @@ -83,6 +85,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return []record.RefExemplar{} }, } + histogramsPool = sync.Pool{ + New: func() interface{} { + return []record.RefHistogramSample{} + }, + } metadataPool = sync.Pool{ New: func() interface{} { return []record.RefMetadata{} @@ -92,7 +99,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H defer func() { // For CorruptionErr ensure to terminate all workers before exiting. 
- _, ok := err.(*wal.CorruptionErr) + _, ok := err.(*wlog.CorruptionErr) if ok || seriesCreationErr != nil { for i := 0; i < n; i++ { processors[i].closeAndDrain() @@ -107,9 +114,10 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H processors[i].setup() go func(wp *walSubsetProcessor) { - unknown, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks) + unknown, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks) unknownRefs.Add(unknown) mmapOverlappingChunks.Add(overlapping) + unknownHistogramRefs.Add(unknownHistograms) wg.Done() }(&processors[i]) } @@ -148,7 +156,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series := seriesPool.Get().([]record.RefSeries)[:0] series, err = dec.Series(rec, series) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode series"), Segment: r.Segment(), Offset: r.Offset(), @@ -160,7 +168,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H samples := samplesPool.Get().([]record.RefSample)[:0] samples, err = dec.Samples(rec, samples) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode samples"), Segment: r.Segment(), Offset: r.Offset(), @@ -172,7 +180,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H tstones := tstonesPool.Get().([]tombstones.Stone)[:0] tstones, err = dec.Tombstones(rec, tstones) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode tombstones"), Segment: r.Segment(), Offset: r.Offset(), @@ -184,7 +192,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0] exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode exemplars"), Segment: r.Segment(), Offset: r.Offset(), @@ -192,11 +200,23 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- exemplars + case record.HistogramSamples: + hists := histogramsPool.Get().([]record.RefHistogramSample)[:0] + hists, err = dec.HistogramSamples(rec, hists) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: errors.Wrap(err, "decode histograms"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- hists case record.Metadata: meta := metadataPool.Get().([]record.RefMetadata)[:0] meta, err := dec.Metadata(rec, meta) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode metadata"), Segment: r.Segment(), Offset: r.Offset(), @@ -292,6 +312,43 @@ Outer: } //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. exemplarsPool.Put(v) + case []record.RefHistogramSample: + samples := v + minValidTime := h.minValidTime.Load() + // We split up the samples into chunks of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. 
+ for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + for i := 0; i < n; i++ { + if histogramShards[i] == nil { + histogramShards[i] = processors[i].reuseHistogramBuf() + } + } + for _, sam := range samples[:m] { + if sam.T < minValidTime { + continue // Before minValidTime: discard. + } + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } + mod := uint64(sam.Ref) % uint64(n) + histogramShards[mod] = append(histogramShards[mod], sam) + } + for i := 0; i < n; i++ { + if len(histogramShards[i]) > 0 { + processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]} + histogramShards[i] = nil + } + } + samples = samples[m:] + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + histogramsPool.Put(v) case []record.RefMetadata: for _, m := range v { s := h.series.getByID(chunks.HeadSeriesRef(m.Ref)) @@ -333,8 +390,14 @@ Outer: return errors.Wrap(r.Err(), "read records") } - if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 || unknownMetadataRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "metadata", unknownMetadataRefs.Load()) + if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { + level.Warn(h.logger).Log( + "msg", "Unknown series references", + "samples", unknownRefs.Load(), + "exemplars", unknownExemplarRefs.Load(), + "histograms", unknownHistogramRefs.Load(), + "metadata", unknownMetadataRefs.Load(), + ) } if count := mmapOverlappingChunks.Load(); count > 0 { level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count) @@ -402,25 +465,30 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m } type walSubsetProcessor struct { - input chan walSubsetProcessorInputItem - output chan []record.RefSample + input chan walSubsetProcessorInputItem + output chan []record.RefSample + histogramsOutput chan []record.RefHistogramSample } type walSubsetProcessorInputItem struct { - samples []record.RefSample - existingSeries *memSeries - walSeriesRef chunks.HeadSeriesRef + samples []record.RefSample + histogramSamples []record.RefHistogramSample + existingSeries *memSeries + walSeriesRef chunks.HeadSeriesRef } func (wp *walSubsetProcessor) setup() { - wp.output = make(chan []record.RefSample, 300) wp.input = make(chan walSubsetProcessorInputItem, 300) + wp.output = make(chan []record.RefSample, 300) + wp.histogramsOutput = make(chan []record.RefHistogramSample, 300) } func (wp *walSubsetProcessor) closeAndDrain() { close(wp.input) for range wp.output { } + for range wp.histogramsOutput { + } } // If there is a buffer in the output chan, return it for reuse, otherwise return nil. @@ -433,11 +501,24 @@ func (wp *walSubsetProcessor) reuseBuf() []record.RefSample { return nil } +// If there is a buffer in the output chan, return it for reuse, otherwise return nil. +func (wp *walSubsetProcessor) reuseHistogramBuf() []record.RefHistogramSample { + select { + case buf := <-wp.histogramsOutput: + return buf[:0] + default: + } + return nil +} + // processWALSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. 
-func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, mmapOverlappingChunks uint64) { +// Samples before the minValidTime timestamp are discarded. +func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, unknownHistogramRefs, mmapOverlappingChunks uint64) { defer close(wp.output) + defer close(wp.histogramsOutput) + minValidTime := h.minValidTime.Load() mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) chunkRange := h.chunkRange.Load() @@ -460,6 +541,10 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } + ms.isHistogramSeries = false + if s.T <= ms.mmMaxTime { + continue + } if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() @@ -475,13 +560,43 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp case wp.output <- in.samples: default: } + + for _, s := range in.histogramSamples { + if s.T < minValidTime { + continue + } + ms := h.series.getByID(s.Ref) + if ms == nil { + unknownHistogramRefs++ + continue + } + ms.isHistogramSeries = true + if s.T <= ms.mmMaxTime { + continue + } + if _, chunkCreated := ms.appendHistogram(s.T, s.H, 0, h.chunkDiskMapper, chunkRange); chunkCreated { + h.metrics.chunksCreated.Inc() + h.metrics.chunks.Inc() + } + if s.T > maxt { + maxt = s.T + } + if s.T < mint { + mint = s.T + } + } + + select { + case wp.histogramsOutput <- in.histogramSamples: + default: + } } h.updateMinMaxTime(mint, maxt) - return unknownRefs, mmapOverlappingChunks + return unknownRefs, unknownHistogramRefs, mmapOverlappingChunks } -func (h *Head) loadWBL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { +func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { // Track number of samples, m-map markers, that referenced a series we don't know about // for error reporting. var unknownRefs, mmapMarkerUnknownRefs atomic.Uint64 @@ -513,7 +628,7 @@ func (h *Head) loadWBL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H defer func() { // For CorruptionErr ensure to terminate all workers before exiting. // We also wrap it to identify OOO WBL corruption. 
- _, ok := err.(*wal.CorruptionErr) + _, ok := err.(*wlog.CorruptionErr) if ok { err = &errLoadWbl{err: err} for i := 0; i < n; i++ { @@ -543,7 +658,7 @@ func (h *Head) loadWBL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H samples := samplesPool.Get().([]record.RefSample)[:0] samples, err = dec.Samples(rec, samples) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode samples"), Segment: r.Segment(), Offset: r.Offset(), @@ -555,7 +670,7 @@ func (h *Head) loadWBL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H markers := markersPool.Get().([]record.RefMmapMarker)[:0] markers, err = dec.MmapMarkers(rec, markers) if err != nil { - decodeErr = &wal.CorruptionErr{ + decodeErr = &wlog.CorruptionErr{ Err: errors.Wrap(err, "decode mmap markers"), Segment: r.Segment(), Offset: r.Offset(), @@ -745,7 +860,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { } } wp.mx.Unlock() - wp.output <- samples + } h.updateMinOOOMaxOOOTime(mint, maxt) @@ -931,7 +1046,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return stats, errors.Wrap(err, "create chunk snapshot dir") } - cp, err := wal.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) + cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) if err != nil { return stats, errors.Wrap(err, "open chunk snapshot") } @@ -1170,7 +1285,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie } start := time.Now() - sr, err := wal.NewSegmentsReader(dir) + sr, err := wlog.NewSegmentsReader(dir) if err != nil { return snapIdx, snapOffset, nil, errors.Wrap(err, "open chunk snapshot") } @@ -1241,7 +1356,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie }(i, recordChan) } - r := wal.NewReader(sr) + r := wlog.NewReader(sr) var loopErr error Outer: for r.Next() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 29295c45f54..06a0f3e71ee 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -29,6 +29,7 @@ import ( "unsafe" "github.com/pkg/errors" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -819,7 +820,7 @@ func (w *Writer) writePostingsToTmpFiles() error { for n := range w.labelNames { names = append(names, n) } - sort.Strings(names) + slices.Sort(names) if err := w.f.Flush(); err != nil { return err @@ -1469,7 +1470,7 @@ func (r *Reader) SymbolTableSize() uint64 { func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { values, err := r.LabelValues(name, matchers...) 
if err == nil && r.version == FormatV1 { - sort.Strings(values) + slices.Sort(values) } return values, err } @@ -1571,7 +1572,7 @@ func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { names = append(names, name) } - sort.Strings(names) + slices.Sort(names) return names, nil } @@ -1743,7 +1744,7 @@ func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { } labelNames = append(labelNames, name) } - sort.Strings(labelNames) + slices.Sort(labelNames) return labelNames, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 8df2bccf67e..22e85ab3660 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -21,6 +21,7 @@ import ( "sync" "github.com/pkg/errors" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -90,7 +91,7 @@ func (p *MemPostings) Symbols() StringIter { res = append(res, k) } - sort.Strings(res) + slices.Sort(res) return NewStringListIter(res) } @@ -239,11 +240,9 @@ func (p *MemPostings) EnsureOrder() { for i := 0; i < n; i++ { go func() { - var sortable seriesRefSlice for job := range workc { for _, l := range *job { - sortable = l - sort.Sort(&sortable) + slices.Sort(l) } *job = (*job)[:0] @@ -830,13 +829,6 @@ func (it *bigEndianPostings) Err() error { return nil } -// seriesRefSlice attaches the methods of sort.Interface to []storage.SeriesRef, sorting in increasing order. -type seriesRefSlice []storage.SeriesRef - -func (x seriesRefSlice) Len() int { return len(x) } -func (x seriesRefSlice) Less(i, j int) bool { return x[i] < x[j] } -func (x seriesRefSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - // FindIntersectingPostings checks the intersection of p and candidates[i] for each i in candidates, // if intersection is non empty, then i is added to the indexes returned. // Returned indexes are not sorted. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go index 3af60399122..c246ff2e55a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go @@ -41,7 +41,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool { if i >= len(o.samples) { // none found. append it at the end - o.samples = append(o.samples, sample{t, v}) + o.samples = append(o.samples, sample{t, v, nil, nil}) return true } @@ -52,7 +52,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool { // Expand length by 1 to make room. use a zero sample, we will overwrite it anyway. 
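The postings hunk above drops the seriesRefSlice sort.Interface wrapper in favour of the generic slices.Sort from golang.org/x/exp/slices, which can order any slice whose element type has an ordered underlying type. A minimal sketch of the same idea with a stand-in type (not code from the patch):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// seriesRef stands in for storage.SeriesRef: its underlying type is uint64, so
// the generic slices.Sort can order it directly, with no sort.Interface
// boilerplate and no interface call per comparison.
type seriesRef uint64

func main() {
	refs := []seriesRef{42, 7, 19}
	slices.Sort(refs)
	fmt.Println(refs) // [7 19 42]
}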
o.samples = append(o.samples, sample{}) copy(o.samples[i+1:], o.samples[i:]) - o.samples[i] = sample{t, v} + o.samples[i] = sample{t, v, nil, nil} return true } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 5141a2c1d26..0f9b4d3b47d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -14,13 +14,15 @@ package tsdb import ( + "fmt" "math" - "sort" "strings" "unicode/utf8" "github.com/pkg/errors" + "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -317,7 +319,7 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro if m.Type == labels.MatchRegexp { setMatches := findSetMatches(m.GetRegexString()) if len(setMatches) > 0 { - sort.Strings(setMatches) + slices.Sort(setMatches) return ix.Postings(m.Name, setMatches...) } } @@ -344,7 +346,7 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro } if !isSorted { - sort.Strings(res) + slices.Sort(res) } return ix.Postings(m.Name, res...) } @@ -369,7 +371,7 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Posting } if !isSorted { - sort.Strings(res) + slices.Sort(res) } return ix.Postings(m.Name, res...) } @@ -526,16 +528,20 @@ func (b *blockBaseSeriesSet) Err() error { func (b *blockBaseSeriesSet) Warnings() storage.Warnings { return nil } -// populateWithDelGenericSeriesIterator allows to iterate over given chunk metas. In each iteration it ensures -// that chunks are trimmed based on given tombstones interval if any. +// populateWithDelGenericSeriesIterator allows to iterate over given chunk +// metas. In each iteration it ensures that chunks are trimmed based on given +// tombstones interval if any. // -// populateWithDelGenericSeriesIterator assumes that chunks that would be fully removed by intervals are filtered out in previous phase. +// populateWithDelGenericSeriesIterator assumes that chunks that would be fully +// removed by intervals are filtered out in previous phase. // -// On each iteration currChkMeta is available. If currDelIter is not nil, it means that chunk iterator in currChkMeta -// is invalid and chunk rewrite is needed, currDelIter should be used. +// On each iteration currChkMeta is available. If currDelIter is not nil, it +// means that the chunk iterator in currChkMeta is invalid and a chunk rewrite +// is needed, for which currDelIter should be used. type populateWithDelGenericSeriesIterator struct { chunks ChunkReader - // chks are expected to be sorted by minTime and should be related to the same, single series. + // chks are expected to be sorted by minTime and should be related to + // the same, single series. chks []chunks.Meta i int @@ -587,15 +593,17 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { // The chunk.Bytes() method is not safe for open chunks hence the re-encoding. // This happens when snapshotting the head block or just fetching chunks from TSDB. // - // TODO think how to avoid the typecasting to verify when it is head block. + // TODO(codesome): think how to avoid the typecasting to verify when it is head block. 
_, isSafeChunk := p.currChkMeta.Chunk.(*safeChunk) if len(p.bufIter.Intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) { - // If there are no overlap with deletion intervals AND it's NOT an "open" head chunk, we can take chunk as it is. + // If there is no overlap with deletion intervals AND it's NOT + // an "open" head chunk, we can take chunk as it is. p.currDelIter = nil return true } - // We don't want full chunk or it's potentially still opened, take just part of it. + // We don't want the full chunk, or it's potentially still opened, take + // just a part of it. p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(nil) p.currDelIter = p.bufIter return true @@ -618,9 +626,11 @@ type populateWithDelSeriesIterator struct { curr chunkenc.Iterator } -func (p *populateWithDelSeriesIterator) Next() bool { - if p.curr != nil && p.curr.Next() { - return true +func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType { + if p.curr != nil { + if valueType := p.curr.Next(); valueType != chunkenc.ValNone { + return valueType + } } for p.next() { @@ -629,26 +639,42 @@ func (p *populateWithDelSeriesIterator) Next() bool { } else { p.curr = p.currChkMeta.Chunk.Iterator(nil) } - if p.curr.Next() { - return true + if valueType := p.curr.Next(); valueType != chunkenc.ValNone { + return valueType } } - return false + return chunkenc.ValNone } -func (p *populateWithDelSeriesIterator) Seek(t int64) bool { - if p.curr != nil && p.curr.Seek(t) { - return true +func (p *populateWithDelSeriesIterator) Seek(t int64) chunkenc.ValueType { + if p.curr != nil { + if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone { + return valueType + } } - for p.Next() { - if p.curr.Seek(t) { - return true + for p.Next() != chunkenc.ValNone { + if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone { + return valueType } } - return false + return chunkenc.ValNone +} + +func (p *populateWithDelSeriesIterator) At() (int64, float64) { + return p.curr.At() +} + +func (p *populateWithDelSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + return p.curr.AtHistogram() } -func (p *populateWithDelSeriesIterator) At() (int64, float64) { return p.curr.At() } +func (p *populateWithDelSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + return p.curr.AtFloatHistogram() +} + +func (p *populateWithDelSeriesIterator) AtT() int64 { + return p.curr.AtT() +} func (p *populateWithDelSeriesIterator) Err() error { if err := p.populateWithDelGenericSeriesIterator.Err(); err != nil { @@ -670,38 +696,94 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { if !p.next() { return false } - p.curr = p.currChkMeta if p.currDelIter == nil { return true } - // Re-encode the chunk if iterator is provider. This means that it has some samples to be deleted or chunk is opened. - newChunk := chunkenc.NewXORChunk() - app, err := newChunk.Appender() - if err != nil { - p.err = err - return false - } - - if !p.currDelIter.Next() { + valueType := p.currDelIter.Next() + if valueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { p.err = errors.Wrap(err, "iterate chunk while re-encoding") - return false } - - // Empty chunk, this should not happen, as we assume full deletions being filtered before this iterator. - p.err = errors.Wrap(err, "populateWithDelChunkSeriesIterator: unexpected empty chunk found while rewriting chunk") return false } - t, v := p.currDelIter.At() - p.curr.MinTime = t - app.Append(t, v) + // Re-encode the chunk if iterator is provider. 
This means that it has + // some samples to be deleted or chunk is opened. + var ( + newChunk chunkenc.Chunk + app chunkenc.Appender + t int64 + err error + ) + switch valueType { + case chunkenc.ValHistogram: + newChunk = chunkenc.NewHistogramChunk() + if app, err = newChunk.Appender(); err != nil { + break + } + if hc, ok := p.currChkMeta.Chunk.(*chunkenc.HistogramChunk); ok { + newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader()) + } + var h *histogram.Histogram + t, h = p.currDelIter.AtHistogram() + p.curr.MinTime = t + + app.AppendHistogram(t, h) + for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + if vt != chunkenc.ValHistogram { + err = fmt.Errorf("found value type %v in histogram chunk", vt) + break + } + t, h = p.currDelIter.AtHistogram() + + // Defend against corrupted chunks. + pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h) + if len(pI)+len(nI) > 0 { + err = fmt.Errorf( + "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required", + len(pI), len(nI), + ) + break + } + if counterReset { + err = errors.New("detected unexpected counter reset in histogram") + break + } + if !okToAppend { + err = errors.New("unable to append histogram due to unexpected schema change") + break + } - for p.currDelIter.Next() { + app.AppendHistogram(t, h) + } + case chunkenc.ValFloat: + newChunk = chunkenc.NewXORChunk() + if app, err = newChunk.Appender(); err != nil { + break + } + var v float64 t, v = p.currDelIter.At() + p.curr.MinTime = t app.Append(t, v) + for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() { + if vt != chunkenc.ValFloat { + err = fmt.Errorf("found value type %v in float chunk", vt) + break + } + t, v = p.currDelIter.At() + app.Append(t, v) + } + + default: + // TODO(beorn7): Need FloatHistogram eventually. + err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType) + } + + if err != nil { + p.err = errors.Wrap(err, "iterate chunk while re-encoding") + return false } if err := p.currDelIter.Err(); err != nil { p.err = errors.Wrap(err, "iterate chunk while re-encoding") @@ -838,19 +920,34 @@ func (it *DeletedIterator) At() (int64, float64) { return it.Iter.At() } -func (it *DeletedIterator) Seek(t int64) bool { +func (it *DeletedIterator) AtHistogram() (int64, *histogram.Histogram) { + t, h := it.Iter.AtHistogram() + return t, h +} + +func (it *DeletedIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + t, h := it.Iter.AtFloatHistogram() + return t, h +} + +func (it *DeletedIterator) AtT() int64 { + return it.Iter.AtT() +} + +func (it *DeletedIterator) Seek(t int64) chunkenc.ValueType { if it.Iter.Err() != nil { - return false + return chunkenc.ValNone } - if ok := it.Iter.Seek(t); !ok { - return false + valueType := it.Iter.Seek(t) + if valueType == chunkenc.ValNone { + return chunkenc.ValNone } // Now double check if the entry falls into a deleted interval. - ts, _ := it.At() + ts := it.AtT() for _, itv := range it.Intervals { if ts < itv.Mint { - return true + return valueType } if ts > itv.Maxt { @@ -863,27 +960,26 @@ func (it *DeletedIterator) Seek(t int64) bool { } // The timestamp is greater than all the deleted intervals. 
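As context for the iterator changes above and below: Next and Seek now report a chunkenc.ValueType instead of a bool, so callers branch on the kind of sample and treat ValNone as exhaustion. A minimal caller-side sketch assuming the chunkenc.Iterator interface as it appears in this diff (illustrative only, not part of the patch):

package sketch

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// drainIterator walks an iterator using the ValueType-based contract: switch on
// what Next reports and call the accessor that matches the sample kind.
func drainIterator(it chunkenc.Iterator) error {
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		switch vt {
		case chunkenc.ValFloat:
			t, v := it.At()
			fmt.Println("float sample", t, v)
		case chunkenc.ValHistogram:
			t, h := it.AtHistogram()
			fmt.Println("histogram sample", t, h.Count)
		default:
			return fmt.Errorf("unexpected value type %v", vt)
		}
	}
	return it.Err()
}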
- return true + return valueType } -func (it *DeletedIterator) Next() bool { +func (it *DeletedIterator) Next() chunkenc.ValueType { Outer: - for it.Iter.Next() { - ts, _ := it.Iter.At() - + for valueType := it.Iter.Next(); valueType != chunkenc.ValNone; valueType = it.Iter.Next() { + ts := it.AtT() for _, tr := range it.Intervals { if tr.InBounds(ts) { continue Outer } if ts <= tr.Maxt { - return true + return valueType } it.Intervals = it.Intervals[1:] } - return true + return valueType } - return false + return chunkenc.ValNone } func (it *DeletedIterator) Err() error { return it.Iter.Err() } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 162414a3ce6..86cc93890c2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -21,6 +21,7 @@ import ( "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/storage" @@ -47,6 +48,8 @@ const ( MmapMarkers Type = 5 // Metadata is used to match WAL records of type Metadata. Metadata Type = 6 + // HistogramSamples is used to match WAL records of type Histograms. + HistogramSamples Type = 7 ) func (rt Type) String() string { @@ -55,10 +58,12 @@ func (rt Type) String() string { return "series" case Samples: return "samples" - case Exemplars: - return "exemplars" case Tombstones: return "tombstones" + case Exemplars: + return "exemplars" + case HistogramSamples: + return "histogram_samples" case MmapMarkers: return "mmapmarkers" case Metadata: @@ -72,14 +77,14 @@ func (rt Type) String() string { type MetricType uint8 const ( - UnknownMT MetricType = 0 - Counter MetricType = 1 - Gauge MetricType = 2 - Histogram MetricType = 3 - GaugeHistogram MetricType = 4 - Summary MetricType = 5 - Info MetricType = 6 - Stateset MetricType = 7 + UnknownMT MetricType = 0 + Counter MetricType = 1 + Gauge MetricType = 2 + HistogramSample MetricType = 3 + GaugeHistogram MetricType = 4 + Summary MetricType = 5 + Info MetricType = 6 + Stateset MetricType = 7 ) func GetMetricType(t textparse.MetricType) uint8 { @@ -89,7 +94,7 @@ func GetMetricType(t textparse.MetricType) uint8 { case textparse.MetricTypeGauge: return uint8(Gauge) case textparse.MetricTypeHistogram: - return uint8(Histogram) + return uint8(HistogramSample) case textparse.MetricTypeGaugeHistogram: return uint8(GaugeHistogram) case textparse.MetricTypeSummary: @@ -109,7 +114,7 @@ func ToTextparseMetricType(m uint8) textparse.MetricType { return textparse.MetricTypeCounter case uint8(Gauge): return textparse.MetricTypeGauge - case uint8(Histogram): + case uint8(HistogramSample): return textparse.MetricTypeHistogram case uint8(GaugeHistogram): return textparse.MetricTypeGaugeHistogram @@ -139,6 +144,7 @@ type RefSeries struct { } // RefSample is a timestamp/value pair associated with a reference to a series. +// TODO(beorn7): Perhaps make this "polymorphic", including histogram and float-histogram pointers? Then get rid of RefHistogramSample. type RefSample struct { Ref chunks.HeadSeriesRef T int64 @@ -161,6 +167,13 @@ type RefExemplar struct { Labels labels.Labels } +// RefHistogramSample is a histogram. 
+type RefHistogramSample struct { + Ref chunks.HeadSeriesRef + T int64 + H *histogram.Histogram +} + // RefMmapMarker marks that the all the samples of the given series until now have been m-mapped to disk. type RefMmapMarker struct { Ref chunks.HeadSeriesRef @@ -178,7 +191,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples: return t } return Unknown @@ -392,6 +405,88 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar return markers, nil } +func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { + dec := encoding.Decbuf{B: rec} + t := Type(dec.Byte()) + if t != HistogramSamples { + return nil, errors.New("invalid record type") + } + if dec.Len() == 0 { + return histograms, nil + } + var ( + baseRef = dec.Be64() + baseTime = dec.Be64int64() + ) + for len(dec.B) > 0 && dec.Err() == nil { + dref := dec.Varint64() + dtime := dec.Varint64() + + rh := RefHistogramSample{ + Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)), + T: baseTime + dtime, + H: &histogram.Histogram{ + Schema: 0, + ZeroThreshold: 0, + ZeroCount: 0, + Count: 0, + Sum: 0, + }, + } + + rh.H.Schema = int32(dec.Varint64()) + rh.H.ZeroThreshold = math.Float64frombits(dec.Be64()) + + rh.H.ZeroCount = dec.Uvarint64() + rh.H.Count = dec.Uvarint64() + rh.H.Sum = math.Float64frombits(dec.Be64()) + + l := dec.Uvarint() + if l > 0 { + rh.H.PositiveSpans = make([]histogram.Span, l) + } + for i := range rh.H.PositiveSpans { + rh.H.PositiveSpans[i].Offset = int32(dec.Varint64()) + rh.H.PositiveSpans[i].Length = dec.Uvarint32() + } + + l = dec.Uvarint() + if l > 0 { + rh.H.NegativeSpans = make([]histogram.Span, l) + } + for i := range rh.H.NegativeSpans { + rh.H.NegativeSpans[i].Offset = int32(dec.Varint64()) + rh.H.NegativeSpans[i].Length = dec.Uvarint32() + } + + l = dec.Uvarint() + if l > 0 { + rh.H.PositiveBuckets = make([]int64, l) + } + for i := range rh.H.PositiveBuckets { + rh.H.PositiveBuckets[i] = dec.Varint64() + } + + l = dec.Uvarint() + if l > 0 { + rh.H.NegativeBuckets = make([]int64, l) + } + for i := range rh.H.NegativeBuckets { + rh.H.NegativeBuckets[i] = dec.Varint64() + } + + histograms = append(histograms, rh) + } + + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return histograms, nil +} + // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. type Encoder struct{} @@ -517,3 +612,54 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } + +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(HistogramSamples)) + + if len(histograms) == 0 { + return buf.Get() + } + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. 
+ first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + buf.PutVarint64(int64(h.H.Schema)) + buf.PutBE64(math.Float64bits(h.H.ZeroThreshold)) + + buf.PutUvarint64(h.H.ZeroCount) + buf.PutUvarint64(h.H.Count) + buf.PutBE64(math.Float64bits(h.H.Sum)) + + buf.PutUvarint(len(h.H.PositiveSpans)) + for _, s := range h.H.PositiveSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } + + buf.PutUvarint(len(h.H.NegativeSpans)) + for _, s := range h.H.NegativeSpans { + buf.PutVarint64(int64(s.Offset)) + buf.PutUvarint32(s.Length) + } + + buf.PutUvarint(len(h.H.PositiveBuckets)) + for _, b := range h.H.PositiveBuckets { + buf.PutVarint64(b) + } + + buf.PutUvarint(len(h.H.NegativeBuckets)) + for _, b := range h.H.NegativeBuckets { + buf.PutVarint64(b) + } + } + + return buf.Get() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go index a1dcd9a5f3f..777db5e90ea 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") @@ -53,13 +54,34 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l ref := storage.SeriesRef(0) it := s.Iterator() lset := s.Labels() - for it.Next() { - t, v := it.At() - ref, err = app.Append(ref, lset, t, v) + typ := it.Next() + lastTyp := typ + for ; typ != chunkenc.ValNone; typ = it.Next() { + if lastTyp != typ { + // The behaviour of appender is undefined if samples of different types + // are appended to the same series in a single Commit(). + if err = app.Commit(); err != nil { + return "", err + } + app = w.Appender(ctx) + sampleCount = 0 + } + + switch typ { + case chunkenc.ValFloat: + t, v := it.At() + ref, err = app.Append(ref, lset, t, v) + case chunkenc.ValHistogram: + t, h := it.AtHistogram() + ref, err = app.AppendHistogram(ref, lset, t, h) + default: + return "", fmt.Errorf("unknown sample type %s", typ.String()) + } if err != nil { return "", err } sampleCount++ + lastTyp = typ } if it.Err() != nil { return "", it.Err() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go index 541d592d484..87cc345dd0f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go @@ -14,6 +14,9 @@ package tsdbutil import ( + "fmt" + + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" ) @@ -26,6 +29,9 @@ type Samples interface { type Sample interface { T() int64 V() float64 + H() *histogram.Histogram + FH() *histogram.FloatHistogram + Type() chunkenc.ValueType } type SampleSlice []Sample @@ -33,10 +39,12 @@ type SampleSlice []Sample func (s SampleSlice) Get(i int) Sample { return s[i] } func (s SampleSlice) Len() int { return len(s) } +// ChunkFromSamples requires all samples to have the same type. 
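Encoder.HistogramSamples above writes the first sample's ref and timestamp once, then stores every later ref/timestamp as a signed delta against that base, which keeps the varints short when values cluster together. A standard-library sketch of the same idea (illustrative only, not the actual record layout):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	timestamps := []int64{1_000_000_000, 1_000_000_015, 1_000_000_030}

	buf := make([]byte, 0, 32)
	base := timestamps[0]
	buf = binary.AppendVarint(buf, base) // the real record stores the base as a fixed 8-byte value
	for _, t := range timestamps[1:] {
		buf = binary.AppendVarint(buf, t-base) // small deltas fit in one or two bytes
	}
	fmt.Printf("%d timestamps encoded in %d bytes\n", len(timestamps), len(buf))
}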
func ChunkFromSamples(s []Sample) chunks.Meta { return ChunkFromSamplesGeneric(SampleSlice(s)) } +// ChunkFromSamplesGeneric requires all samples to have the same type. func ChunkFromSamplesGeneric(s Samples) chunks.Meta { mint, maxt := int64(0), int64(0) @@ -44,11 +52,29 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T() } - c := chunkenc.NewXORChunk() + if s.Len() == 0 { + return chunks.Meta{ + Chunk: chunkenc.NewXORChunk(), + } + } + + sampleType := s.Get(0).Type() + c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding()) + if err != nil { + panic(err) // TODO(codesome): dont panic. + } + ca, _ := c.Appender() for i := 0; i < s.Len(); i++ { - ca.Append(s.Get(i).T(), s.Get(i).V()) + switch sampleType { + case chunkenc.ValFloat: + ca.Append(s.Get(i).T(), s.Get(i).V()) + case chunkenc.ValHistogram: + ca.AppendHistogram(s.Get(i).T(), s.Get(i).H()) + default: + panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) + } } return chunks.Meta{ MinTime: mint, @@ -58,8 +84,10 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { } type sample struct { - t int64 - v float64 + t int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram } func (s sample) T() int64 { @@ -70,11 +98,30 @@ func (s sample) V() float64 { return s.v } +func (s sample) H() *histogram.Histogram { + return s.h +} + +func (s sample) FH() *histogram.FloatHistogram { + return s.fh +} + +func (s sample) Type() chunkenc.ValueType { + switch { + case s.h != nil: + return chunkenc.ValHistogram + case s.fh != nil: + return chunkenc.ValFloatHistogram + default: + return chunkenc.ValFloat + } +} + // PopulatedChunk creates a chunk populated with samples every second starting at minTime func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { samples := make([]Sample, numSamples) for i := 0; i < numSamples; i++ { - samples[i] = sample{minTime + int64(i*1000), 1.0} + samples[i] = sample{t: minTime + int64(i*1000), v: 1.0} } return ChunkFromSamples(samples) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index 615903c6395..03043c781b9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -37,7 +37,7 @@ import ( "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" ) // WALEntryType indicates what data a WAL entry contains. @@ -89,7 +89,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics { // WAL is a write ahead log that can log new series labels and samples. // It must be completely read before new entries are logged. // -// DEPRECATED: use wal pkg combined with the record codex instead. +// DEPRECATED: use wlog pkg combined with the record codex instead. type WAL interface { Reader() WALReader LogSeries([]record.RefSeries) error @@ -146,7 +146,7 @@ func newCRC32() hash.Hash32 { // SegmentWAL is a write ahead log for series data. // -// DEPRECATED: use wal pkg combined with the record coders instead. +// DEPRECATED: use wlog pkg combined with the record coders instead. 
type SegmentWAL struct { mtx sync.Mutex metrics *walMetrics @@ -1229,7 +1229,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { if err := os.RemoveAll(tmpdir); err != nil { return errors.Wrap(err, "cleanup replacement dir") } - repl, err := wal.New(logger, nil, tmpdir, false) + repl, err := wlog.New(logger, nil, tmpdir, false) if err != nil { return errors.Wrap(err, "open new WAL") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go similarity index 87% rename from vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go rename to vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index f8f9f387d59..42b03f48fb9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package wal +package wlog import ( "fmt" @@ -38,12 +38,12 @@ import ( // CheckpointStats returns stats about a created checkpoint. type CheckpointStats struct { DroppedSeries int - DroppedSamples int + DroppedSamples int // Includes histograms. DroppedTombstones int DroppedExemplars int DroppedMetadata int TotalSeries int // Processed series including dropped ones. - TotalSamples int // Processed samples including dropped ones. + TotalSamples int // Processed float and histogram samples including dropped ones. TotalTombstones int // Processed tombstones including dropped ones. TotalExemplars int // Processed exemplars including dropped ones. TotalMetadata int // Processed metadata including dropped ones. @@ -93,7 +93,7 @@ const checkpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. -func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser @@ -148,20 +148,21 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea r := NewReader(sgmReader) var ( - series []record.RefSeries - samples []record.RefSample - tstones []tombstones.Stone - exemplars []record.RefExemplar - metadata []record.RefMetadata - dec record.Decoder - enc record.Encoder - buf []byte - recs [][]byte + series []record.RefSeries + samples []record.RefSample + histogramSamples []record.RefHistogramSample + tstones []tombstones.Stone + exemplars []record.RefExemplar + metadata []record.RefMetadata + dec record.Decoder + enc record.Encoder + buf []byte + recs [][]byte latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata) ) for r.Next() { - series, samples, tstones, exemplars, metadata = series[:0], samples[:0], tstones[:0], exemplars[:0], metadata[:0] + series, samples, histogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0] // We don't reset the buffer since we batch up multiple records // before writing them to the checkpoint. 
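The next hunk filters histogram samples during checkpointing exactly like the float path: it reuses the decoded slice's backing array (repl := histogramSamples[:0]) and keeps only samples at or after mint, avoiding a fresh allocation per record. A standalone sketch of that in-place filter idiom (names are illustrative):

package main

import "fmt"

// keepRecent drops timestamps older than min in place, reusing the input's
// backing array instead of allocating a new slice.
func keepRecent(ts []int64, min int64) []int64 {
	repl := ts[:0]
	for _, t := range ts {
		if t >= min {
			repl = append(repl, t)
		}
	}
	return repl
}

func main() {
	fmt.Println(keepRecent([]int64{5, 12, 9, 20}, 10)) // [12 20]
}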
@@ -206,6 +207,24 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea stats.TotalSamples += len(samples) stats.DroppedSamples += len(samples) - len(repl) + case record.HistogramSamples: + histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) + if err != nil { + return nil, errors.Wrap(err, "decode histogram samples") + } + // Drop irrelevant histogramSamples in place. + repl := histogramSamples[:0] + for _, h := range histogramSamples { + if h.T >= mint { + repl = append(repl, h) + } + } + if len(repl) > 0 { + buf = enc.HistogramSamples(repl, buf) + } + stats.TotalSamples += len(samples) + stats.DroppedSamples += len(samples) - len(repl) + case record.Tombstones: tstones, err = dec.Tombstones(rec, tstones) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go similarity index 99% rename from vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go rename to vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go index f09d149aa39..fd949a9630e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package wal +package wlog import ( "encoding/binary" diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go similarity index 99% rename from vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go rename to vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go index 7612f8775fa..e2b50d4b2a3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package wal +package wlog import ( "encoding/binary" diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go similarity index 91% rename from vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go rename to vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index 5b949fc00ff..5d7c84d34a7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package wal +package wlog import ( "fmt" @@ -19,7 +19,6 @@ import ( "math" "os" "path" - "sort" "strconv" "strings" "time" @@ -28,6 +27,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/tsdb/record" @@ -49,6 +49,7 @@ type WriteTo interface { // Once returned, the WAL Watcher will not attempt to pass that data again. 
Append([]record.RefSample) bool AppendExemplars([]record.RefExemplar) bool + AppendHistograms([]record.RefHistogramSample) bool StoreSeries([]record.RefSeries, int) // Next two methods are intended for garbage-collection: first we call @@ -74,6 +75,7 @@ type Watcher struct { walDir string lastCheckpoint string sendExemplars bool + sendHistograms bool metrics *WatcherMetrics readerMetrics *LiveReaderMetrics @@ -144,18 +146,19 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } // NewWatcher creates a new WAL watcher for a given WriteTo. -func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars bool) *Watcher { +func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms bool) *Watcher { if logger == nil { logger = log.NewNopLogger() } return &Watcher{ - logger: logger, - writer: writer, - metrics: metrics, - readerMetrics: readerMetrics, - walDir: path.Join(dir, "wal"), - name: name, - sendExemplars: sendExemplars, + logger: logger, + writer: writer, + metrics: metrics, + readerMetrics: readerMetrics, + walDir: path.Join(dir, "wal"), + name: name, + sendExemplars: sendExemplars, + sendHistograms: sendHistograms, quit: make(chan struct{}), done: make(chan struct{}), @@ -301,7 +304,7 @@ func (w *Watcher) firstAndLast() (int, int, error) { return refs[0], refs[len(refs)-1], nil } -// Copied from tsdb/wal/wal.go so we do not have to open a WAL. +// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL. // Plan is to move WAL watcher to TSDB and dedupe these implementations. func (w *Watcher) segments(dir string) ([]int, error) { files, err := os.ReadDir(dir) @@ -317,7 +320,7 @@ func (w *Watcher) segments(dir string) ([]int, error) { } refs = append(refs, k) } - sort.Ints(refs) + slices.Sort(refs) for i := 0; i < len(refs)-1; i++ { if refs[i]+1 != refs[i+1] { return nil, errors.New("segments are not sequential") @@ -473,11 +476,13 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { // Also used with readCheckpoint - implements segmentReadFn. func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { var ( - dec record.Decoder - series []record.RefSeries - samples []record.RefSample - send []record.RefSample - exemplars []record.RefExemplar + dec record.Decoder + series []record.RefSeries + samples []record.RefSample + samplesToSend []record.RefSample + exemplars []record.RefExemplar + histograms []record.RefHistogramSample + histogramsToSend []record.RefHistogramSample ) for r.Next() && !isClosed(w.quit) { rec := r.Record() @@ -510,12 +515,12 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { duration := time.Since(w.startTime) level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) } - send = append(send, s) + samplesToSend = append(samplesToSend, s) } } - if len(send) > 0 { - w.writer.Append(send) - send = send[:0] + if len(samplesToSend) > 0 { + w.writer.Append(samplesToSend) + samplesToSend = samplesToSend[:0] } case record.Exemplars: @@ -535,6 +540,34 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } w.writer.AppendExemplars(exemplars) + case record.HistogramSamples: + // Skip if experimental "histograms over remote write" is not enabled. 
+ if !w.sendHistograms { + break + } + if !tail { + break + } + histograms, err := dec.HistogramSamples(rec, histograms[:0]) + if err != nil { + w.recordDecodeFailsMetric.Inc() + return err + } + for _, h := range histograms { + if h.T > w.startTimestamp { + if !w.sendSamples { + w.sendSamples = true + duration := time.Since(w.startTime) + level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + } + histogramsToSend = append(histogramsToSend, h) + } + } + if len(histogramsToSend) > 0 { + w.writer.AppendHistograms(histogramsToSend) + histogramsToSend = histogramsToSend[:0] + } + case record.Tombstones: default: diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go similarity index 92% rename from vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go rename to vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index 191b09ed992..5ae308d4eac 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package wal +package wlog import ( "bufio" @@ -133,7 +133,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { - level.Warn(logger).Log("msg", "Last page of the wal is torn, filling it with zeros", "segment", segName) + level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, errors.Wrap(err, "zero-pad torn page") @@ -164,7 +164,7 @@ func OpenReadSegment(fn string) (*Segment, error) { return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil } -// WAL is a write ahead log that stores records in segment files. +// WL is a write log that stores records in segment files. // It must be read from start to end once before logging new data. // If an error occurs during read, the repair procedure must be called // before it's safe to do further writes. @@ -174,7 +174,7 @@ func OpenReadSegment(fn string) (*Segment, error) { // Records are never split across segments to allow full segments to be // safely truncated. It also ensures that torn writes never corrupt records // beyond the most recent segment. 
-type WAL struct { +type WL struct { dir string logger log.Logger segmentSize int @@ -188,10 +188,10 @@ type WAL struct { compress bool snappyBuf []byte - metrics *walMetrics + metrics *wlMetrics } -type walMetrics struct { +type wlMetrics struct { fsyncDuration prometheus.Summary pageFlushes prometheus.Counter pageCompletions prometheus.Counter @@ -201,12 +201,12 @@ type walMetrics struct { writesFailed prometheus.Counter } -func newWALMetrics(r prometheus.Registerer) *walMetrics { - m := &walMetrics{} +func newWLMetrics(r prometheus.Registerer) *wlMetrics { + m := &wlMetrics{} m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ Name: "fsync_duration_seconds", - Help: "Duration of WAL fsync.", + Help: "Duration of write log fsync.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ @@ -219,19 +219,19 @@ func newWALMetrics(r prometheus.Registerer) *walMetrics { }) m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ Name: "truncations_failed_total", - Help: "Total number of WAL truncations that failed.", + Help: "Total number of write log truncations that failed.", }) m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "truncations_total", - Help: "Total number of WAL truncations attempted.", + Help: "Total number of write log truncations attempted.", }) m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "segment_current", - Help: "WAL segment index that TSDB is currently writing to.", + Help: "Write log segment index that TSDB is currently writing to.", }) m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{ Name: "writes_failed_total", - Help: "Total number of WAL writes that failed.", + Help: "Total number of write log writes that failed.", }) if r != nil { @@ -250,13 +250,13 @@ func newWALMetrics(r prometheus.Registerer) *walMetrics { } // New returns a new WAL over the given directory. -func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WAL, error) { +func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } -// NewSize returns a new WAL over the given directory. +// NewSize returns a new write log over the given directory. // New segments are created with the specified size. -func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WAL, error) { +func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -266,7 +266,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if logger == nil { logger = log.NewNopLogger() } - w := &WAL{ + w := &WL{ dir: dir, logger: logger, segmentSize: segmentSize, @@ -277,9 +277,9 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } prefix := "prometheus_tsdb_wal_" if filepath.Base(dir) == WblDirName { - prefix = "prometheus_tsdb_out_of_order_wal_" + prefix = "prometheus_tsdb_out_of_order_wbl_" } - w.metrics = newWALMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg)) + w.metrics = newWLMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg)) _, last, err := Segments(w.Dir()) if err != nil { @@ -308,11 +308,11 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } // Open an existing WAL. 
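newWLMetrics above keeps the metric names unprefixed and relies on prometheus.WrapRegistererWithPrefix, which is how NewSize can expose the same counters under either the WAL prefix or the out-of-order WBL prefix. A minimal sketch of that registry pattern (illustrative values, not code from the patch):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Register an unprefixed counter through a wrapping registerer; the exposed
	// name becomes prometheus_tsdb_wal_truncations_total.
	wrapped := prometheus.WrapRegistererWithPrefix("prometheus_tsdb_wal_", reg)
	truncations := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "truncations_total",
		Help: "Total number of write log truncations attempted.",
	})
	wrapped.MustRegister(truncations)
	truncations.Inc()

	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetName())
}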
-func Open(logger log.Logger, dir string) (*WAL, error) { +func Open(logger log.Logger, dir string) (*WL, error) { if logger == nil { logger = log.NewNopLogger() } - w := &WAL{ + w := &WL{ dir: dir, logger: logger, } @@ -321,16 +321,16 @@ func Open(logger log.Logger, dir string) (*WAL, error) { } // CompressionEnabled returns if compression is enabled on this WAL. -func (w *WAL) CompressionEnabled() bool { +func (w *WL) CompressionEnabled() bool { return w.compress } // Dir returns the directory of the WAL. -func (w *WAL) Dir() string { +func (w *WL) Dir() string { return w.dir } -func (w *WAL) run() { +func (w *WL) run() { Loop: for { select { @@ -350,7 +350,7 @@ Loop: // Repair attempts to repair the WAL based on the error. // It discards all data after the corruption. -func (w *WAL) Repair(origErr error) error { +func (w *WL) Repair(origErr error) error { // We could probably have a mode that only discards torn records right around // the corruption to preserve as data much as possible. // But that's not generally applicable if the records have any kind of causality. @@ -466,7 +466,7 @@ func SegmentName(dir string, i int) string { // NextSegment creates the next segment and closes the previous one asynchronously. // It returns the file number of the new file. -func (w *WAL) NextSegment() (int, error) { +func (w *WL) NextSegment() (int, error) { w.mtx.Lock() defer w.mtx.Unlock() return w.nextSegment(true) @@ -474,7 +474,7 @@ func (w *WAL) NextSegment() (int, error) { // NextSegmentSync creates the next segment and closes the previous one in sync. // It returns the file number of the new file. -func (w *WAL) NextSegmentSync() (int, error) { +func (w *WL) NextSegmentSync() (int, error) { w.mtx.Lock() defer w.mtx.Unlock() return w.nextSegment(false) @@ -482,9 +482,9 @@ func (w *WAL) NextSegmentSync() (int, error) { // nextSegment creates the next segment and closes the previous one. // It returns the file number of the new file. -func (w *WAL) nextSegment(async bool) (int, error) { +func (w *WL) nextSegment(async bool) (int, error) { if w.closed { - return 0, errors.New("wal is closed") + return 0, errors.New("wlog is closed") } // Only flush the current page if it actually holds data. @@ -519,7 +519,7 @@ func (w *WAL) nextSegment(async bool) (int, error) { return next.Index(), nil } -func (w *WAL) setSegment(segment *Segment) error { +func (w *WL) setSegment(segment *Segment) error { w.segment = segment // Correctly initialize donePages. @@ -535,7 +535,7 @@ func (w *WAL) setSegment(segment *Segment) error { // flushPage writes the new contents of the page to disk. If no more records will fit into // the page, the remaining bytes will be set to zero and a new page will be started. // If clear is true, this is enforced regardless of how many bytes are left in the page. -func (w *WAL) flushPage(clear bool) error { +func (w *WL) flushPage(clear bool) error { w.metrics.pageFlushes.Inc() p := w.page @@ -601,13 +601,13 @@ func (t recType) String() string { } } -func (w *WAL) pagesPerSegment() int { +func (w *WL) pagesPerSegment() int { return w.segmentSize / pageSize } // Log writes the records into the log. // Multiple records can be passed at once to reduce writes and increase throughput. 
-func (w *WAL) Log(recs ...[]byte) error { +func (w *WL) Log(recs ...[]byte) error { w.mtx.Lock() defer w.mtx.Unlock() // Callers could just implement their own list record format but adding @@ -625,7 +625,7 @@ func (w *WAL) Log(recs ...[]byte) error { // - the final record of a batch // - the record is bigger than the page size // - the current page is full. -func (w *WAL) log(rec []byte, final bool) error { +func (w *WL) log(rec []byte, final bool) error { // When the last page flush failed the page will remain full. // When the page is full, need to flush it before trying to add more records to it. if w.page.full() { @@ -721,7 +721,7 @@ func (w *WAL) log(rec []byte, final bool) error { // LastSegmentAndOffset returns the last segment number of the WAL // and the offset in that file upto which the segment has been filled. -func (w *WAL) LastSegmentAndOffset() (seg, offset int, err error) { +func (w *WL) LastSegmentAndOffset() (seg, offset int, err error) { w.mtx.Lock() defer w.mtx.Unlock() @@ -736,7 +736,7 @@ func (w *WAL) LastSegmentAndOffset() (seg, offset int, err error) { } // Truncate drops all segments before i. -func (w *WAL) Truncate(i int) (err error) { +func (w *WL) Truncate(i int) (err error) { w.metrics.truncateTotal.Inc() defer func() { if err != nil { @@ -758,27 +758,27 @@ func (w *WAL) Truncate(i int) (err error) { return nil } -func (w *WAL) fsync(f *Segment) error { +func (w *WL) fsync(f *Segment) error { start := time.Now() err := f.Sync() w.metrics.fsyncDuration.Observe(time.Since(start).Seconds()) return err } -// Sync forces a file sync on the current wal segment. This function is meant +// Sync forces a file sync on the current write log segment. This function is meant // to be used only on tests due to different behaviour on Operating Systems // like windows and linux -func (w *WAL) Sync() error { +func (w *WL) Sync() error { return w.fsync(w.segment) } // Close flushes all writes and closes active segment. -func (w *WAL) Close() (err error) { +func (w *WL) Close() (err error) { w.mtx.Lock() defer w.mtx.Unlock() if w.closed { - return errors.New("wal already closed") + return errors.New("wlog already closed") } if w.segment == nil { @@ -811,8 +811,8 @@ func (w *WAL) Close() (err error) { // Segments returns the range [first, n] of currently existing segments. // If no segments are found, first and n are -1. -func Segments(walDir string) (first, last int, err error) { - refs, err := listSegments(walDir) +func Segments(wlDir string) (first, last int, err error) { + refs, err := listSegments(wlDir) if err != nil { return 0, 0, err } @@ -979,8 +979,8 @@ func (r *segmentBufReader) Read(b []byte) (n int, err error) { return n, nil } -// Computing size of the WAL. +// Size computes the size of the write log. // We do this by adding the sizes of all the files under the WAL dir. -func (w *WAL) Size() (int64, error) { +func (w *WL) Size() (int64, error) { return fileutil.DirSize(w.Dir()) } diff --git a/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go b/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go new file mode 100644 index 00000000000..a82ae100d7f --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/jsonutil/marshal.go @@ -0,0 +1,62 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonutil + +import ( + "math" + "strconv" + + jsoniter "github.com/json-iterator/go" +) + +// MarshalTimestamp marshals a point timestamp using the passed jsoniter stream. +func MarshalTimestamp(t int64, stream *jsoniter.Stream) { + // Write out the timestamp as a float divided by 1000. + // This is ~3x faster than converting to a float. + if t < 0 { + stream.WriteRaw(`-`) + t = -t + } + stream.WriteInt64(t / 1000) + fraction := t % 1000 + if fraction != 0 { + stream.WriteRaw(`.`) + if fraction < 100 { + stream.WriteRaw(`0`) + } + if fraction < 10 { + stream.WriteRaw(`0`) + } + stream.WriteInt64(fraction) + } +} + +// MarshalValue marshals a point value using the passed jsoniter stream. +func MarshalValue(v float64, stream *jsoniter.Stream) { + stream.WriteRaw(`"`) + // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround + // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). + buf := stream.Buffer() + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + buf = strconv.AppendFloat(buf, v, fmt, -1, 64) + stream.SetBuffer(buf) + stream.WriteRaw(`"`) +} diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go index ff9b33bbdd1..5d95437e99a 100644 --- a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go +++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go @@ -39,6 +39,7 @@ func New(t testutil.T) *TestStorage { opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond) opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond) opts.RetentionDuration = 0 + opts.EnableNativeHistograms = true db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats()) require.NoError(t, err, "unexpected error while opening test storage") reg := prometheus.NewRegistry() diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 9ceaa8c3f2d..54652471db7 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -23,7 +23,6 @@ import ( "net/url" "os" "path/filepath" - "sort" "strconv" "strings" "time" @@ -37,9 +36,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/route" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" @@ -52,6 +53,7 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/httputil" + 
"github.com/prometheus/prometheus/util/jsonutil" "github.com/prometheus/prometheus/util/stats" ) @@ -202,6 +204,8 @@ type API struct { } func init() { + jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) } @@ -549,12 +553,12 @@ func (api *API) queryExemplars(r *http.Request) apiFuncResult { ctx := r.Context() eq, err := api.ExemplarQueryable.ExemplarQuerier(ctx) if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} + return apiFuncResult{nil, returnAPIError(err), nil, nil} } res, err := eq.Select(timestamp.FromTime(start), timestamp.FromTime(end), selectors...) if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} + return apiFuncResult{nil, returnAPIError(err), nil, nil} } return apiFuncResult{res, nil, nil, nil} @@ -599,7 +603,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} + return apiFuncResult{nil, returnAPIError(err), nil, nil} } defer q.Close() @@ -613,7 +617,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { for _, matchers := range matcherSets { vals, callWarnings, err := q.LabelNames(matchers...) if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} + return apiFuncResult{nil, returnAPIError(err), warnings, nil} } warnings = append(warnings, callWarnings...) 
@@ -627,7 +631,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { for key := range labelNamesSet { names = append(names, key) } - sort.Strings(names) + slices.Sort(names) } else { names, warnings, err = q.LabelNames() if err != nil { @@ -712,7 +716,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } } - sort.Strings(vals) + slices.Sort(vals) return apiFuncResult{vals, nil, warnings, closer} } @@ -749,7 +753,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} + return apiFuncResult{nil, returnAPIError(err), nil, nil} } // From now on, we must only return with a finalizer in the result (to // be called by the caller) or call q.Close ourselves (which is required @@ -790,7 +794,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { warnings := set.Warnings() if set.Err() != nil { - return apiFuncResult{nil, &apiError{errorExec, set.Err()}, warnings, closer} + return apiFuncResult{nil, returnAPIError(set.Err()), warnings, closer} } return apiFuncResult{metrics, nil, warnings, closer} @@ -907,7 +911,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { keys = append(keys, k) n += len(targets[k]) } - sort.Strings(keys) + slices.Sort(keys) return keys, n } @@ -1655,13 +1659,134 @@ OUTER: return matcherSets, nil } +// marshalSeriesJSON writes something like the following: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "values": [ +// [ 1435781451.781, "1" ], +// < more values> +// ], +// "histograms": [ +// [ 1435781451.781, { < histogram, see below > } ], +// < more histograms > +// ], +// }, +func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + s := *((*promql.Series)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := s.Metric.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + + // We make two passes through the series here: In the first marshaling + // all value points, in the second marshaling all histogram + // points. That's probably cheaper than just one pass in which we copy + // out histogram Points into a newly allocated slice for separate + // marshaling. (Could be benchmarked, though.) 
+ var foundValue, foundHistogram bool + for _, p := range s.Points { + if p.H == nil { + stream.WriteMore() + if !foundValue { + stream.WriteObjectField(`values`) + stream.WriteArrayStart() + } + foundValue = true + marshalPointJSON(unsafe.Pointer(&p), stream) + } else { + foundHistogram = true + } + } + if foundValue { + stream.WriteArrayEnd() + } + if foundHistogram { + firstHistogram := true + for _, p := range s.Points { + if p.H != nil { + stream.WriteMore() + if firstHistogram { + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() + } + firstHistogram = false + marshalPointJSON(unsafe.Pointer(&p), stream) + } + } + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// marshalSampleJSON writes something like the following for normal value samples: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "value": [ 1435781451.781, "1" ] +// }, +// +// For histogram samples, it writes something like this: +// +// { +// "metric" : { +// "__name__" : "up", +// "job" : "prometheus", +// "instance" : "localhost:9090" +// }, +// "histogram": [ 1435781451.781, { < histogram, see below > } ] +// }, +func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + s := *((*promql.Sample)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := s.Metric.MarshalJSON() + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + stream.WriteMore() + if s.Point.H == nil { + stream.WriteObjectField(`value`) + } else { + stream.WriteObjectField(`histogram`) + } + marshalPointJSON(unsafe.Pointer(&s.Point), stream) + stream.WriteObjectEnd() +} + +func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + // marshalPointJSON writes `[ts, "val"]`. func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { p := *((*promql.Point)(ptr)) stream.WriteArrayStart() - marshalTimestamp(p.T, stream) + jsonutil.MarshalTimestamp(p.T, stream) stream.WriteMore() - marshalValue(p.V, stream) + if p.H == nil { + jsonutil.MarshalValue(p.V, stream) + } else { + marshalHistogram(p.H, stream) + } stream.WriteArrayEnd() } @@ -1669,6 +1794,78 @@ func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { return false } +// marshalHistogramJSON writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + jsonutil.MarshalValue(h.Count, stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + jsonutil.MarshalValue(h.Sum, stream) + + bucketFound := false + it := h.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. 
+ } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. + } + } + stream.WriteArrayStart() + stream.WriteInt(boundaries) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Lower, stream) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Upper, stream) + stream.WriteMore() + jsonutil.MarshalValue(bucket.Count, stream) + stream.WriteArrayEnd() + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + // marshalExemplarJSON writes. // // { @@ -1692,13 +1889,12 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { // "value" key. stream.WriteMore() stream.WriteObjectField(`value`) - marshalValue(p.Value, stream) + jsonutil.MarshalValue(p.Value, stream) // "timestamp" key. stream.WriteMore() stream.WriteObjectField(`timestamp`) - marshalTimestamp(p.Ts, stream) - // marshalTimestamp(p.Ts, stream) + jsonutil.MarshalTimestamp(p.Ts, stream) stream.WriteObjectEnd() } @@ -1706,42 +1902,3 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool { return false } - -func marshalTimestamp(t int64, stream *jsoniter.Stream) { - // Write out the timestamp as a float divided by 1000. - // This is ~3x faster than converting to a float. - if t < 0 { - stream.WriteRaw(`-`) - t = -t - } - stream.WriteInt64(t / 1000) - fraction := t % 1000 - if fraction != 0 { - stream.WriteRaw(`.`) - if fraction < 100 { - stream.WriteRaw(`0`) - } - if fraction < 10 { - stream.WriteRaw(`0`) - } - stream.WriteInt64(fraction) - } -} - -func marshalValue(v float64, stream *jsoniter.Stream) { - stream.WriteRaw(`"`) - // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround - // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). - buf := stream.Buffer() - abs := math.Abs(v) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
- if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - buf = strconv.AppendFloat(buf, v, fmt, -1, 64) - stream.SetBuffer(buf) - stream.WriteRaw(`"`) -} diff --git a/vendor/github.com/rueian/rueidis/.gitignore b/vendor/github.com/rueian/rueidis/.gitignore new file mode 100644 index 00000000000..6cb4d50ce94 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/.gitignore @@ -0,0 +1,3 @@ +.idea/ +dist/ + diff --git a/vendor/github.com/rueian/rueidis/.goreleaser.yaml b/vendor/github.com/rueian/rueidis/.goreleaser.yaml new file mode 100644 index 00000000000..f1f8e5d546e --- /dev/null +++ b/vendor/github.com/rueian/rueidis/.goreleaser.yaml @@ -0,0 +1,8 @@ +builds: + - skip: true +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/rueian/rueidis/LICENSE b/vendor/github.com/rueian/rueidis/LICENSE new file mode 100644 index 00000000000..989e2c59e97 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/rueian/rueidis/NOTICE b/vendor/github.com/rueian/rueidis/NOTICE new file mode 100644 index 00000000000..b60b62a711f --- /dev/null +++ b/vendor/github.com/rueian/rueidis/NOTICE @@ -0,0 +1,2 @@ +rueidis +Copyright 2021 Rueian (https://github.com/rueian) diff --git a/vendor/github.com/rueian/rueidis/README.md b/vendor/github.com/rueian/rueidis/README.md new file mode 100644 index 00000000000..8b6f95c0ff0 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/README.md @@ -0,0 +1,667 @@ +# rueidis + +[![Go Reference](https://pkg.go.dev/badge/github.com/rueian/rueidis.svg)](https://pkg.go.dev/github.com/rueian/rueidis) +[![Build status](https://badge.buildkite.com/d15fbd91b3b22b55c8d799564f84918a322118ae02590858c4.svg)](https://buildkite.com/rueian/rueidis) +[![Go Report Card](https://goreportcard.com/badge/github.com/rueian/rueidis)](https://goreportcard.com/report/github.com/rueian/rueidis) +[![codecov](https://codecov.io/gh/rueian/rueidis/branch/master/graph/badge.svg?token=wGTB8GdY06)](https://codecov.io/gh/rueian/rueidis) +[![Total alerts](https://img.shields.io/lgtm/alerts/g/rueian/rueidis.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/rueian/rueidis/alerts/) +[![Maintainability](https://api.codeclimate.com/v1/badges/0d93d524c2b8497aacbe/maintainability)](https://codeclimate.com/github/rueian/rueidis/maintainability) + +A Fast Golang Redis client that does auto pipelining and supports client side caching. + +## Features + +* Auto pipeline for non-blocking redis commands +* Connection pooling for blocking redis commands +* Opt-in client side caching in RESP3 +* Pub/Sub, Redis 7 Sharded Pub/Sub +* Redis Cluster, Sentinel, Streams, TLS, RedisJSON, RedisBloom, RediSearch, RedisGraph, RedisTimeseries, RedisAI, RedisGears +* IDE friendly redis command builder +* Generic Hash/RedisJSON Object Mapping with client side caching and optimistic locking +* OpenTelemetry tracing and metrics +* Distributed Locks with client side caching +* Helpers for writing tests with rueidis mock + +## Limitations + +Rueidis is built around RESP2 and RESP3 protocol, and supports almost all redis features. 
+However, the following features has not yet been implemented in RESP2 mode: + +* Client side caching only works in RESP3 and Redis >= 6.0 + +## Getting Started + +```golang +package main + +import ( + "context" + "github.com/rueian/rueidis" +) + +func main() { + c, err := rueidis.NewClient(rueidis.ClientOption{ + InitAddress: []string{"127.0.0.1:6379"}, + }) + if err != nil { + panic(err) + } + defer c.Close() + + ctx := context.Background() + + // SET key val NX + c.Do(ctx, c.B().Set().Key("key").Value("val").Nx().Build()).Error() + // GET key + c.Do(ctx, c.B().Get().Key("key").Build()).ToString() +} +``` + +## Auto Pipeline + +All non-blocking commands sending to a single redis node are automatically pipelined through connections, +which reduces the overall round trips and system calls, and gets higher throughput. + +### Benchmark comparison with go-redis v9 + +Rueidis has higher throughput than go-redis v9 across 1, 8, and 64 parallelism settings. + +It is even able to achieve ~14x throughput over go-redis in a local benchmark of Macbook Pro 16" M1 Pro 2021. (see `parallelism(64)-key(16)-value(64)-10`) + +#### Single Client +![client_test_set](https://github.com/rueian/rueidis-benchmark/blob/master/client_test_set_8.png) +#### Cluster Client +![cluster_test_set](https://github.com/rueian/rueidis-benchmark/blob/master/cluster_test_set_8.png) + +Benchmark source code: https://github.com/rueian/rueidis-benchmark + +There is also a benchmark result performed on two GCP n2-highcpu-2 machines shows that rueidis can achieve higher throughput with lower latencies: https://github.com/rueian/rueidis/pull/93 + +## Client Side Caching + +The Opt-In mode of server-assisted client side caching is enabled by default, and can be used by calling `DoCache()` or `DoMultiCache()` with +an explicit client side TTL. + +```golang +c.DoCache(ctx, c.B().Hmget().Key("myhash").Field("1", "2").Cache(), time.Minute).ToArray() +c.DoMultiCache(ctx, + rueidis.CT(c.B().Get().Key("k1").Cache(), 1*time.Minute), + rueidis.CT(c.B().Get().Key("k2").Cache(), 2*time.Minute)) +``` + +An explicit client side TTL is required because redis server may not send invalidation message in time when +a key is expired on the server. Please follow [#6833](https://github.com/redis/redis/issues/6833) and [#6867](https://github.com/redis/redis/issues/6867) + +Although an explicit client side TTL is required, the `DoCache()` and `DoMultiCache()` still sends a `PTTL` command to server and make sure that +the client side TTL is not longer than the TTL on server side. 
+ +Users can use `IsCacheHit()` to verify that if the response came from the client side memory: + +```golang +c.DoCache(ctx, c.B().Get().Key("k1").Cache(), time.Minute).IsCacheHit() == true +``` + +And use `CacheTTL()` to check the remaining client side TTL in seconds: + +```golang +c.DoCache(ctx, c.B().Get().Key("k1").Cache(), time.Minute).CacheTTL() == 60 +``` + +If the OpenTelemetry is enabled by the `rueidisotel.WithClient(client)`, then there are also two metrics instrumented: +* rueidis_do_cache_miss +* rueidis_do_cache_hits + +### Benchmark + +![client_test_get](https://github.com/rueian/rueidis-benchmark/blob/master/client_test_get_8.png) + +Benchmark source code: https://github.com/rueian/rueidis-benchmark + +### Supported Commands by Client Side Caching + +* bitcount +* bitfieldro +* bitpos +* expiretime +* geodist +* geohash +* geopos +* georadiusro +* georadiusbymemberro +* geosearch +* get +* mget +* getbit +* getrange +* hexists +* hget +* hgetall +* hkeys +* hlen +* hmget +* hstrlen +* hvals +* lindex +* llen +* lpos +* lrange +* pexpiretime +* pttl +* scard +* sismember +* smembers +* smismember +* sortro +* strlen +* ttl +* type +* zcard +* zcount +* zlexcount +* zmscore +* zrange +* zrangebylex +* zrangebyscore +* zrank +* zrevrange +* zrevrangebylex +* zrevrangebyscore +* zrevrank +* zscore +* jsonget +* jsonmget +* jsonstrlen +* jsonarrindex +* jsonarrlen +* jsonobjkeys +* jsonobjlen +* jsontype +* jsonresp +* bfexists +* bfinfo +* cfexists +* cfcount +* cfinfo +* cmsquery +* cmsinfo +* topkquery +* topklist +* topkinfo +* aitensorget +* aimodelget +* aimodelexecute +* aiscriptget + +### MGET/JSON.MGET Client Side Caching Helpers + +`rueidis.MGetCache` and `rueidis.JsonMGetCache` are handy helpers fetching multiple keys across different slots through the client side caching. +They will first group keys by slot to build `MGET` or `JSON.MGET` commands respectively and then send requests with only cache missed keys to redis nodes. + +### Broadcast Mode Client Side Caching + +Although the default is opt-in mode, you can use broadcast mode by specifying your prefixes in `ClientOption.ClientTrackingOptions`: + +```go +c, err := rueidis.NewClient(rueidis.ClientOption{ + InitAddress: []string{"127.0.0.1:6379"}, + ClientTrackingOptions: []string{"PREFIX", "prefix1:", "PREFIX", "prefix2:", "BCAST"}, +}) +if err != nil { + panic(err) +} +c.DoCache(ctx, c.B().Get().Key("prefix1:1").Cache(), time.Minute).IsCacheHit() == false +c.DoCache(ctx, c.B().Get().Key("prefix1:1").Cache(), time.Minute).IsCacheHit() == true +``` + +Please make sure that commands passed to `DoCache()` and `DoMultiCache()` are covered by your prefixes. +Otherwise, their client-side cache will not be invalidated by redis. + +### Disable Client Side Caching + +Some Redis provider doesn't support client-side caching, ex. Google Cloud Memorystore. +You can disable client-side caching by setting `ClientOption.DisableCache` to `true`. +This will also fall back `Client.DoCache/Client.DoMultiCache` to `Client.Do/Client.DoMulti`. 
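+
+A minimal sketch of constructing a client with caching disabled (using the `DisableCache` option described above):
+
+```golang
+c, err := rueidis.NewClient(rueidis.ClientOption{
+	InitAddress:  []string{"127.0.0.1:6379"},
+	DisableCache: true, // DoCache()/DoMultiCache() fall back to Do()/DoMulti()
+})
+if err != nil {
+	panic(err)
+}
+defer c.Close()
+```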
+ +## Blocking Commands + +The following blocking commands use another connection pool and will not share the same connection +with non-blocking commands and thus will not cause the pipeline to be blocked: + +* xread with block +* xreadgroup with block +* blpop +* brpop +* brpoplpush +* blmove +* blmpop +* bzpopmin +* bzpopmax +* bzmpop +* clientpause +* migrate +* wait + +## Context Cancellation + +`Client.Do()`, `Client.DoMulti()`, `Client.DoCache()` and `Client.DoMultiCache()` can return early if the context is canceled or the deadline is reached. + +```golang +ctx, cancel := context.WithTimeout(context.Background(), time.Second) +defer cancel() +c.Do(ctx, c.B().Set().Key("key").Value("val").Nx().Build()).Error() == context.DeadlineExceeded +``` + +Please note that though operations can return early, the command is likely sent already. + +## Pub/Sub + +To receive messages from channels, `Client.Receive()` should be used. It supports `SUBSCRIBE`, `PSUBSCRIBE` and Redis 7.0's `SSUBSCRIBE`: + +```golang +err = c.Receive(context.Background(), c.B().Subscribe().Channel("ch1", "ch2").Build(), func(msg rueidis.PubSubMessage) { + // handle the msg +}) +``` + +The provided handler will be called with received message. + +It is important to note that `Client.Receive()` will keep blocking and return only when the following cases: +1. return `nil` when received any unsubscribe/punsubscribe message related to the provided `subscribe` command. +2. return `rueidis.ErrClosing` when the client is closed manually. +3. return `ctx.Err()` when the `ctx` is done. +4. return non-nil `err` when the provided `subscribe` command failed. + +While the `Client.Receive()` call is blocking, the `Client` is still able to accept other concurrent requests, +and they are sharing the same tcp connection. If your message handler may take some time to complete, it is recommended +to use the `Client.Receive()` inside a `Client.Dedicated()` for not blocking other concurrent requests. + +### Alternative PubSub Hooks + +The `Client.Receive()` requires users to provide a subscription command in advance. +There is an alternative `DedicatedClient.SetPubSubHooks()` allows users to subscribe/unsubscribe channels later. + +```golang +client, cancel := c.Dedicate() +defer cancel() + +wait := client.SetPubSubHooks(rueidis.PubSubHooks{ + OnMessage: func(m rueidis.PubSubMessage) { + // Handle message. This callback will be called sequentially, but in another goroutine. + } +}) +client.Do(ctx, client.B().Subscribe().Channel("ch").Build()) +err := <-wait // disconnected with err +``` + +If the hooks are not nil, the above `wait` channel is guaranteed to be close when the hooks will not be called anymore, +and produce at most one error describing the reason. Users can use this channel to detect disconnection. + +## CAS Pattern + +To do a CAS operation (WATCH + MULTI + EXEC), a dedicated connection should be used, because there should be no +unintentional write commands between WATCH and EXEC. Otherwise, the EXEC may not fail as expected. + +The dedicated connection shares the same connection pool with blocking commands. 
+ +```golang +c.Dedicated(func(client client.DedicatedClient) error { + // watch keys first + client.Do(ctx, client.B().Watch().Key("k1", "k2").Build()) + // perform read here + client.Do(ctx, client.B().Mget().Key("k1", "k2").Build()) + // perform write with MULTI EXEC + client.DoMulti( + ctx, + client.B().Multi().Build(), + client.B().Set().Key("k1").Value("1").Build(), + client.B().Set().Key("k2").Value("2").Build(), + client.B().Exec().Build(), + ) + return nil +}) + +``` + +Or use Dedicate and invoke `cancel()` when finished to put the connection back to the pool. + +``` golang +client, cancel := c.Dedicate() +defer cancel() + +// watch keys first +client.Do(ctx, client.B().Watch().Key("k1", "k2").Build()) +// perform read here +client.Do(ctx, client.B().Mget().Key("k1", "k2").Build()) +// perform write with MULTI EXEC +client.DoMulti( + ctx, + client.B().Multi().Build(), + client.B().Set().Key("k1").Value("1").Build(), + client.B().Set().Key("k2").Value("2").Build(), + client.B().Exec().Build(), +) +``` + +However, occupying a connection is not good in terms of throughput. It is better to use Lua script to perform +optimistic locking instead. + +## Memory Consumption Consideration + +Each underlying connection in rueidis allocates a ring buffer for pipelining. +Its size is controlled by the `ClientOption.RingScaleEachConn` and the default value is 10 which results into each ring of size 2^10. + +If you have many rueidis connections, you may find that they occupy quite amount of memory. +In that case, you may consider reducing `ClientOption.RingScaleEachConn` to 8 or 9 at the cost of potential throughput degradation. + +## Bulk Operations + +The `rueidis.Commands` and `DoMulti()` can also be used for bulk operations: + +``` golang +cmds := make(rueidis.Commands, 0, 10) +for i := 0; i < 10; i++ { + cmds = append(cmds, c.B().Set().Key(strconv.Itoa(i)).Value(strconv.Itoa(i)).Build()) +} +for _, resp := range c.DoMulti(ctx, cmds...) { + if err := resp.Error(); err != nil { + panic(err) + } +} +``` + +## Lua Script + +The `NewLuaScript` or `NewLuaScriptReadOnly` will create a script which is safe for concurrent usage. + +When calling the `script.Exec`, it will try sending EVALSHA to the client and if the server returns NOSCRIPT, +it will send EVAL to try again. + +```golang +script := rueidis.NewLuaScript("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}") +// the script.Exec is safe for concurrent call +list, err := script.Exec(ctx, client, []string{"k1", "k2"}, []string{"a1", "a2"}).ToArray() +``` + +## Redis Cluster, Single Redis and Sentinel + +To connect to a redis cluster, the `NewClient` should be used: + +```golang +c, err := rueidis.NewClient(rueidis.ClientOption{ + InitAddress: []string{"127.0.0.1:7001", "127.0.0.1:7002", "127.0.0.1:7003"}, + ShuffleInit: true, +}) +``` + +To connect to a single redis node, still use the `NewClient` with one InitAddress + +```golang +c, err := rueidis.NewClient(rueidis.ClientOption{ + InitAddress: []string{"127.0.0.1:6379"}, +}) +``` + +To connect to sentinels, specify the required master set name: + +```golang +c, err := rueidis.NewClient(rueidis.ClientOption{ + InitAddress: []string{"127.0.0.1:26379", "127.0.0.1:26380", "127.0.0.1:26381"}, + Sentinel: rueidis.SentinelOption{ + MasterSet: "my_master", + }, +}) +``` + +## Command Builder + +Redis commands are very complex and their formats are very different from each other. 
+ +This library provides a type safe command builder within `client.B()` that can be used as +an entrypoint to construct a redis command. Once the command is completed, call the `Build()` or `Cache()` to get the actual command. +And then pass it to either `Client.Do()` or `Client.DoCache()`. + +```golang +c.Do(ctx, c.B().Set().Key("mykey").Value("myval").Ex(10).Nx().Build()) +``` + +**Once the command is passed to the `Client.Do()`, `Client.DoCache()`, the command will be recycled and SHOULD NOT be reused.** + +**The `ClusterClient.B()` also checks if the command contains multiple keys belongs to different slots. If it does, then panic.** + +### Arbitrary command + +If you want to construct commands that are not yet supported, you can use `c.B().Arbitrary()`: + +```golang +// This will result into [ANY CMD k1 k2 a1 a2] +c.B().Arbitrary("ANY", "CMD").Keys("k1", "k2").Args("a1", "a2").Build() +``` + +### Working with JSON string and `[]byte` + +The command builder treats all the parameters as Redis strings, which are binary safe. This means that users can store `[]byte` +directly into Redis without conversion. And the `rueidis.BinaryString` helper can convert `[]byte` to `string` without copy. For example: + +```golang +client.B().Set().Key("b").Value(rueidis.BinaryString([]byte{...})).Build() +``` + +Treating all the parameters as Redis strings also means that the command builder doesn't do any quoting, conversion automatically for users. + +When working with RedisJSON, users frequently need to prepare JSON string in Redis string. And `rueidis.JSON` can help: + +```golang +client.B().JsonSet().Key("j").Path("$.myStrField").Value(rueidis.JSON("str")).Build() +// equivalent to +client.B().JsonSet().Key("j").Path("$.myStrField").Value(`"str"`).Build() +``` + +## High level go-redis like API + +Though it is easier to know what command will be sent to redis at first glance if the command is constructed by the command builder, +users may sometimes feel it too verbose to write. + +For users who don't like the command builder, `rueidiscompat.Adapter`, contributed mainly by [@418Coffee](https://github.com/418Coffee), is an alternative. +It is a high level API which is close to go-redis's `Cmdable` interface. + +### Migrating from go-redis + +You can also try adapting `rueidis` with existing go-redis code by replacing go-redis's `UniversalClient` with `rueidiscompat.Adapter`. + +### Client side caching + +To use client side caching with `rueidiscompat.Adapter`, chain `Cache(ttl)` call in front of supported command. + +```golang +package main + +import ( + "context" + "time" + "github.com/rueian/rueidis" + "github.com/rueian/rueidis/rueidiscompat" +) + +func main() { + ctx := context.Background() + client, err := rueidis.NewClient(rueidis.ClientOption{InitAddress: []string{"127.0.0.1:6379"}}) + if err != nil { + panic(err) + } + defer client.Close() + + compat := rueidiscompat.NewAdapter(client) + ok, _ := compat.SetNX(ctx, "key", "val", time.Second).Result() + + // with client side caching + res, _ := compat.Cache(time.Second).Get(ctx, "key").Result() +} +``` + +## Generic Object Mapping + +The `NewHashRepository` and `NewJSONRepository` creates an OM repository backed by redis hash or RedisJSON. 
+ +```golang +package main + +import ( + "context" + "fmt" + "time" + + "github.com/rueian/rueidis" + "github.com/rueian/rueidis/om" +) + +type Example struct { + Key string `json:"key" redis:",key"` // the redis:",key" is required to indicate which field is the ULID key + Ver int64 `json:"ver" redis:",ver"` // the redis:",ver" is required to do optimistic locking to prevent lost update + Str string `json:"myStr"` // both NewHashRepository and NewJSONRepository use json tag as field name +} + +func main() { + ctx := context.Background() + c, err := rueidis.NewClient(rueidis.ClientOption{InitAddress: []string{"127.0.0.1:6379"}}) + if err != nil { + panic(err) + } + // create the repo with NewHashRepository or NewJSONRepository + repo := om.NewHashRepository("my_prefix", Example{}, c) + + exp := repo.NewEntity() + exp.Str = "mystr" + fmt.Println(exp.Key) // output 01FNH4FCXV9JTB9WTVFAAKGSYB + repo.Save(ctx, exp) // success + + // lookup "my_prefix:01FNH4FCXV9JTB9WTVFAAKGSYB" through client side caching + exp2, _ := repo.FetchCache(ctx, exp.Key, time.Second*5) + fmt.Println(exp2.Str) // output "mystr", which equals to exp.Str + + exp2.Ver = 0 // if someone changes the version during your GET then SET operation, + repo.Save(ctx, exp2) // the save will fail with ErrVersionMismatch. +} + +``` + +### Object Mapping + RediSearch + +If you have RediSearch, you can create and search the repository against the index. + +```golang + +if _, ok := repo.(*om.HashRepository[Example]); ok { + repo.CreateIndex(ctx, func(schema om.FtCreateSchema) om.Completed { + return schema.FieldName("myStr").Text().Build() // Note that the Example.Str field is mapped to myStr on redis by its json tag + }) +} + +if _, ok := repo.(*om.JSONRepository[Example]); ok { + repo.CreateIndex(ctx, func(schema om.FtCreateSchema) om.Completed { + return schema.FieldName("$.myStr").Text().Build() // the field name of json index should be a json path syntax + }) +} + +exp := repo.NewEntity() +exp.Str = "foo" +repo.Save(ctx, exp) + +n, records, _ := repo.Search(ctx, func(search om.FtSearchIndex) om.Completed { + return search.Query("foo").Build() // you have full query capability by building the command from om.FtSearchIndex +}) + +fmt.Println("total", n) // n is total number of results matched in redis, which is >= len(records) + +for _, v := range records { + fmt.Println(v.Str) // print "foo" +} +``` + +### Object Mapping Limitation + +`NewHashRepository` only accepts these field types: +* `string`, `*string` +* `int64`, `*int64` +* `bool`, `*bool` +* `[]byte` + +Field projection by RediSearch is not supported. + +## OpenTelemetry Tracing + +Use `rueidisotel.WithClient` to create a client with OpenTelemetry Tracing enabled. + +```golang +package main + +import ( + "github.com/rueian/rueidis" + "github.com/rueian/rueidis/rueidisotel" +) + +func main() { + client, err := rueidis.NewClient(rueidis.ClientOption{InitAddress: []string{"127.0.0.1:6379"}}) + if err != nil { + panic(err) + } + client = rueidisotel.WithClient(client) + defer client.Close() +} +``` + +## Hooks and Other Observability Integration + +In addition to `rueidisotel`, `rueidishook` provides a general hook mechanism for users to intercept `rueidis.Client` interface. + +See [rueidishook](./rueidishook) for more details. + +## Distributed Locks with client side caching + +See [rueidislock](./rueidislock) for more details. + +## Writing tests by mocking rueidis + +See [mock](./mock) for more details. 
+ +## Command Response Cheatsheet + +It is hard to remember what message type is returned from redis and which parsing method should be used with. So, here is some common examples: + +```golang +// GET +client.Do(ctx, client.B().Get().Key("k").Build()).ToString() +client.Do(ctx, client.B().Get().Key("k").Build()).AsInt64() +// MGET +client.Do(ctx, client.B().Mget().Key("k1", "k2").Build()).ToArray() +// SET +client.Do(ctx, client.B().Set().Key("k").Value("v").Build()).Error() +// INCR +client.Do(ctx, client.B().Incr().Key("k").Build()).AsInt64() +// HGET +client.Do(ctx, client.B().Hget().Key("k").Field("f").Build()).ToString() +// HMGET +client.Do(ctx, client.B().Hmget().Key("h").Field("a", "b").Build()).ToArray() +// HGETALL +client.Do(ctx, client.B().Hgetall().Key("h").Build()).AsStrMap() +// ZRANGE +client.Do(ctx, client.B().Zrange().Key("k").Min("1").Max("2").Build()).AsStrSlice() +// ZRANK +client.Do(ctx, client.B().Zrank().Key("k").Member("m").Build()).AsInt64() +// ZSCORE +client.Do(ctx, client.B().Zscore().Key("k").Member("m").Build()).AsFloat64() +// ZRANGE +client.Do(ctx, client.B().Zrange().Key("k").Min("0").Max("-1").Build()).AsStrSlice() +client.Do(ctx, client.B().Zrange().Key("k").Min("0").Max("-1").Withscores().Build()).AsZScores() +// ZPOPMIN +client.Do(ctx, client.B().Zpopmin().Key("k").Build()).AsZScore() +client.Do(ctx, client.B().Zpopmin().Key("myzset").Count(2).Build()).AsZScores() +// SCARD +client.Do(ctx, client.B().Scard().Key("k").Build()).AsInt64() +// SMEMBERS +client.Do(ctx, client.B().Smembers().Key("k").Build()).AsStrSlice() +// LINDEX +client.Do(ctx, client.B().Lindex().Key("k").Index(0).Build()).ToString() +// LPOP +client.Do(ctx, client.B().Lpop().Key("k").Build()).ToString() +client.Do(ctx, client.B().Lpop().Key("k").Count(2).Build()).AsStrSlice() +``` diff --git a/vendor/github.com/rueian/rueidis/binary.go b/vendor/github.com/rueian/rueidis/binary.go new file mode 100644 index 00000000000..c0aefa9cf6c --- /dev/null +++ b/vendor/github.com/rueian/rueidis/binary.go @@ -0,0 +1,26 @@ +package rueidis + +import ( + "encoding/json" + "unsafe" +) + +// BinaryString convert the provided []byte into a string without copy. It does what strings.Builder.String() does. +// Redis Strings are binary safe, this means that it is safe to store any []byte into Redis directly. +// Users can use this BinaryString helper to insert a []byte as the part of redis command. For example: +// client.B().Set().Key(rueidis.BinaryString([]byte{0})).Value(rueidis.BinaryString([]byte{0})).Build() +// To read back the []byte of the string returned from the Redis, it is recommended to use the RedisMessage.AsReader. +func BinaryString(bs []byte) string { + return *(*string)(unsafe.Pointer(&bs)) +} + +// JSON convert the provided parameter into a JSON string. Users can use this JSON helper to work with RedisJSON commands. 
+// For example: +// client.B().JsonSet().Key("a").Path("$.myField").Value(rueidis.JSON("str")).Build() +func JSON(in any) string { + bs, err := json.Marshal(in) + if err != nil { + panic(err) + } + return BinaryString(bs) +} diff --git a/vendor/github.com/rueian/rueidis/client.go b/vendor/github.com/rueian/rueidis/client.go new file mode 100644 index 00000000000..ef6e124ad79 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/client.go @@ -0,0 +1,250 @@ +package rueidis + +import ( + "context" + "sync/atomic" + "time" + + "github.com/rueian/rueidis/internal/cmds" +) + +type singleClient struct { + conn conn + stop uint32 + cmd cmds.Builder + retry bool +} + +func newSingleClient(opt *ClientOption, prev conn, connFn connFn) (*singleClient, error) { + if len(opt.InitAddress) == 0 { + return nil, ErrNoAddr + } + + conn := connFn(opt.InitAddress[0], opt) + conn.Override(prev) + if err := conn.Dial(); err != nil { + return nil, err + } + return newSingleClientWithConn(conn, cmds.NewBuilder(cmds.NoSlot), !opt.DisableRetry), nil +} + +func newSingleClientWithConn(conn conn, builder cmds.Builder, retry bool) *singleClient { + return &singleClient{cmd: builder, conn: conn, retry: retry} +} + +func (c *singleClient) B() cmds.Builder { + return c.cmd +} + +func (c *singleClient) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { +retry: + resp = c.conn.Do(ctx, cmd) + if c.retry && cmd.IsReadOnly() && c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + if resp.NonRedisError() == nil { // not recycle cmds if error, since cmds may be used later in pipe. consider recycle them by pipe + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *singleClient) DoMulti(ctx context.Context, multi ...cmds.Completed) (resps []RedisResult) { + if len(multi) == 0 { + return nil + } +retry: + resps = c.conn.DoMulti(ctx, multi...) + if c.retry && allReadOnly(multi) { + for _, resp := range resps { + if c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + } + } + for i, cmd := range multi { + if resps[i].NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + } + return resps +} + +func (c *singleClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (resps []RedisResult) { + if len(multi) == 0 { + return nil + } +retry: + resps = c.conn.DoMultiCache(ctx, multi...) 
+ if c.retry { + for _, resp := range resps { + if c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + } + } + for i, cmd := range multi { + if err := resps[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { + cmds.Put(cmd.Cmd.CommandSlice()) + } + } + return resps +} + +func (c *singleClient) DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) (resp RedisResult) { +retry: + resp = c.conn.DoCache(ctx, cmd, ttl) + if c.retry && c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + if err := resp.NonRedisError(); err == nil || err == ErrDoCacheAborted { + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *singleClient) Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) (err error) { +retry: + err = c.conn.Receive(ctx, subscribe, fn) + if c.retry { + if _, ok := err.(*RedisError); !ok && c.isRetryable(err, ctx) { + goto retry + } + } + if err == nil { + cmds.Put(subscribe.CommandSlice()) + } + return err +} + +func (c *singleClient) Dedicated(fn func(DedicatedClient) error) (err error) { + wire := c.conn.Acquire() + dsc := &dedicatedSingleClient{cmd: c.cmd, conn: c.conn, wire: wire, retry: c.retry} + err = fn(dsc) + dsc.release() + return err +} + +func (c *singleClient) Dedicate() (DedicatedClient, func()) { + wire := c.conn.Acquire() + dsc := &dedicatedSingleClient{cmd: c.cmd, conn: c.conn, wire: wire, retry: c.retry} + return dsc, dsc.release +} + +func (c *singleClient) Nodes() map[string]Client { + return map[string]Client{c.conn.Addr(): c} +} + +func (c *singleClient) Close() { + atomic.StoreUint32(&c.stop, 1) + c.conn.Close() +} + +type dedicatedSingleClient struct { + conn conn + wire wire + mark uint32 + cmd cmds.Builder + + retry bool +} + +func (c *dedicatedSingleClient) B() cmds.Builder { + return c.cmd +} + +func (c *dedicatedSingleClient) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { +retry: + resp = c.wire.Do(ctx, cmd) + if c.retry && cmd.IsReadOnly() && isRetryable(resp.NonRedisError(), c.wire, ctx) { + goto retry + } + if resp.NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *dedicatedSingleClient) DoMulti(ctx context.Context, multi ...cmds.Completed) (resp []RedisResult) { + if len(multi) == 0 { + return nil + } + retryable := c.retry + if retryable { + retryable = allReadOnly(multi) + } +retry: + resp = c.wire.DoMulti(ctx, multi...) 
+ if retryable && anyRetryable(resp, c.wire, ctx) { + goto retry + } + for i, cmd := range multi { + if resp[i].NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + } + return resp +} + +func (c *dedicatedSingleClient) Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) (err error) { +retry: + err = c.wire.Receive(ctx, subscribe, fn) + if c.retry { + if _, ok := err.(*RedisError); !ok && isRetryable(err, c.wire, ctx) { + goto retry + } + } + if err == nil { + cmds.Put(subscribe.CommandSlice()) + } + return err +} + +func (c *dedicatedSingleClient) SetPubSubHooks(hooks PubSubHooks) <-chan error { + return c.wire.SetPubSubHooks(hooks) +} + +func (c *dedicatedSingleClient) Close() { + c.wire.Close() + c.release() +} + +func (c *dedicatedSingleClient) release() { + if atomic.CompareAndSwapUint32(&c.mark, 0, 1) { + c.conn.Store(c.wire) + } +} + +func (c *singleClient) isRetryable(err error, ctx context.Context) bool { + return err != nil && atomic.LoadUint32(&c.stop) == 0 && ctx.Err() == nil +} + +func isRetryable(err error, w wire, ctx context.Context) bool { + return err != nil && w.Error() == nil && ctx.Err() == nil +} + +func anyRetryable(resp []RedisResult, w wire, ctx context.Context) bool { + for _, r := range resp { + if isRetryable(r.NonRedisError(), w, ctx) { + return true + } + } + return false +} + +func allReadOnly(multi []cmds.Completed) bool { + for _, cmd := range multi { + if cmd.IsWrite() { + return false + } + } + return true +} + +func allSameSlot(multi []cmds.Completed) bool { + for i := 1; i < len(multi); i++ { + if multi[0].Slot() != multi[i].Slot() { + return false + } + } + return true +} diff --git a/vendor/github.com/rueian/rueidis/cluster.go b/vendor/github.com/rueian/rueidis/cluster.go new file mode 100644 index 00000000000..6011caa4ef6 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/cluster.go @@ -0,0 +1,764 @@ +package rueidis + +import ( + "context" + "errors" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/rueian/rueidis/internal/cmds" + "github.com/rueian/rueidis/internal/util" +) + +// ErrNoSlot indicates that there is no redis node owns the key slot. 
+var ErrNoSlot = errors.New("the slot has no redis node") + +type clusterClient struct { + slots [16384]conn + opt *ClientOption + conns map[string]conn + connFn connFn + sc call + mu sync.RWMutex + stop uint32 + cmd cmds.Builder + retry bool +} + +func newClusterClient(opt *ClientOption, connFn connFn) (client *clusterClient, err error) { + client = &clusterClient{ + cmd: cmds.NewBuilder(cmds.InitSlot), + opt: opt, + connFn: connFn, + conns: make(map[string]conn), + retry: !opt.DisableRetry, + } + + if err = client.init(); err != nil { + return nil, err + } + + if err = client.refresh(); err != nil { + return client, err + } + + return client, nil +} + +func (c *clusterClient) init() error { + if len(c.opt.InitAddress) == 0 { + return ErrNoAddr + } + results := make(chan error, len(c.opt.InitAddress)) + for _, addr := range c.opt.InitAddress { + cc := c.connFn(addr, c.opt) + go func(addr string, cc conn) { + if err := cc.Dial(); err == nil { + c.mu.Lock() + if _, ok := c.conns[addr]; ok { + go cc.Close() // abort the new connection instead of closing the old one which may already been used + } else { + c.conns[addr] = cc + } + c.mu.Unlock() + results <- nil + } else { + results <- err + } + }(addr, cc) + } + es := make([]error, cap(results)) + for i := 0; i < cap(results); i++ { + if err := <-results; err == nil { + return nil + } else { + es[i] = err + } + } + return es[0] +} + +func (c *clusterClient) refresh() (err error) { + return c.sc.Do(c._refresh) +} + +func (c *clusterClient) _refresh() (err error) { + var reply RedisMessage + +retry: + c.mu.RLock() + results := make(chan RedisResult, len(c.conns)) + for _, cc := range c.conns { + go func(c conn) { results <- c.Do(context.Background(), cmds.SlotCmd) }(cc) + } + c.mu.RUnlock() + + for i := 0; i < cap(results); i++ { + if reply, err = (<-results).ToMessage(); len(reply.values) != 0 { + break + } + } + + if err != nil { + return err + } + + if len(reply.values) == 0 { + if err = c.init(); err != nil { + return err + } + goto retry + } + + groups := parseSlots(reply) + + // TODO support read from replicas + conns := make(map[string]conn, len(groups)) + for _, g := range groups { + for _, addr := range g.nodes { + conns[addr] = c.connFn(addr, c.opt) + } + } + // make sure InitAddress always be present + for _, addr := range c.opt.InitAddress { + if _, ok := conns[addr]; !ok { + conns[addr] = c.connFn(addr, c.opt) + } + } + + var removes []conn + + c.mu.RLock() + for addr, cc := range c.conns { + if _, ok := conns[addr]; ok { + conns[addr] = cc + } else { + removes = append(removes, cc) + } + } + c.mu.RUnlock() + + slots := [16384]conn{} + for master, g := range groups { + cc := conns[master] + for _, slot := range g.slots { + for i := slot[0]; i <= slot[1]; i++ { + slots[i] = cc + } + } + } + + c.mu.Lock() + c.slots = slots + c.conns = conns + c.mu.Unlock() + + for _, cc := range removes { + go cc.Close() + } + + return nil +} + +func (c *clusterClient) single() conn { + return c._pick(cmds.InitSlot) +} + +func (c *clusterClient) nodes() []string { + c.mu.RLock() + nodes := make([]string, 0, len(c.conns)) + for addr := range c.conns { + nodes = append(nodes, addr) + } + c.mu.RUnlock() + return nodes +} + +type group struct { + nodes []string + slots [][2]int64 +} + +func parseSlots(slots RedisMessage) map[string]group { + groups := make(map[string]group, len(slots.values)) + for _, v := range slots.values { + master := fmt.Sprintf("%s:%d", v.values[2].values[0].string, v.values[2].values[1].integer) + g, ok := groups[master] + if !ok 
{ + g.slots = make([][2]int64, 0) + g.nodes = make([]string, 0, len(v.values)-2) + for i := 2; i < len(v.values); i++ { + dst := fmt.Sprintf("%s:%d", v.values[i].values[0].string, v.values[i].values[1].integer) + g.nodes = append(g.nodes, dst) + } + } + g.slots = append(g.slots, [2]int64{v.values[0].integer, v.values[1].integer}) + groups[master] = g + } + return groups +} + +func (c *clusterClient) _pick(slot uint16) (p conn) { + c.mu.RLock() + if slot == cmds.InitSlot { + for _, cc := range c.conns { + p = cc + break + } + } else { + p = c.slots[slot] + } + c.mu.RUnlock() + return p +} + +func (c *clusterClient) pick(slot uint16) (p conn, err error) { + if p = c._pick(slot); p == nil { + if err := c.refresh(); err != nil { + return nil, err + } + if p = c._pick(slot); p == nil { + return nil, ErrNoSlot + } + } + return p, nil +} + +func (c *clusterClient) redirectOrNew(addr string) (p conn) { + c.mu.RLock() + p = c.conns[addr] + c.mu.RUnlock() + if p != nil && p.Addr() != addr { + return p + } + c.mu.Lock() + if p = c.conns[addr]; p == nil { + p = c.connFn(addr, c.opt) + c.conns[addr] = p + } else if p.Addr() == addr { + // try reconnection if the MOVED redirects to the same host, + // because the same hostname may actually be resolved into another destination + // depending on the fail-over implementation. ex: AWS MemoryDB's resize process. + go p.Close() + p = c.connFn(addr, c.opt) + c.conns[addr] = p + } + c.mu.Unlock() + return p +} + +func (c *clusterClient) B() cmds.Builder { + return c.cmd +} + +func (c *clusterClient) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { +retry: + cc, err := c.pick(cmd.Slot()) + if err != nil { + resp = newErrResult(err) + goto ret + } + resp = cc.Do(ctx, cmd) +process: + switch addr, mode := c.shouldRefreshRetry(resp.Error(), ctx); mode { + case RedirectMove: + resp = c.redirectOrNew(addr).Do(ctx, cmd) + goto process + case RedirectAsk: + resp = c.redirectOrNew(addr).DoMulti(ctx, cmds.AskingCmd, cmd)[1] + goto process + case RedirectRetry: + if c.retry && cmd.IsReadOnly() { + runtime.Gosched() + goto retry + } + } +ret: + if resp.NonRedisError() == nil { // not recycle cmds if error, since cmds may be used later in pipe. consider recycle them by pipe + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *clusterClient) DoMulti(ctx context.Context, multi ...cmds.Completed) (results []RedisResult) { + if len(multi) == 0 { + return nil + } + results = make([]RedisResult, len(multi)) + slots := make(map[uint16]int, 16) + for _, cmd := range multi { + slots[cmd.Slot()]++ + } + if slots[cmds.InitSlot] > 0 && len(slots) > 2 { + panic(panicMixCxSlot) + } + commands := make(map[uint16][]cmds.Completed, len(slots)) + cIndexes := make(map[uint16][]int, len(slots)) + if len(slots) == 2 && slots[cmds.InitSlot] > 0 { + delete(slots, cmds.InitSlot) + for slot := range slots { + commands[slot] = make([]cmds.Completed, 0, len(multi)+2) + commands[slot] = append(commands[slot], cmds.MultiCmd) + commands[slot] = append(commands[slot], multi...) 
+ commands[slot] = append(commands[slot], cmds.ExecCmd) + } + } else { + for slot, count := range slots { + cIndexes[slot] = make([]int, 0, count) + commands[slot] = make([]cmds.Completed, 0, count+2) + commands[slot] = append(commands[slot], cmds.MultiCmd) + } + for i, cmd := range multi { + slot := cmd.Slot() + commands[slot] = append(commands[slot], cmd) + cIndexes[slot] = append(cIndexes[slot], i) + } + for slot := range slots { + commands[slot] = append(commands[slot], cmds.ExecCmd) + } + } + + util.ParallelKeys(commands, func(slot uint16) { + c.doMulti(ctx, slot, commands[slot], cIndexes[slot], results) + }) + + for i, cmd := range multi { + if results[i].NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + } + return results +} + +func fillErrs(idx []int, results []RedisResult, err error) { + if idx == nil { + for i := range results { + results[i] = newErrResult(err) + } + } else { + for _, i := range idx { + results[i] = newErrResult(err) + } + } +} + +func (c *clusterClient) doMulti(ctx context.Context, slot uint16, multi []cmds.Completed, idx []int, results []RedisResult) { +retry: + cc, err := c.pick(slot) + if err != nil { + fillErrs(idx, results, err) + return + } + resps := cc.DoMulti(ctx, multi...) +process: + for _, resp := range resps { + switch addr, mode := c.shouldRefreshRetry(resp.Error(), ctx); mode { + case RedirectMove: + resps = c.redirectOrNew(addr).DoMulti(ctx, multi...) + goto process + case RedirectAsk: + resps = c.redirectOrNew(addr).DoMulti(ctx, append([]cmds.Completed{cmds.AskingCmd}, multi...)...)[1:] + goto process + case RedirectRetry: + if c.retry && allReadOnly(multi[1:len(multi)-1]) { + runtime.Gosched() + goto retry + } + } + } + msgs, err := resps[len(resps)-1].ToArray() + if err != nil { + fillErrs(idx, results, err) + return + } + if idx == nil { + for i, msg := range msgs { + results[i] = newResult(msg, nil) + } + } else { + for i, msg := range msgs { + results[idx[i]] = newResult(msg, nil) + } + } +} + +func (c *clusterClient) doCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) (resp RedisResult) { +retry: + cc, err := c.pick(cmd.Slot()) + if err != nil { + return newErrResult(err) + } + resp = cc.DoCache(ctx, cmd, ttl) +process: + switch addr, mode := c.shouldRefreshRetry(resp.Error(), ctx); mode { + case RedirectMove: + resp = c.redirectOrNew(addr).DoCache(ctx, cmd, ttl) + goto process + case RedirectAsk: + // TODO ASKING OPT-IN Caching + resp = c.redirectOrNew(addr).DoMulti(ctx, cmds.AskingCmd, cmds.Completed(cmd))[1] + goto process + case RedirectRetry: + if c.retry { + runtime.Gosched() + goto retry + } + } + return resp +} + +func (c *clusterClient) DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) (resp RedisResult) { + resp = c.doCache(ctx, cmd, ttl) + if err := resp.NonRedisError(); err == nil || err == ErrDoCacheAborted { + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *clusterClient) doMultiCache(ctx context.Context, slot uint16, multi []CacheableTTL, idx []int, results []RedisResult) { +retry: + cc, err := c.pick(slot) + if err != nil { + fillErrs(idx, results, err) + return + } + resps := cc.DoMultiCache(ctx, multi...) +process: + for _, resp := range resps { + switch addr, mode := c.shouldRefreshRetry(resp.Error(), ctx); mode { + case RedirectMove: + resps = c.redirectOrNew(addr).DoMultiCache(ctx, multi...) 
+ goto process + case RedirectAsk: + commands := make([]cmds.Completed, 0, len(multi)+3) + commands = append(commands, cmds.AskingCmd, cmds.MultiCmd) + for _, cmd := range multi { + commands = append(commands, cmds.Completed(cmd.Cmd)) + } + commands = append(commands, cmds.ExecCmd) + if asked, err := c.redirectOrNew(addr).DoMulti(ctx, commands...)[len(commands)-1].ToArray(); err != nil { + for i := range resps { + resps[i] = newErrResult(err) + } + } else { + for i, ret := range asked { + resps[i] = newResult(ret, nil) + } + } + goto process + case RedirectRetry: + if c.retry { + runtime.Gosched() + goto retry + } + } + } + if idx == nil { + copy(results, resps) + } else { + for i, resp := range resps { + results[idx[i]] = resp + } + } +} + +func (c *clusterClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (results []RedisResult) { + if len(multi) == 0 { + return nil + } + results = make([]RedisResult, len(multi)) + slots := make(map[uint16]int, 16) + for _, cmd := range multi { + slots[cmd.Cmd.Slot()]++ + } + commands := make(map[uint16][]CacheableTTL, len(slots)) + cIndexes := make(map[uint16][]int, len(slots)) + if len(slots) == 1 { + for slot := range slots { + commands[slot] = multi + } + } else { + for slot, count := range slots { + cIndexes[slot] = make([]int, 0, count) + commands[slot] = make([]CacheableTTL, 0, count) + } + for i, cmd := range multi { + slot := cmd.Cmd.Slot() + commands[slot] = append(commands[slot], cmd) + cIndexes[slot] = append(cIndexes[slot], i) + } + } + + util.ParallelKeys(commands, func(slot uint16) { + c.doMultiCache(ctx, slot, commands[slot], cIndexes[slot], results) + }) + + for i, cmd := range multi { + if err := results[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { + cmds.Put(cmd.Cmd.CommandSlice()) + } + } + return results +} + +func (c *clusterClient) Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) (err error) { +retry: + cc, err := c.pick(subscribe.Slot()) + if err != nil { + goto ret + } + err = cc.Receive(ctx, subscribe, fn) + if _, mode := c.shouldRefreshRetry(err, ctx); c.retry && mode != RedirectNone { + runtime.Gosched() + goto retry + } +ret: + if err == nil { + cmds.Put(subscribe.CommandSlice()) + } + return err +} + +func (c *clusterClient) Dedicated(fn func(DedicatedClient) error) (err error) { + dcc := &dedicatedClusterClient{cmd: c.cmd, client: c, slot: cmds.NoSlot, retry: c.retry} + err = fn(dcc) + dcc.release() + return err +} + +func (c *clusterClient) Dedicate() (DedicatedClient, func()) { + dcc := &dedicatedClusterClient{cmd: c.cmd, client: c, slot: cmds.NoSlot, retry: c.retry} + return dcc, dcc.release +} + +func (c *clusterClient) Nodes() map[string]Client { + c.mu.RLock() + nodes := make(map[string]Client, len(c.conns)) + for addr, conn := range c.conns { + nodes[addr] = newSingleClientWithConn(conn, c.cmd, c.retry) + } + c.mu.RUnlock() + return nodes +} + +func (c *clusterClient) Close() { + atomic.StoreUint32(&c.stop, 1) + c.mu.RLock() + for _, cc := range c.conns { + go cc.Close() + } + c.mu.RUnlock() +} + +func (c *clusterClient) shouldRefreshRetry(err error, ctx context.Context) (addr string, mode RedirectMode) { + if err != nil && atomic.LoadUint32(&c.stop) == 0 { + if err, ok := err.(*RedisError); ok { + if addr, ok = err.IsMoved(); ok { + mode = RedirectMove + } else if addr, ok = err.IsAsk(); ok { + mode = RedirectAsk + } else if err.IsClusterDown() || err.IsTryAgain() { + mode = RedirectRetry + } + } else { + mode = RedirectRetry + } + if mode != 
RedirectNone { + go c.refresh() + } + if mode == RedirectRetry && ctx.Err() != nil { + mode = RedirectNone + } + } + return +} + +type dedicatedClusterClient struct { + client *clusterClient + conn conn + wire wire + pshks *pshks + + mu sync.Mutex + cmd cmds.Builder + slot uint16 + mark bool + retry bool +} + +func (c *dedicatedClusterClient) acquire(slot uint16) (wire wire, err error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.slot == cmds.NoSlot { + if slot == cmds.NoSlot { + panic(panicMsgNoSlot) + } + c.slot = slot + } else if c.slot != slot && slot != cmds.NoSlot { + panic(panicMsgCxSlot) + } + if c.wire != nil { + return c.wire, nil + } + if c.conn, err = c.client.pick(c.slot); err != nil { + if p := c.pshks; p != nil { + c.pshks = nil + p.close <- err + close(p.close) + } + return nil, err + } + c.wire = c.conn.Acquire() + if p := c.pshks; p != nil { + c.pshks = nil + ch := c.wire.SetPubSubHooks(p.hooks) + go func(ch <-chan error) { + for e := range ch { + p.close <- e + } + close(p.close) + }(ch) + } + return c.wire, nil +} + +func (c *dedicatedClusterClient) release() { + c.mu.Lock() + if !c.mark { + if p := c.pshks; p != nil { + c.pshks = nil + close(p.close) + } + if c.wire != nil { + c.conn.Store(c.wire) + } + } + c.mark = true + c.mu.Unlock() +} + +func (c *dedicatedClusterClient) B() cmds.Builder { + return c.cmd +} + +func (c *dedicatedClusterClient) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { +retry: + if w, err := c.acquire(cmd.Slot()); err != nil { + resp = newErrResult(err) + } else { + resp = w.Do(ctx, cmd) + switch _, mode := c.client.shouldRefreshRetry(resp.Error(), ctx); mode { + case RedirectRetry: + if c.retry && cmd.IsReadOnly() && w.Error() == nil { + runtime.Gosched() + goto retry + } + } + } + if resp.NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *dedicatedClusterClient) DoMulti(ctx context.Context, multi ...cmds.Completed) (resp []RedisResult) { + if len(multi) == 0 { + return nil + } + if !allSameSlot(multi) { + panic(panicMsgCxSlot) + } + retryable := c.retry + if retryable { + retryable = allReadOnly(multi) + } +retry: + if w, err := c.acquire(multi[0].Slot()); err == nil { + resp = w.DoMulti(ctx, multi...) 
+ for _, r := range resp { + _, mode := c.client.shouldRefreshRetry(r.Error(), ctx) + if mode == RedirectRetry && retryable && w.Error() == nil { + runtime.Gosched() + goto retry + } + if mode != RedirectNone { + break + } + } + } else { + resp = make([]RedisResult, len(multi)) + for i := range resp { + resp[i] = newErrResult(err) + } + } + for i, cmd := range multi { + if resp[i].NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + } + return resp +} + +func (c *dedicatedClusterClient) Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) (err error) { + var w wire +retry: + if w, err = c.acquire(subscribe.Slot()); err == nil { + err = w.Receive(ctx, subscribe, fn) + if _, mode := c.client.shouldRefreshRetry(err, ctx); c.retry && mode == RedirectRetry && w.Error() == nil { + runtime.Gosched() + goto retry + } + } + if err == nil { + cmds.Put(subscribe.CommandSlice()) + } + return err +} + +func (c *dedicatedClusterClient) SetPubSubHooks(hooks PubSubHooks) <-chan error { + c.mu.Lock() + defer c.mu.Unlock() + if p := c.pshks; p != nil { + c.pshks = nil + close(p.close) + } + if c.wire != nil { + return c.wire.SetPubSubHooks(hooks) + } + if hooks.isZero() { + return nil + } + ch := make(chan error, 1) + c.pshks = &pshks{hooks: hooks, close: ch} + return ch +} + +func (c *dedicatedClusterClient) Close() { + c.mu.Lock() + if p := c.pshks; p != nil { + c.pshks = nil + p.close <- ErrClosing + close(p.close) + } + if c.wire != nil { + c.wire.Close() + } + c.mu.Unlock() + c.release() +} + +type RedirectMode int + +const ( + RedirectNone RedirectMode = iota + RedirectMove + RedirectAsk + RedirectRetry + + panicMsgCxSlot = "cross slot command in Dedicated is prohibited" + panicMixCxSlot = "Mixing no-slot and cross slot commands in DoMulti is prohibited" + panicMsgNoSlot = "the first command should contain the slot key" +) diff --git a/vendor/github.com/rueian/rueidis/cmds.go b/vendor/github.com/rueian/rueidis/cmds.go new file mode 100644 index 00000000000..a581bc03ebd --- /dev/null +++ b/vendor/github.com/rueian/rueidis/cmds.go @@ -0,0 +1,19 @@ +package rueidis + +import "github.com/rueian/rueidis/internal/cmds" + +// Commands is an exported alias to []cmds.Completed. +// This allows users to store commands for later usage, for example: +// c, release := client.Dedicate() +// defer release() +// +// cmds := make(rueidis.Commands, 0, 10) +// for i := 0; i < 10; i++ { +// cmds = append(cmds, c.B().Set().Key(strconv.Itoa(i)).Value(strconv.Itoa(i)).Build()) +// } +// for _, resp := range c.DoMulti(ctx, cmds...) { +// if err := resp.Error(); err != nil { +// panic(err) +// } +// However, please know that once commands are processed by the Do() or DoMulti(), they are recycled and should not be reused. 
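The batching pattern sketched in the Commands doc comment above is abbreviated; a complete, compilable version might look like the sketch below. The NewClient/ClientOption construction, the address, and the hash-tagged key names are illustrative assumptions, not part of this patch; the hash tag keeps every key in one slot, which a dedicated connection on a cluster requires (cross-slot commands panic, per panicMsgCxSlot above).

package main

import (
	"context"
	"strconv"

	"github.com/rueian/rueidis"
)

func main() {
	// Client construction is assumed here for illustration.
	client, err := rueidis.NewClient(rueidis.ClientOption{InitAddress: []string{"127.0.0.1:6379"}})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx := context.Background()

	// Dedicate a single connection for the whole batch.
	c, release := client.Dedicate()
	defer release()

	// The {batch} hash tag keeps all keys in the same slot.
	batch := make(rueidis.Commands, 0, 10)
	for i := 0; i < 10; i++ {
		batch = append(batch, c.B().Set().Key("{batch}:"+strconv.Itoa(i)).Value(strconv.Itoa(i)).Build())
	}

	// Once DoMulti processes them, the commands are recycled and must not be reused.
	for _, resp := range c.DoMulti(ctx, batch...) {
		if err := resp.Error(); err != nil {
			panic(err)
		}
	}
}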
+type Commands []cmds.Completed diff --git a/vendor/github.com/rueian/rueidis/codecov.yml b/vendor/github.com/rueian/rueidis/codecov.yml new file mode 100644 index 00000000000..410f064be25 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/codecov.yml @@ -0,0 +1,4 @@ +coverage: + status: + project: false + patch: false diff --git a/vendor/github.com/rueian/rueidis/docker-compose.yml b/vendor/github.com/rueian/rueidis/docker-compose.yml new file mode 100644 index 00000000000..af10c028cea --- /dev/null +++ b/vendor/github.com/rueian/rueidis/docker-compose.yml @@ -0,0 +1,93 @@ +version: "3.8" + +services: + redis: + image: redis:7.0.5-alpine + ports: + - "6379:6379" + redislock: + image: redis:7.0.5-alpine + ports: + - "6376:6379" + redis5: + image: redis:5-alpine + ports: + - "6355:6379" + keydb6: + image: eqalpha/keydb:alpine_x86_64_v6.3.1 + ports: + - "6344:6379" + dragonflydb: + image: docker.dragonflydb.io/dragonflydb/dragonfly:v0.9.1 + ports: + - "6333:6379" + kvrocks: + image: apache/kvrocks:2.2.0 + ports: + - "6666:6666" + redisearch: + image: redislabs/redisearch:2.4.9 + ports: + - "6377:6379" + compat: + image: redis:7.0.5-alpine + ports: + - "6378:6379" + compat5: + image: redis:5-alpine + ports: + - "6356:6379" + sentinel: + image: redis:7.0.5-alpine + entrypoint: + - /bin/sh + - -c + - | + redis-server --save "" --appendonly no --port 6380 & + echo "sentinel monitor test 127.0.0.1 6380 2\n" > sentinel.conf + redis-server sentinel.conf --sentinel + ports: + - "6380:6380" + - "26379:26379" + sentinel5: + image: redis:5-alpine + entrypoint: + - /bin/sh + - -c + - | + redis-server --save "" --appendonly no --port 6385 & + echo "sentinel monitor test5 127.0.0.1 6385 2\n" > sentinel.conf + redis-server sentinel.conf --sentinel + ports: + - "6385:6385" + - "26355:26379" + cluster: + image: redis:7.0.5-alpine + entrypoint: + - /bin/sh + - -c + - | + redis-server --port 7001 --save "" --appendonly no --cluster-enabled yes --cluster-config-file 7001.conf & + redis-server --port 7002 --save "" --appendonly no --cluster-enabled yes --cluster-config-file 7002.conf & + redis-server --port 7003 --save "" --appendonly no --cluster-enabled yes --cluster-config-file 7003.conf & + while ! redis-cli --cluster create 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 --cluster-yes; do sleep 1; done + wait + ports: + - "7001:7001" + - "7002:7002" + - "7003:7003" + cluster5: + image: redis:5-alpine + entrypoint: + - /bin/sh + - -c + - | + redis-server --port 7004 --save "" --appendonly no --cluster-enabled yes --cluster-config-file 7004.conf & + redis-server --port 7005 --save "" --appendonly no --cluster-enabled yes --cluster-config-file 7005.conf & + redis-server --port 7006 --save "" --appendonly no --cluster-enabled yes --cluster-config-file 7006.conf & + while ! redis-cli --cluster create 127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006 --cluster-yes; do sleep 1; done + wait + ports: + - "7004:7004" + - "7005:7005" + - "7006:7006" \ No newline at end of file diff --git a/vendor/github.com/rueian/rueidis/dockertest.sh b/vendor/github.com/rueian/rueidis/dockertest.sh new file mode 100644 index 00000000000..cb974a73d90 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/dockertest.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -ev + +if [ ! 
-z "$CC_TEST_REPORTER_ID" ]; then + curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-$(go env GOOS)-$(go env GOARCH) > ./cc-test-reporter + chmod +x ./cc-test-reporter + ./cc-test-reporter before-build +fi + +docker-compose up -d +go test -coverprofile=./c.out -v -race -timeout 30m ./... +docker-compose down -v + +if [ ! -z "$CC_TEST_REPORTER_ID" ]; then + ./cc-test-reporter after-build -p $(go list -m) +fi + +if [ ! -z "$CODECOV_TOKEN" ]; then + cp c.out coverage.txt + bash <(curl -s https://codecov.io/bash) +fi \ No newline at end of file diff --git a/vendor/github.com/rueian/rueidis/helper.go b/vendor/github.com/rueian/rueidis/helper.go new file mode 100644 index 00000000000..07677da4318 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/helper.go @@ -0,0 +1,70 @@ +package rueidis + +import ( + "context" + "sync" + "time" + + "github.com/rueian/rueidis/internal/cmds" + "github.com/rueian/rueidis/internal/util" +) + +// MGetCache is a helper that consults the client-side caches with multiple keys by grouping keys within same slot into MGETs +func MGetCache(client Client, ctx context.Context, ttl time.Duration, keys []string) (ret map[string]RedisMessage, err error) { + if len(keys) == 0 { + return make(map[string]RedisMessage), nil + } + ret = make(map[string]RedisMessage, len(keys)) + if cc, ok := client.(*clusterClient); ok { + return clusterMGetCache(cc, ctx, ttl, cmds.MGets(keys), keys) + } + return clientMGetCache(client, ctx, ttl, client.B().Mget().Key(keys...).Cache(), keys) +} + +// JsonMGetCache is a helper that consults the client-side caches with multiple keys by grouping keys within same slot into JSON.MGETs +func JsonMGetCache(client Client, ctx context.Context, ttl time.Duration, keys []string, path string) (ret map[string]RedisMessage, err error) { + if len(keys) == 0 { + return make(map[string]RedisMessage), nil + } + if cc, ok := client.(*clusterClient); ok { + return clusterMGetCache(cc, ctx, ttl, cmds.JsonMGets(keys, path), keys) + } + return clientMGetCache(client, ctx, ttl, client.B().JsonMget().Key(keys...).Path(path).Cache(), keys) +} + +func clientMGetCache(client Client, ctx context.Context, ttl time.Duration, cmd cmds.Cacheable, keys []string) (ret map[string]RedisMessage, err error) { + arr, err := client.DoCache(ctx, cmd, ttl).ToArray() + if err != nil { + return nil, err + } + ret = make(map[string]RedisMessage, len(keys)) + for i, resp := range arr { + ret[keys[i]] = resp + } + return ret, nil +} + +func clusterMGetCache(cc *clusterClient, ctx context.Context, ttl time.Duration, mgets map[uint16]cmds.Completed, keys []string) (ret map[string]RedisMessage, err error) { + var mu sync.Mutex + ret = make(map[string]RedisMessage, len(keys)) + util.ParallelVals(mgets, func(cmd cmds.Completed) { + c := cmds.Cacheable(cmd) + arr, err2 := cc.doCache(ctx, c, ttl).ToArray() + mu.Lock() + if err2 != nil { + err = err2 + } else { + for i, resp := range arr { + ret[c.MGetCacheKey(i)] = resp + } + } + mu.Unlock() + }) + if err != nil { + return nil, err + } + for _, mget := range mgets { // not recycle cmds if error, since cmds may be used later in pipe. 
consider recycle them by pipe + cmds.Put(mget.CommandSlice()) + } + return ret, nil +} diff --git a/vendor/github.com/rueian/rueidis/internal/cmds/builder.go b/vendor/github.com/rueian/rueidis/internal/cmds/builder.go new file mode 100644 index 00000000000..3f9f6597a57 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/internal/cmds/builder.go @@ -0,0 +1,119 @@ +package cmds + +import ( + "strings" + "sync" +) + +var pool = &sync.Pool{New: newCommandSlice} + +// CommandSlice is the command container managed by the sync.Pool +type CommandSlice struct { + s []string +} + +func newCommandSlice() interface{} { + return &CommandSlice{s: make([]string, 0, 2)} +} + +// NewBuilder creates a Builder and initializes the internal sync.Pool +func NewBuilder(initSlot uint16) Builder { + return Builder{ks: initSlot} +} + +// Builder builds commands by reusing CommandSlice from the sync.Pool +type Builder struct { + ks uint16 +} + +func get() *CommandSlice { + return pool.Get().(*CommandSlice) +} + +// Put recycles the CommandSlice +func Put(cs *CommandSlice) { + cs.s = cs.s[:0] + pool.Put(cs) +} + +// Arbitrary allows user to build an arbitrary redis command with Builder.Arbitrary +type Arbitrary Completed + +// Arbitrary allows user to build an arbitrary redis command by following Arbitrary.Keys and Arbitrary.Args +func (b Builder) Arbitrary(token ...string) (c Arbitrary) { + c = Arbitrary{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, token...) + return c +} + +// Keys calculate which key slot the command belongs to. +// Users must use Keys to construct the key part of the command, otherwise +// the command will not be sent to correct redis node. +func (c Arbitrary) Keys(keys ...string) Arbitrary { + if c.ks&NoSlot == NoSlot { + for _, k := range keys { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range keys { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, keys...) + return c +} + +// Args is used to construct non-key parts of the command. +func (c Arbitrary) Args(args ...string) Arbitrary { + c.cs.s = append(c.cs.s, args...) + return c +} + +// Build is used to complete constructing a command +func (c Arbitrary) Build() Completed { + if len(c.cs.s) == 0 || len(c.cs.s[0]) == 0 { + panic(arbitraryNoCommand) + } + if strings.HasSuffix(strings.ToUpper(c.cs.s[0]), "SUBSCRIBE") { + panic(arbitrarySubscribe) + } + return Completed(c) +} + +// Blocking is used to complete constructing a command and mark it as blocking command. +// Blocking command will occupy a connection from a separated connection pool. +func (c Arbitrary) Blocking() Completed { + c.cf = blockTag + return c.Build() +} + +// ReadOnly is used to complete constructing a command and mark it as readonly command. +// ReadOnly will be retried under network issues. +func (c Arbitrary) ReadOnly() Completed { + c.cf = readonly + return c.Build() +} + +// MultiGet is used to complete constructing a command and mark it as mtGetTag command. 
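MGetCache and JsonMGetCache above take a plain key list and, on a cluster client, fan the keys out as one MGET per slot before merging the replies into a single map. A minimal usage sketch, assuming a client constructed elsewhere and illustrative key names:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rueian/rueidis"
)

func printCached(client rueidis.Client) error {
	ctx := context.Background()
	// On a cluster client this becomes one MGET per slot, executed in parallel.
	vals, err := rueidis.MGetCache(client, ctx, 30*time.Second, []string{"k1", "k2", "k3"})
	if err != nil {
		return err
	}
	for key, msg := range vals {
		if s, err := msg.ToString(); err == nil {
			fmt.Println(key, "=", s)
		}
	}
	return nil
}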
+func (c Arbitrary) MultiGet() Completed { + if len(c.cs.s) == 0 || len(c.cs.s[0]) == 0 { + panic(arbitraryNoCommand) + } + if c.cs.s[0] != "MGET" && c.cs.s[0] != "JSON.MGET" { + panic(arbitraryMultiGet) + } + c.cf = mtGetTag + return c.Build() +} + +// IsZero is used to test if Arbitrary is initialized +func (c Arbitrary) IsZero() bool { + return c.cs == nil +} + +var ( + arbitraryNoCommand = "Arbitrary should be provided with redis command" + arbitrarySubscribe = "Arbitrary does not support SUBSCRIBE/UNSUBSCRIBE" + arbitraryMultiGet = "Arbitrary.MultiGet is only valid for MGET and JSON.MGET" +) diff --git a/vendor/github.com/rueian/rueidis/internal/cmds/cmds.go b/vendor/github.com/rueian/rueidis/internal/cmds/cmds.go new file mode 100644 index 00000000000..72be554d76a --- /dev/null +++ b/vendor/github.com/rueian/rueidis/internal/cmds/cmds.go @@ -0,0 +1,263 @@ +package cmds + +import "strings" + +const ( + optInTag = uint16(1 << 15) + blockTag = uint16(1 << 14) + readonly = uint16(1 << 13) + noRetTag = uint16(1<<12) | readonly // make noRetTag can also be retried + mtGetTag = uint16(1<<11) | readonly // make mtGetTag can also be retried + // InitSlot indicates that the command be sent to any redis node in cluster + InitSlot = uint16(1 << 14) + // NoSlot indicates that the command has no key slot specified + NoSlot = uint16(1 << 15) +) + +var ( + // OptInCmd is predefined CLIENT CACHING YES + OptInCmd = Completed{ + cs: &CommandSlice{s: []string{"CLIENT", "CACHING", "YES"}}, + cf: optInTag, + } + // MultiCmd is predefined MULTI + MultiCmd = Completed{ + cs: &CommandSlice{s: []string{"MULTI"}}, + } + // ExecCmd is predefined EXEC + ExecCmd = Completed{ + cs: &CommandSlice{s: []string{"EXEC"}}, + } + // RoleCmd is predefined ROLE + RoleCmd = Completed{ + cs: &CommandSlice{s: []string{"ROLE"}}, + } + // QuitCmd is predefined QUIT + QuitCmd = Completed{ + cs: &CommandSlice{s: []string{"QUIT"}}, + } + // UnsubscribeCmd is predefined UNSUBSCRIBE + UnsubscribeCmd = Completed{ + cs: &CommandSlice{s: []string{"UNSUBSCRIBE"}}, + cf: noRetTag, + } + // PUnsubscribeCmd is predefined PUNSUBSCRIBE + PUnsubscribeCmd = Completed{ + cs: &CommandSlice{s: []string{"PUNSUBSCRIBE"}}, + cf: noRetTag, + } + // SUnsubscribeCmd is predefined SUNSUBSCRIBE + SUnsubscribeCmd = Completed{ + cs: &CommandSlice{s: []string{"SUNSUBSCRIBE"}}, + cf: noRetTag, + } + // PingCmd is predefined PING + PingCmd = Completed{ + cs: &CommandSlice{s: []string{"PING"}}, + } + // SlotCmd is predefined CLUSTER SLOTS + SlotCmd = Completed{ + cs: &CommandSlice{s: []string{"CLUSTER", "SLOTS"}}, + } + // AskingCmd is predefined CLUSTER ASKING + AskingCmd = Completed{ + cs: &CommandSlice{s: []string{"ASKING"}}, + } + // SentinelSubscribe is predefined SUBSCRIBE ASKING + SentinelSubscribe = Completed{ + cs: &CommandSlice{s: []string{"SUBSCRIBE", "+sentinel", "+switch-master", "+reboot"}}, + cf: noRetTag, + } +) + +// Completed represents a completed Redis command, should be created by the Build() of command builder. +type Completed struct { + cs *CommandSlice + cf uint16 + ks uint16 +} + +// IsEmpty checks if it is an empty command. +func (c *Completed) IsEmpty() bool { + return c.cs == nil || len(c.cs.s) == 0 +} + +// IsOptIn checks if it is client side caching opt-int command. +func (c *Completed) IsOptIn() bool { + return c.cf&optInTag == optInTag +} + +// IsBlock checks if it is blocking command which needs to be process by dedicated connection. 
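The Arbitrary builder defined above is the escape hatch for commands the generated builder does not cover: Keys marks the key tokens so the command is routed to the correct cluster slot, while Args appends the non-key tokens. A rough usage sketch, assuming a client constructed elsewhere and illustrative key names:

package main

import (
	"context"

	"github.com/rueian/rueidis"
)

func arbitraryExample(client rueidis.Client) error {
	ctx := context.Background()

	// SET mykey value EX 10 — Keys() carries the key, Args() the remaining tokens.
	set := client.B().Arbitrary("SET").Keys("mykey").Args("value", "EX", "10").Build()
	if err := client.Do(ctx, set).Error(); err != nil {
		return err
	}

	// ReadOnly() completes the command and marks it retryable on network errors.
	get := client.B().Arbitrary("GET").Keys("mykey").ReadOnly()
	_, err := client.Do(ctx, get).ToString()
	return err
}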
+func (c *Completed) IsBlock() bool { + return c.cf&blockTag == blockTag +} + +// ToBlock marks the command with blockTag +func (c *Completed) ToBlock() { + c.cf |= blockTag +} + +// NoReply checks if it is one of the SUBSCRIBE, PSUBSCRIBE, UNSUBSCRIBE or PUNSUBSCRIBE commands. +func (c *Completed) NoReply() bool { + return c.cf&noRetTag == noRetTag +} + +// IsReadOnly checks if it is readonly command and can be retried when network error. +func (c *Completed) IsReadOnly() bool { + return c.cf&readonly == readonly +} + +// IsWrite checks if it is not readonly command. +func (c *Completed) IsWrite() bool { + return !c.IsReadOnly() +} + +// Commands returns the commands as []string. +// Note that the returned []string should not be modified +// and should not be read after passing into the Client interface, because it will be recycled. +func (c *Completed) Commands() []string { + return c.cs.s +} + +// CommandSlice it the command container which will be recycled by the sync.Pool. +func (c *Completed) CommandSlice() *CommandSlice { + return c.cs +} + +// Slot returns the command key slot +func (c *Completed) Slot() uint16 { + return c.ks +} + +// Cacheable represents a completed Redis command which supports server-assisted client side caching, +// and it should be created by the Cache() of command builder. +type Cacheable Completed + +// Slot returns the command key slot +func (c *Cacheable) Slot() uint16 { + return c.ks +} + +// Commands returns the commands as []string. +// Note that the returned []string should not be modified +// and should not be read after passing into the Client interface, because it will be recycled. +func (c *Cacheable) Commands() []string { + return c.cs.s +} + +// CommandSlice returns the command container which will be recycled by the sync.Pool. +func (c *Cacheable) CommandSlice() *CommandSlice { + return c.cs +} + +// IsMGet returns if the command is MGET +func (c *Cacheable) IsMGet() bool { + return c.cf == mtGetTag +} + +// MGetCacheCmd returns the cache command of the MGET singular command +func (c *Cacheable) MGetCacheCmd() string { + if c.cs.s[0][0] == 'J' { + return "JSON.GET" + c.cs.s[len(c.cs.s)-1] + } + return "GET" +} + +// MGetCacheKey returns the cache key of the MGET singular command +func (c *Cacheable) MGetCacheKey(i int) string { + return c.cs.s[i+1] +} + +// CacheKey returns the cache key used by the server-assisted client side caching +func (c *Cacheable) CacheKey() (key, command string) { + if len(c.cs.s) == 2 { + return c.cs.s[1], c.cs.s[0] + } + + length := 0 + for i, v := range c.cs.s { + if i == 1 { + continue + } + length += len(v) + } + sb := strings.Builder{} + sb.Grow(length) + for i, v := range c.cs.s { + if i == 1 { + key = v + } else { + sb.WriteString(v) + } + } + return key, sb.String() +} + +// NewCompleted creates an arbitrary Completed command. +func NewCompleted(ss []string) Completed { + return Completed{cs: &CommandSlice{s: ss}} +} + +// NewBlockingCompleted creates an arbitrary blocking Completed command. +func NewBlockingCompleted(ss []string) Completed { + return Completed{cs: &CommandSlice{s: ss}, cf: blockTag} +} + +// NewReadOnlyCompleted creates an arbitrary readonly Completed command. +func NewReadOnlyCompleted(ss []string) Completed { + return Completed{cs: &CommandSlice{s: ss}, cf: readonly} +} + +// NewMGetCompleted creates an arbitrary readonly Completed command. 
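The Cacheable type above is what DoCache consumes; its CacheKey splits the command into the key argument plus the concatenation of every other token, which serves as the cache-entry identity (for example, HGET h f yields key "h" and command "HGETf"). A minimal client-side-caching sketch, assuming a client constructed elsewhere and an illustrative key and TTL:

package main

import (
	"context"
	"time"

	"github.com/rueian/rueidis"
)

func cachedGet(client rueidis.Client) (string, error) {
	ctx := context.Background()
	// Within the 10s TTL, repeated calls are served from the local cache until
	// the server invalidates the entry.
	return client.DoCache(ctx, client.B().Get().Key("mykey").Cache(), 10*time.Second).ToString()
}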
+func NewMGetCompleted(ss []string) Completed { + return Completed{cs: &CommandSlice{s: ss}, cf: mtGetTag} +} + +// MGets groups keys by their slot and returns multi MGET commands +func MGets(keys []string) map[uint16]Completed { + return slotMGets("MGET", keys) +} + +// JsonMGets groups keys by their slot and returns multi JSON.MGET commands +func JsonMGets(keys []string, path string) map[uint16]Completed { + ret := slotMGets("JSON.MGET", keys) + for _, jsonmget := range ret { + jsonmget.cs.s = append(jsonmget.cs.s, path) + } + return ret +} + +func slotMGets(cmd string, keys []string) map[uint16]Completed { + ret := make(map[uint16]Completed, 16) + for _, key := range keys { + var cs *CommandSlice + ks := slot(key) + if cp, ok := ret[ks]; ok { + cs = cp.cs + } else { + cs = get() + cs.s = append(cs.s, cmd) + ret[ks] = Completed{cs: cs, cf: mtGetTag, ks: ks} + } + cs.s = append(cs.s, key) + } + return ret +} + +// NewMultiCompleted creates multiple arbitrary Completed commands. +func NewMultiCompleted(cs [][]string) []Completed { + ret := make([]Completed, len(cs)) + for i, c := range cs { + ret[i] = NewCompleted(c) + } + return ret +} + +func check(prev, new uint16) uint16 { + if prev == InitSlot || prev == new { + return new + } + panic(multiKeySlotErr) +} + +const multiKeySlotErr = "multi key command with different key slots are not allowed" diff --git a/vendor/github.com/rueian/rueidis/internal/cmds/gen.go b/vendor/github.com/rueian/rueidis/internal/cmds/gen.go new file mode 100644 index 00000000000..788134c2652 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/internal/cmds/gen.go @@ -0,0 +1,34266 @@ +// Code generated DO NOT EDIT + +package cmds + +import "strconv" + +type AclCat Completed + +func (b Builder) AclCat() (c AclCat) { + c = AclCat{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "CAT") + return c +} + +func (c AclCat) Categoryname(categoryname string) AclCatCategoryname { + c.cs.s = append(c.cs.s, categoryname) + return (AclCatCategoryname)(c) +} + +func (c AclCat) Build() Completed { + return Completed(c) +} + +type AclCatCategoryname Completed + +func (c AclCatCategoryname) Build() Completed { + return Completed(c) +} + +type AclDeluser Completed + +func (b Builder) AclDeluser() (c AclDeluser) { + c = AclDeluser{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "DELUSER") + return c +} + +func (c AclDeluser) Username(username ...string) AclDeluserUsername { + c.cs.s = append(c.cs.s, username...) + return (AclDeluserUsername)(c) +} + +type AclDeluserUsername Completed + +func (c AclDeluserUsername) Username(username ...string) AclDeluserUsername { + c.cs.s = append(c.cs.s, username...) + return c +} + +func (c AclDeluserUsername) Build() Completed { + return Completed(c) +} + +type AclDryrun Completed + +func (b Builder) AclDryrun() (c AclDryrun) { + c = AclDryrun{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "DRYRUN") + return c +} + +func (c AclDryrun) Username(username string) AclDryrunUsername { + c.cs.s = append(c.cs.s, username) + return (AclDryrunUsername)(c) +} + +type AclDryrunArg Completed + +func (c AclDryrunArg) Arg(arg ...string) AclDryrunArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c AclDryrunArg) Build() Completed { + return Completed(c) +} + +type AclDryrunCommand Completed + +func (c AclDryrunCommand) Arg(arg ...string) AclDryrunArg { + c.cs.s = append(c.cs.s, arg...) 
+ return (AclDryrunArg)(c) +} + +func (c AclDryrunCommand) Build() Completed { + return Completed(c) +} + +type AclDryrunUsername Completed + +func (c AclDryrunUsername) Command(command string) AclDryrunCommand { + c.cs.s = append(c.cs.s, command) + return (AclDryrunCommand)(c) +} + +type AclGenpass Completed + +func (b Builder) AclGenpass() (c AclGenpass) { + c = AclGenpass{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "GENPASS") + return c +} + +func (c AclGenpass) Bits(bits int64) AclGenpassBits { + c.cs.s = append(c.cs.s, strconv.FormatInt(bits, 10)) + return (AclGenpassBits)(c) +} + +func (c AclGenpass) Build() Completed { + return Completed(c) +} + +type AclGenpassBits Completed + +func (c AclGenpassBits) Build() Completed { + return Completed(c) +} + +type AclGetuser Completed + +func (b Builder) AclGetuser() (c AclGetuser) { + c = AclGetuser{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "GETUSER") + return c +} + +func (c AclGetuser) Username(username string) AclGetuserUsername { + c.cs.s = append(c.cs.s, username) + return (AclGetuserUsername)(c) +} + +type AclGetuserUsername Completed + +func (c AclGetuserUsername) Build() Completed { + return Completed(c) +} + +type AclHelp Completed + +func (b Builder) AclHelp() (c AclHelp) { + c = AclHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "HELP") + return c +} + +func (c AclHelp) Build() Completed { + return Completed(c) +} + +type AclList Completed + +func (b Builder) AclList() (c AclList) { + c = AclList{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "LIST") + return c +} + +func (c AclList) Build() Completed { + return Completed(c) +} + +type AclLoad Completed + +func (b Builder) AclLoad() (c AclLoad) { + c = AclLoad{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "LOAD") + return c +} + +func (c AclLoad) Build() Completed { + return Completed(c) +} + +type AclLog Completed + +func (b Builder) AclLog() (c AclLog) { + c = AclLog{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "LOG") + return c +} + +func (c AclLog) Count(count int64) AclLogCountCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (AclLogCountCount)(c) +} + +func (c AclLog) Reset() AclLogCountReset { + c.cs.s = append(c.cs.s, "RESET") + return (AclLogCountReset)(c) +} + +type AclLogCountCount Completed + +func (c AclLogCountCount) Reset() AclLogCountReset { + c.cs.s = append(c.cs.s, "RESET") + return (AclLogCountReset)(c) +} + +func (c AclLogCountCount) Build() Completed { + return Completed(c) +} + +type AclLogCountReset Completed + +func (c AclLogCountReset) Build() Completed { + return Completed(c) +} + +type AclSave Completed + +func (b Builder) AclSave() (c AclSave) { + c = AclSave{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "SAVE") + return c +} + +func (c AclSave) Build() Completed { + return Completed(c) +} + +type AclSetuser Completed + +func (b Builder) AclSetuser() (c AclSetuser) { + c = AclSetuser{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "SETUSER") + return c +} + +func (c AclSetuser) Username(username string) AclSetuserUsername { + c.cs.s = append(c.cs.s, username) + return (AclSetuserUsername)(c) +} + +type AclSetuserRule Completed + +func (c AclSetuserRule) Rule(rule ...string) AclSetuserRule { + c.cs.s = append(c.cs.s, rule...) + return c +} + +func (c AclSetuserRule) Build() Completed { + return Completed(c) +} + +type AclSetuserUsername Completed + +func (c AclSetuserUsername) Rule(rule ...string) AclSetuserRule { + c.cs.s = append(c.cs.s, rule...) 
+ return (AclSetuserRule)(c) +} + +func (c AclSetuserUsername) Build() Completed { + return Completed(c) +} + +type AclUsers Completed + +func (b Builder) AclUsers() (c AclUsers) { + c = AclUsers{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "USERS") + return c +} + +func (c AclUsers) Build() Completed { + return Completed(c) +} + +type AclWhoami Completed + +func (b Builder) AclWhoami() (c AclWhoami) { + c = AclWhoami{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ACL", "WHOAMI") + return c +} + +func (c AclWhoami) Build() Completed { + return Completed(c) +} + +type AiModeldel Completed + +func (b Builder) AiModeldel() (c AiModeldel) { + c = AiModeldel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AI.MODELDEL") + return c +} + +func (c AiModeldel) Key(key string) AiModeldelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiModeldelKey)(c) +} + +type AiModeldelKey Completed + +func (c AiModeldelKey) Build() Completed { + return Completed(c) +} + +type AiModelexecute Completed + +func (b Builder) AiModelexecute() (c AiModelexecute) { + c = AiModelexecute{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "AI.MODELEXECUTE") + return c +} + +func (c AiModelexecute) Key(key string) AiModelexecuteKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiModelexecuteKey)(c) +} + +type AiModelexecuteInputsInput Completed + +func (c AiModelexecuteInputsInput) Input(input ...string) AiModelexecuteInputsInput { + c.cs.s = append(c.cs.s, input...) + return c +} + +func (c AiModelexecuteInputsInput) Outputs(outputCount int64) AiModelexecuteOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelexecuteOutputsOutputs)(c) +} + +type AiModelexecuteInputsInputs Completed + +func (c AiModelexecuteInputsInputs) Input(input ...string) AiModelexecuteInputsInput { + c.cs.s = append(c.cs.s, input...) + return (AiModelexecuteInputsInput)(c) +} + +type AiModelexecuteKey Completed + +func (c AiModelexecuteKey) Inputs(inputCount int64) AiModelexecuteInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiModelexecuteInputsInputs)(c) +} + +type AiModelexecuteOutputsOutput Completed + +func (c AiModelexecuteOutputsOutput) Output(output ...string) AiModelexecuteOutputsOutput { + c.cs.s = append(c.cs.s, output...) + return c +} + +func (c AiModelexecuteOutputsOutput) Timeout(timeout int64) AiModelexecuteTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (AiModelexecuteTimeout)(c) +} + +func (c AiModelexecuteOutputsOutput) Build() Completed { + return Completed(c) +} + +func (c AiModelexecuteOutputsOutput) Cache() Cacheable { + return Cacheable(c) +} + +type AiModelexecuteOutputsOutputs Completed + +func (c AiModelexecuteOutputsOutputs) Output(output ...string) AiModelexecuteOutputsOutput { + c.cs.s = append(c.cs.s, output...) 
+ return (AiModelexecuteOutputsOutput)(c) +} + +type AiModelexecuteTimeout Completed + +func (c AiModelexecuteTimeout) Build() Completed { + return Completed(c) +} + +func (c AiModelexecuteTimeout) Cache() Cacheable { + return Cacheable(c) +} + +type AiModelget Completed + +func (b Builder) AiModelget() (c AiModelget) { + c = AiModelget{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "AI.MODELGET") + return c +} + +func (c AiModelget) Key(key string) AiModelgetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiModelgetKey)(c) +} + +type AiModelgetBlob Completed + +func (c AiModelgetBlob) Build() Completed { + return Completed(c) +} + +func (c AiModelgetBlob) Cache() Cacheable { + return Cacheable(c) +} + +type AiModelgetKey Completed + +func (c AiModelgetKey) Meta() AiModelgetMeta { + c.cs.s = append(c.cs.s, "META") + return (AiModelgetMeta)(c) +} + +func (c AiModelgetKey) Blob() AiModelgetBlob { + c.cs.s = append(c.cs.s, "BLOB") + return (AiModelgetBlob)(c) +} + +func (c AiModelgetKey) Build() Completed { + return Completed(c) +} + +func (c AiModelgetKey) Cache() Cacheable { + return Cacheable(c) +} + +type AiModelgetMeta Completed + +func (c AiModelgetMeta) Blob() AiModelgetBlob { + c.cs.s = append(c.cs.s, "BLOB") + return (AiModelgetBlob)(c) +} + +func (c AiModelgetMeta) Build() Completed { + return Completed(c) +} + +func (c AiModelgetMeta) Cache() Cacheable { + return Cacheable(c) +} + +type AiModelstore Completed + +func (b Builder) AiModelstore() (c AiModelstore) { + c = AiModelstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AI.MODELSTORE") + return c +} + +func (c AiModelstore) Key(key string) AiModelstoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiModelstoreKey)(c) +} + +type AiModelstoreBackendOnnx Completed + +func (c AiModelstoreBackendOnnx) Cpu() AiModelstoreDeviceCpu { + c.cs.s = append(c.cs.s, "CPU") + return (AiModelstoreDeviceCpu)(c) +} + +func (c AiModelstoreBackendOnnx) Gpu() AiModelstoreDeviceGpu { + c.cs.s = append(c.cs.s, "GPU") + return (AiModelstoreDeviceGpu)(c) +} + +type AiModelstoreBackendTf Completed + +func (c AiModelstoreBackendTf) Cpu() AiModelstoreDeviceCpu { + c.cs.s = append(c.cs.s, "CPU") + return (AiModelstoreDeviceCpu)(c) +} + +func (c AiModelstoreBackendTf) Gpu() AiModelstoreDeviceGpu { + c.cs.s = append(c.cs.s, "GPU") + return (AiModelstoreDeviceGpu)(c) +} + +type AiModelstoreBackendTorch Completed + +func (c AiModelstoreBackendTorch) Cpu() AiModelstoreDeviceCpu { + c.cs.s = append(c.cs.s, "CPU") + return (AiModelstoreDeviceCpu)(c) +} + +func (c AiModelstoreBackendTorch) Gpu() AiModelstoreDeviceGpu { + c.cs.s = append(c.cs.s, "GPU") + return (AiModelstoreDeviceGpu)(c) +} + +type AiModelstoreBatchsize Completed + +func (c AiModelstoreBatchsize) Minbatchsize(minbatchsize int64) AiModelstoreMinbatchsize { + c.cs.s = append(c.cs.s, "MINBATCHSIZE", strconv.FormatInt(minbatchsize, 10)) + return (AiModelstoreMinbatchsize)(c) +} + +func (c AiModelstoreBatchsize) Minbatchtimeout(minbatchtimeout int64) AiModelstoreMinbatchtimeout { + c.cs.s = append(c.cs.s, "MINBATCHTIMEOUT", strconv.FormatInt(minbatchtimeout, 10)) + return (AiModelstoreMinbatchtimeout)(c) +} + +func (c AiModelstoreBatchsize) Inputs(inputCount int64) AiModelstoreInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return 
(AiModelstoreInputsInputs)(c) +} + +func (c AiModelstoreBatchsize) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreBatchsize) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreBatchsize) Build() Completed { + return Completed(c) +} + +type AiModelstoreBlob Completed + +func (c AiModelstoreBlob) Build() Completed { + return Completed(c) +} + +type AiModelstoreDeviceCpu Completed + +func (c AiModelstoreDeviceCpu) Tag(tag string) AiModelstoreTag { + c.cs.s = append(c.cs.s, "TAG", tag) + return (AiModelstoreTag)(c) +} + +func (c AiModelstoreDeviceCpu) Batchsize(batchsize int64) AiModelstoreBatchsize { + c.cs.s = append(c.cs.s, "BATCHSIZE", strconv.FormatInt(batchsize, 10)) + return (AiModelstoreBatchsize)(c) +} + +func (c AiModelstoreDeviceCpu) Minbatchsize(minbatchsize int64) AiModelstoreMinbatchsize { + c.cs.s = append(c.cs.s, "MINBATCHSIZE", strconv.FormatInt(minbatchsize, 10)) + return (AiModelstoreMinbatchsize)(c) +} + +func (c AiModelstoreDeviceCpu) Minbatchtimeout(minbatchtimeout int64) AiModelstoreMinbatchtimeout { + c.cs.s = append(c.cs.s, "MINBATCHTIMEOUT", strconv.FormatInt(minbatchtimeout, 10)) + return (AiModelstoreMinbatchtimeout)(c) +} + +func (c AiModelstoreDeviceCpu) Inputs(inputCount int64) AiModelstoreInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiModelstoreInputsInputs)(c) +} + +func (c AiModelstoreDeviceCpu) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreDeviceCpu) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreDeviceCpu) Build() Completed { + return Completed(c) +} + +type AiModelstoreDeviceGpu Completed + +func (c AiModelstoreDeviceGpu) Tag(tag string) AiModelstoreTag { + c.cs.s = append(c.cs.s, "TAG", tag) + return (AiModelstoreTag)(c) +} + +func (c AiModelstoreDeviceGpu) Batchsize(batchsize int64) AiModelstoreBatchsize { + c.cs.s = append(c.cs.s, "BATCHSIZE", strconv.FormatInt(batchsize, 10)) + return (AiModelstoreBatchsize)(c) +} + +func (c AiModelstoreDeviceGpu) Minbatchsize(minbatchsize int64) AiModelstoreMinbatchsize { + c.cs.s = append(c.cs.s, "MINBATCHSIZE", strconv.FormatInt(minbatchsize, 10)) + return (AiModelstoreMinbatchsize)(c) +} + +func (c AiModelstoreDeviceGpu) Minbatchtimeout(minbatchtimeout int64) AiModelstoreMinbatchtimeout { + c.cs.s = append(c.cs.s, "MINBATCHTIMEOUT", strconv.FormatInt(minbatchtimeout, 10)) + return (AiModelstoreMinbatchtimeout)(c) +} + +func (c AiModelstoreDeviceGpu) Inputs(inputCount int64) AiModelstoreInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiModelstoreInputsInputs)(c) +} + +func (c AiModelstoreDeviceGpu) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreDeviceGpu) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreDeviceGpu) Build() Completed { + return Completed(c) +} + +type AiModelstoreInputsInput Completed + +func (c 
AiModelstoreInputsInput) Input(input ...string) AiModelstoreInputsInput { + c.cs.s = append(c.cs.s, input...) + return c +} + +func (c AiModelstoreInputsInput) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreInputsInput) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreInputsInput) Build() Completed { + return Completed(c) +} + +type AiModelstoreInputsInputs Completed + +func (c AiModelstoreInputsInputs) Input(input ...string) AiModelstoreInputsInput { + c.cs.s = append(c.cs.s, input...) + return (AiModelstoreInputsInput)(c) +} + +type AiModelstoreKey Completed + +func (c AiModelstoreKey) Tf() AiModelstoreBackendTf { + c.cs.s = append(c.cs.s, "TF") + return (AiModelstoreBackendTf)(c) +} + +func (c AiModelstoreKey) Torch() AiModelstoreBackendTorch { + c.cs.s = append(c.cs.s, "TORCH") + return (AiModelstoreBackendTorch)(c) +} + +func (c AiModelstoreKey) Onnx() AiModelstoreBackendOnnx { + c.cs.s = append(c.cs.s, "ONNX") + return (AiModelstoreBackendOnnx)(c) +} + +type AiModelstoreMinbatchsize Completed + +func (c AiModelstoreMinbatchsize) Minbatchtimeout(minbatchtimeout int64) AiModelstoreMinbatchtimeout { + c.cs.s = append(c.cs.s, "MINBATCHTIMEOUT", strconv.FormatInt(minbatchtimeout, 10)) + return (AiModelstoreMinbatchtimeout)(c) +} + +func (c AiModelstoreMinbatchsize) Inputs(inputCount int64) AiModelstoreInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiModelstoreInputsInputs)(c) +} + +func (c AiModelstoreMinbatchsize) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreMinbatchsize) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreMinbatchsize) Build() Completed { + return Completed(c) +} + +type AiModelstoreMinbatchtimeout Completed + +func (c AiModelstoreMinbatchtimeout) Inputs(inputCount int64) AiModelstoreInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiModelstoreInputsInputs)(c) +} + +func (c AiModelstoreMinbatchtimeout) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreMinbatchtimeout) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreMinbatchtimeout) Build() Completed { + return Completed(c) +} + +type AiModelstoreOutputsOutput Completed + +func (c AiModelstoreOutputsOutput) Output(output ...string) AiModelstoreOutputsOutput { + c.cs.s = append(c.cs.s, output...) + return c +} + +func (c AiModelstoreOutputsOutput) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreOutputsOutput) Build() Completed { + return Completed(c) +} + +type AiModelstoreOutputsOutputs Completed + +func (c AiModelstoreOutputsOutputs) Output(output ...string) AiModelstoreOutputsOutput { + c.cs.s = append(c.cs.s, output...) 
+ return (AiModelstoreOutputsOutput)(c) +} + +type AiModelstoreTag Completed + +func (c AiModelstoreTag) Batchsize(batchsize int64) AiModelstoreBatchsize { + c.cs.s = append(c.cs.s, "BATCHSIZE", strconv.FormatInt(batchsize, 10)) + return (AiModelstoreBatchsize)(c) +} + +func (c AiModelstoreTag) Minbatchsize(minbatchsize int64) AiModelstoreMinbatchsize { + c.cs.s = append(c.cs.s, "MINBATCHSIZE", strconv.FormatInt(minbatchsize, 10)) + return (AiModelstoreMinbatchsize)(c) +} + +func (c AiModelstoreTag) Minbatchtimeout(minbatchtimeout int64) AiModelstoreMinbatchtimeout { + c.cs.s = append(c.cs.s, "MINBATCHTIMEOUT", strconv.FormatInt(minbatchtimeout, 10)) + return (AiModelstoreMinbatchtimeout)(c) +} + +func (c AiModelstoreTag) Inputs(inputCount int64) AiModelstoreInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiModelstoreInputsInputs)(c) +} + +func (c AiModelstoreTag) Outputs(outputCount int64) AiModelstoreOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiModelstoreOutputsOutputs)(c) +} + +func (c AiModelstoreTag) Blob(blob string) AiModelstoreBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiModelstoreBlob)(c) +} + +func (c AiModelstoreTag) Build() Completed { + return Completed(c) +} + +type AiScriptdel Completed + +func (b Builder) AiScriptdel() (c AiScriptdel) { + c = AiScriptdel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AI.SCRIPTDEL") + return c +} + +func (c AiScriptdel) Key(key string) AiScriptdelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiScriptdelKey)(c) +} + +type AiScriptdelKey Completed + +func (c AiScriptdelKey) Build() Completed { + return Completed(c) +} + +type AiScriptexecute Completed + +func (b Builder) AiScriptexecute() (c AiScriptexecute) { + c = AiScriptexecute{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AI.SCRIPTEXECUTE") + return c +} + +func (c AiScriptexecute) Key(key string) AiScriptexecuteKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiScriptexecuteKey)(c) +} + +type AiScriptexecuteArgsArg Completed + +func (c AiScriptexecuteArgsArg) Arg(arg ...string) AiScriptexecuteArgsArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c AiScriptexecuteArgsArg) Outputs(outputCount int64) AiScriptexecuteOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiScriptexecuteOutputsOutputs)(c) +} + +func (c AiScriptexecuteArgsArg) Timeout(timeout int64) AiScriptexecuteTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (AiScriptexecuteTimeout)(c) +} + +func (c AiScriptexecuteArgsArg) Build() Completed { + return Completed(c) +} + +type AiScriptexecuteArgsArgs Completed + +func (c AiScriptexecuteArgsArgs) Arg(arg ...string) AiScriptexecuteArgsArg { + c.cs.s = append(c.cs.s, arg...) 
+ return (AiScriptexecuteArgsArg)(c) +} + +type AiScriptexecuteFunction Completed + +func (c AiScriptexecuteFunction) Keys(keyCount int64) AiScriptexecuteKeysKeys { + c.cs.s = append(c.cs.s, "KEYS", strconv.FormatInt(keyCount, 10)) + return (AiScriptexecuteKeysKeys)(c) +} + +func (c AiScriptexecuteFunction) Inputs(inputCount int64) AiScriptexecuteInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiScriptexecuteInputsInputs)(c) +} + +func (c AiScriptexecuteFunction) Args(argCount int64) AiScriptexecuteArgsArgs { + c.cs.s = append(c.cs.s, "ARGS", strconv.FormatInt(argCount, 10)) + return (AiScriptexecuteArgsArgs)(c) +} + +func (c AiScriptexecuteFunction) Outputs(outputCount int64) AiScriptexecuteOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiScriptexecuteOutputsOutputs)(c) +} + +func (c AiScriptexecuteFunction) Timeout(timeout int64) AiScriptexecuteTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (AiScriptexecuteTimeout)(c) +} + +func (c AiScriptexecuteFunction) Build() Completed { + return Completed(c) +} + +type AiScriptexecuteInputsInput Completed + +func (c AiScriptexecuteInputsInput) Input(input ...string) AiScriptexecuteInputsInput { + c.cs.s = append(c.cs.s, input...) + return c +} + +func (c AiScriptexecuteInputsInput) Args(argCount int64) AiScriptexecuteArgsArgs { + c.cs.s = append(c.cs.s, "ARGS", strconv.FormatInt(argCount, 10)) + return (AiScriptexecuteArgsArgs)(c) +} + +func (c AiScriptexecuteInputsInput) Outputs(outputCount int64) AiScriptexecuteOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiScriptexecuteOutputsOutputs)(c) +} + +func (c AiScriptexecuteInputsInput) Timeout(timeout int64) AiScriptexecuteTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (AiScriptexecuteTimeout)(c) +} + +func (c AiScriptexecuteInputsInput) Build() Completed { + return Completed(c) +} + +type AiScriptexecuteInputsInputs Completed + +func (c AiScriptexecuteInputsInputs) Input(input ...string) AiScriptexecuteInputsInput { + c.cs.s = append(c.cs.s, input...) + return (AiScriptexecuteInputsInput)(c) +} + +type AiScriptexecuteKey Completed + +func (c AiScriptexecuteKey) Function(function string) AiScriptexecuteFunction { + c.cs.s = append(c.cs.s, function) + return (AiScriptexecuteFunction)(c) +} + +type AiScriptexecuteKeysKey Completed + +func (c AiScriptexecuteKeysKey) Key(key ...string) AiScriptexecuteKeysKey { + c.cs.s = append(c.cs.s, key...) 
+ return c +} + +func (c AiScriptexecuteKeysKey) Inputs(inputCount int64) AiScriptexecuteInputsInputs { + c.cs.s = append(c.cs.s, "INPUTS", strconv.FormatInt(inputCount, 10)) + return (AiScriptexecuteInputsInputs)(c) +} + +func (c AiScriptexecuteKeysKey) Args(argCount int64) AiScriptexecuteArgsArgs { + c.cs.s = append(c.cs.s, "ARGS", strconv.FormatInt(argCount, 10)) + return (AiScriptexecuteArgsArgs)(c) +} + +func (c AiScriptexecuteKeysKey) Outputs(outputCount int64) AiScriptexecuteOutputsOutputs { + c.cs.s = append(c.cs.s, "OUTPUTS", strconv.FormatInt(outputCount, 10)) + return (AiScriptexecuteOutputsOutputs)(c) +} + +func (c AiScriptexecuteKeysKey) Timeout(timeout int64) AiScriptexecuteTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (AiScriptexecuteTimeout)(c) +} + +func (c AiScriptexecuteKeysKey) Build() Completed { + return Completed(c) +} + +type AiScriptexecuteKeysKeys Completed + +func (c AiScriptexecuteKeysKeys) Key(key ...string) AiScriptexecuteKeysKey { + c.cs.s = append(c.cs.s, key...) + return (AiScriptexecuteKeysKey)(c) +} + +type AiScriptexecuteOutputsOutput Completed + +func (c AiScriptexecuteOutputsOutput) Output(output ...string) AiScriptexecuteOutputsOutput { + c.cs.s = append(c.cs.s, output...) + return c +} + +func (c AiScriptexecuteOutputsOutput) Timeout(timeout int64) AiScriptexecuteTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (AiScriptexecuteTimeout)(c) +} + +func (c AiScriptexecuteOutputsOutput) Build() Completed { + return Completed(c) +} + +type AiScriptexecuteOutputsOutputs Completed + +func (c AiScriptexecuteOutputsOutputs) Output(output ...string) AiScriptexecuteOutputsOutput { + c.cs.s = append(c.cs.s, output...) + return (AiScriptexecuteOutputsOutput)(c) +} + +type AiScriptexecuteTimeout Completed + +func (c AiScriptexecuteTimeout) Build() Completed { + return Completed(c) +} + +type AiScriptget Completed + +func (b Builder) AiScriptget() (c AiScriptget) { + c = AiScriptget{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "AI.SCRIPTGET") + return c +} + +func (c AiScriptget) Key(key string) AiScriptgetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiScriptgetKey)(c) +} + +type AiScriptgetKey Completed + +func (c AiScriptgetKey) Meta() AiScriptgetMeta { + c.cs.s = append(c.cs.s, "META") + return (AiScriptgetMeta)(c) +} + +func (c AiScriptgetKey) Source() AiScriptgetSource { + c.cs.s = append(c.cs.s, "SOURCE") + return (AiScriptgetSource)(c) +} + +func (c AiScriptgetKey) Build() Completed { + return Completed(c) +} + +func (c AiScriptgetKey) Cache() Cacheable { + return Cacheable(c) +} + +type AiScriptgetMeta Completed + +func (c AiScriptgetMeta) Source() AiScriptgetSource { + c.cs.s = append(c.cs.s, "SOURCE") + return (AiScriptgetSource)(c) +} + +func (c AiScriptgetMeta) Build() Completed { + return Completed(c) +} + +func (c AiScriptgetMeta) Cache() Cacheable { + return Cacheable(c) +} + +type AiScriptgetSource Completed + +func (c AiScriptgetSource) Build() Completed { + return Completed(c) +} + +func (c AiScriptgetSource) Cache() Cacheable { + return Cacheable(c) +} + +type AiScriptstore Completed + +func (b Builder) AiScriptstore() (c AiScriptstore) { + c = AiScriptstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AI.SCRIPTSTORE") + return c +} + +func (c AiScriptstore) Key(key string) AiScriptstoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | 
slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiScriptstoreKey)(c) +} + +type AiScriptstoreDeviceCpu Completed + +func (c AiScriptstoreDeviceCpu) Tag(tag string) AiScriptstoreTag { + c.cs.s = append(c.cs.s, "TAG", tag) + return (AiScriptstoreTag)(c) +} + +func (c AiScriptstoreDeviceCpu) EntryPoints(entryPointCount int64) AiScriptstoreEntryPointsEntryPoints { + c.cs.s = append(c.cs.s, "ENTRY_POINTS", strconv.FormatInt(entryPointCount, 10)) + return (AiScriptstoreEntryPointsEntryPoints)(c) +} + +type AiScriptstoreDeviceGpu Completed + +func (c AiScriptstoreDeviceGpu) Tag(tag string) AiScriptstoreTag { + c.cs.s = append(c.cs.s, "TAG", tag) + return (AiScriptstoreTag)(c) +} + +func (c AiScriptstoreDeviceGpu) EntryPoints(entryPointCount int64) AiScriptstoreEntryPointsEntryPoints { + c.cs.s = append(c.cs.s, "ENTRY_POINTS", strconv.FormatInt(entryPointCount, 10)) + return (AiScriptstoreEntryPointsEntryPoints)(c) +} + +type AiScriptstoreEntryPointsEntryPoint Completed + +func (c AiScriptstoreEntryPointsEntryPoint) EntryPoint(entryPoint ...string) AiScriptstoreEntryPointsEntryPoint { + c.cs.s = append(c.cs.s, entryPoint...) + return c +} + +func (c AiScriptstoreEntryPointsEntryPoint) Build() Completed { + return Completed(c) +} + +type AiScriptstoreEntryPointsEntryPoints Completed + +func (c AiScriptstoreEntryPointsEntryPoints) EntryPoint(entryPoint ...string) AiScriptstoreEntryPointsEntryPoint { + c.cs.s = append(c.cs.s, entryPoint...) + return (AiScriptstoreEntryPointsEntryPoint)(c) +} + +type AiScriptstoreKey Completed + +func (c AiScriptstoreKey) Cpu() AiScriptstoreDeviceCpu { + c.cs.s = append(c.cs.s, "CPU") + return (AiScriptstoreDeviceCpu)(c) +} + +func (c AiScriptstoreKey) Gpu() AiScriptstoreDeviceGpu { + c.cs.s = append(c.cs.s, "GPU") + return (AiScriptstoreDeviceGpu)(c) +} + +type AiScriptstoreTag Completed + +func (c AiScriptstoreTag) EntryPoints(entryPointCount int64) AiScriptstoreEntryPointsEntryPoints { + c.cs.s = append(c.cs.s, "ENTRY_POINTS", strconv.FormatInt(entryPointCount, 10)) + return (AiScriptstoreEntryPointsEntryPoints)(c) +} + +type AiTensorget Completed + +func (b Builder) AiTensorget() (c AiTensorget) { + c = AiTensorget{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "AI.TENSORGET") + return c +} + +func (c AiTensorget) Key(key string) AiTensorgetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiTensorgetKey)(c) +} + +type AiTensorgetFormatBlob Completed + +func (c AiTensorgetFormatBlob) Build() Completed { + return Completed(c) +} + +func (c AiTensorgetFormatBlob) Cache() Cacheable { + return Cacheable(c) +} + +type AiTensorgetFormatValues Completed + +func (c AiTensorgetFormatValues) Build() Completed { + return Completed(c) +} + +func (c AiTensorgetFormatValues) Cache() Cacheable { + return Cacheable(c) +} + +type AiTensorgetKey Completed + +func (c AiTensorgetKey) Meta() AiTensorgetMeta { + c.cs.s = append(c.cs.s, "META") + return (AiTensorgetMeta)(c) +} + +type AiTensorgetMeta Completed + +func (c AiTensorgetMeta) Blob() AiTensorgetFormatBlob { + c.cs.s = append(c.cs.s, "BLOB") + return (AiTensorgetFormatBlob)(c) +} + +func (c AiTensorgetMeta) Values() AiTensorgetFormatValues { + c.cs.s = append(c.cs.s, "VALUES") + return (AiTensorgetFormatValues)(c) +} + +func (c AiTensorgetMeta) Build() Completed { + return Completed(c) +} + +func (c AiTensorgetMeta) Cache() Cacheable { + return Cacheable(c) +} + 
+type AiTensorset Completed + +func (b Builder) AiTensorset() (c AiTensorset) { + c = AiTensorset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AI.TENSORSET") + return c +} + +func (c AiTensorset) Key(key string) AiTensorsetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AiTensorsetKey)(c) +} + +type AiTensorsetBlob Completed + +func (c AiTensorsetBlob) Values(value ...string) AiTensorsetValues { + c.cs.s = append(c.cs.s, "VALUES") + c.cs.s = append(c.cs.s, value...) + return (AiTensorsetValues)(c) +} + +func (c AiTensorsetBlob) Build() Completed { + return Completed(c) +} + +type AiTensorsetKey Completed + +func (c AiTensorsetKey) Float() AiTensorsetTypeFloat { + c.cs.s = append(c.cs.s, "FLOAT") + return (AiTensorsetTypeFloat)(c) +} + +func (c AiTensorsetKey) Double() AiTensorsetTypeDouble { + c.cs.s = append(c.cs.s, "DOUBLE") + return (AiTensorsetTypeDouble)(c) +} + +func (c AiTensorsetKey) Int8() AiTensorsetTypeInt8 { + c.cs.s = append(c.cs.s, "INT8") + return (AiTensorsetTypeInt8)(c) +} + +func (c AiTensorsetKey) Int16() AiTensorsetTypeInt16 { + c.cs.s = append(c.cs.s, "INT16") + return (AiTensorsetTypeInt16)(c) +} + +func (c AiTensorsetKey) Int32() AiTensorsetTypeInt32 { + c.cs.s = append(c.cs.s, "INT32") + return (AiTensorsetTypeInt32)(c) +} + +func (c AiTensorsetKey) Int64() AiTensorsetTypeInt64 { + c.cs.s = append(c.cs.s, "INT64") + return (AiTensorsetTypeInt64)(c) +} + +func (c AiTensorsetKey) Uint8() AiTensorsetTypeUint8 { + c.cs.s = append(c.cs.s, "UINT8") + return (AiTensorsetTypeUint8)(c) +} + +func (c AiTensorsetKey) Uint16() AiTensorsetTypeUint16 { + c.cs.s = append(c.cs.s, "UINT16") + return (AiTensorsetTypeUint16)(c) +} + +func (c AiTensorsetKey) String() AiTensorsetTypeString { + c.cs.s = append(c.cs.s, "STRING") + return (AiTensorsetTypeString)(c) +} + +func (c AiTensorsetKey) Bool() AiTensorsetTypeBool { + c.cs.s = append(c.cs.s, "BOOL") + return (AiTensorsetTypeBool)(c) +} + +type AiTensorsetShape Completed + +func (c AiTensorsetShape) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c AiTensorsetShape) Blob(blob string) AiTensorsetBlob { + c.cs.s = append(c.cs.s, "BLOB", blob) + return (AiTensorsetBlob)(c) +} + +func (c AiTensorsetShape) Values(value ...string) AiTensorsetValues { + c.cs.s = append(c.cs.s, "VALUES") + c.cs.s = append(c.cs.s, value...) 
+ return (AiTensorsetValues)(c) +} + +func (c AiTensorsetShape) Build() Completed { + return Completed(c) +} + +type AiTensorsetTypeBool Completed + +func (c AiTensorsetTypeBool) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeDouble Completed + +func (c AiTensorsetTypeDouble) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeFloat Completed + +func (c AiTensorsetTypeFloat) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeInt16 Completed + +func (c AiTensorsetTypeInt16) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeInt32 Completed + +func (c AiTensorsetTypeInt32) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeInt64 Completed + +func (c AiTensorsetTypeInt64) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeInt8 Completed + +func (c AiTensorsetTypeInt8) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeString Completed + +func (c AiTensorsetTypeString) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeUint16 Completed + +func (c AiTensorsetTypeUint16) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetTypeUint8 Completed + +func (c AiTensorsetTypeUint8) Shape(shape ...int64) AiTensorsetShape { + for _, n := range shape { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (AiTensorsetShape)(c) +} + +type AiTensorsetValues Completed + +func (c AiTensorsetValues) Values(value ...string) AiTensorsetValues { + c.cs.s = append(c.cs.s, "VALUES") + c.cs.s = append(c.cs.s, value...) 
+ return c +} + +func (c AiTensorsetValues) Build() Completed { + return Completed(c) +} + +type Append Completed + +func (b Builder) Append() (c Append) { + c = Append{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "APPEND") + return c +} + +func (c Append) Key(key string) AppendKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (AppendKey)(c) +} + +type AppendKey Completed + +func (c AppendKey) Value(value string) AppendValue { + c.cs.s = append(c.cs.s, value) + return (AppendValue)(c) +} + +type AppendValue Completed + +func (c AppendValue) Build() Completed { + return Completed(c) +} + +type Asking Completed + +func (b Builder) Asking() (c Asking) { + c = Asking{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ASKING") + return c +} + +func (c Asking) Build() Completed { + return Completed(c) +} + +type Auth Completed + +func (b Builder) Auth() (c Auth) { + c = Auth{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "AUTH") + return c +} + +func (c Auth) Username(username string) AuthUsername { + c.cs.s = append(c.cs.s, username) + return (AuthUsername)(c) +} + +func (c Auth) Password(password string) AuthPassword { + c.cs.s = append(c.cs.s, password) + return (AuthPassword)(c) +} + +type AuthPassword Completed + +func (c AuthPassword) Build() Completed { + return Completed(c) +} + +type AuthUsername Completed + +func (c AuthUsername) Password(password string) AuthPassword { + c.cs.s = append(c.cs.s, password) + return (AuthPassword)(c) +} + +type BfAdd Completed + +func (b Builder) BfAdd() (c BfAdd) { + c = BfAdd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BF.ADD") + return c +} + +func (c BfAdd) Key(key string) BfAddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfAddKey)(c) +} + +type BfAddItem Completed + +func (c BfAddItem) Build() Completed { + return Completed(c) +} + +type BfAddKey Completed + +func (c BfAddKey) Item(item string) BfAddItem { + c.cs.s = append(c.cs.s, item) + return (BfAddItem)(c) +} + +type BfExists Completed + +func (b Builder) BfExists() (c BfExists) { + c = BfExists{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BF.EXISTS") + return c +} + +func (c BfExists) Key(key string) BfExistsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfExistsKey)(c) +} + +type BfExistsItem Completed + +func (c BfExistsItem) Build() Completed { + return Completed(c) +} + +func (c BfExistsItem) Cache() Cacheable { + return Cacheable(c) +} + +type BfExistsKey Completed + +func (c BfExistsKey) Item(item string) BfExistsItem { + c.cs.s = append(c.cs.s, item) + return (BfExistsItem)(c) +} + +type BfInfo Completed + +func (b Builder) BfInfo() (c BfInfo) { + c = BfInfo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BF.INFO") + return c +} + +func (c BfInfo) Key(key string) BfInfoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfInfoKey)(c) +} + +type BfInfoKey Completed + +func (c BfInfoKey) Capacity() BfInfoSingleValueCapacity { + c.cs.s = append(c.cs.s, "CAPACITY") + return (BfInfoSingleValueCapacity)(c) +} + +func (c BfInfoKey) Size() BfInfoSingleValueSize { + c.cs.s = append(c.cs.s, "SIZE") + return (BfInfoSingleValueSize)(c) +} + +func (c BfInfoKey) 
Filters() BfInfoSingleValueFilters { + c.cs.s = append(c.cs.s, "FILTERS") + return (BfInfoSingleValueFilters)(c) +} + +func (c BfInfoKey) Items() BfInfoSingleValueItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInfoSingleValueItems)(c) +} + +func (c BfInfoKey) Expansion() BfInfoSingleValueExpansion { + c.cs.s = append(c.cs.s, "EXPANSION") + return (BfInfoSingleValueExpansion)(c) +} + +func (c BfInfoKey) Build() Completed { + return Completed(c) +} + +func (c BfInfoKey) Cache() Cacheable { + return Cacheable(c) +} + +type BfInfoSingleValueCapacity Completed + +func (c BfInfoSingleValueCapacity) Build() Completed { + return Completed(c) +} + +func (c BfInfoSingleValueCapacity) Cache() Cacheable { + return Cacheable(c) +} + +type BfInfoSingleValueExpansion Completed + +func (c BfInfoSingleValueExpansion) Build() Completed { + return Completed(c) +} + +func (c BfInfoSingleValueExpansion) Cache() Cacheable { + return Cacheable(c) +} + +type BfInfoSingleValueFilters Completed + +func (c BfInfoSingleValueFilters) Build() Completed { + return Completed(c) +} + +func (c BfInfoSingleValueFilters) Cache() Cacheable { + return Cacheable(c) +} + +type BfInfoSingleValueItems Completed + +func (c BfInfoSingleValueItems) Build() Completed { + return Completed(c) +} + +func (c BfInfoSingleValueItems) Cache() Cacheable { + return Cacheable(c) +} + +type BfInfoSingleValueSize Completed + +func (c BfInfoSingleValueSize) Build() Completed { + return Completed(c) +} + +func (c BfInfoSingleValueSize) Cache() Cacheable { + return Cacheable(c) +} + +type BfInsert Completed + +func (b Builder) BfInsert() (c BfInsert) { + c = BfInsert{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BF.INSERT") + return c +} + +func (c BfInsert) Key(key string) BfInsertKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfInsertKey)(c) +} + +type BfInsertCapacity Completed + +func (c BfInsertCapacity) Error(error float64) BfInsertError { + c.cs.s = append(c.cs.s, "ERROR", strconv.FormatFloat(error, 'f', -1, 64)) + return (BfInsertError)(c) +} + +func (c BfInsertCapacity) Expansion(expansion int64) BfInsertExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (BfInsertExpansion)(c) +} + +func (c BfInsertCapacity) Nocreate() BfInsertNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (BfInsertNocreate)(c) +} + +func (c BfInsertCapacity) Nonscaling() BfInsertNonscaling { + c.cs.s = append(c.cs.s, "NONSCALING") + return (BfInsertNonscaling)(c) +} + +func (c BfInsertCapacity) Items() BfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInsertItems)(c) +} + +type BfInsertError Completed + +func (c BfInsertError) Expansion(expansion int64) BfInsertExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (BfInsertExpansion)(c) +} + +func (c BfInsertError) Nocreate() BfInsertNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (BfInsertNocreate)(c) +} + +func (c BfInsertError) Nonscaling() BfInsertNonscaling { + c.cs.s = append(c.cs.s, "NONSCALING") + return (BfInsertNonscaling)(c) +} + +func (c BfInsertError) Items() BfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInsertItems)(c) +} + +type BfInsertExpansion Completed + +func (c BfInsertExpansion) Nocreate() BfInsertNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (BfInsertNocreate)(c) +} + +func (c BfInsertExpansion) Nonscaling() BfInsertNonscaling { + c.cs.s = 
append(c.cs.s, "NONSCALING") + return (BfInsertNonscaling)(c) +} + +func (c BfInsertExpansion) Items() BfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInsertItems)(c) +} + +type BfInsertItem Completed + +func (c BfInsertItem) Item(item ...string) BfInsertItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c BfInsertItem) Build() Completed { + return Completed(c) +} + +type BfInsertItems Completed + +func (c BfInsertItems) Item(item ...string) BfInsertItem { + c.cs.s = append(c.cs.s, item...) + return (BfInsertItem)(c) +} + +type BfInsertKey Completed + +func (c BfInsertKey) Capacity(capacity int64) BfInsertCapacity { + c.cs.s = append(c.cs.s, "CAPACITY", strconv.FormatInt(capacity, 10)) + return (BfInsertCapacity)(c) +} + +func (c BfInsertKey) Error(error float64) BfInsertError { + c.cs.s = append(c.cs.s, "ERROR", strconv.FormatFloat(error, 'f', -1, 64)) + return (BfInsertError)(c) +} + +func (c BfInsertKey) Expansion(expansion int64) BfInsertExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (BfInsertExpansion)(c) +} + +func (c BfInsertKey) Nocreate() BfInsertNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (BfInsertNocreate)(c) +} + +func (c BfInsertKey) Nonscaling() BfInsertNonscaling { + c.cs.s = append(c.cs.s, "NONSCALING") + return (BfInsertNonscaling)(c) +} + +func (c BfInsertKey) Items() BfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInsertItems)(c) +} + +type BfInsertNocreate Completed + +func (c BfInsertNocreate) Nonscaling() BfInsertNonscaling { + c.cs.s = append(c.cs.s, "NONSCALING") + return (BfInsertNonscaling)(c) +} + +func (c BfInsertNocreate) Items() BfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInsertItems)(c) +} + +type BfInsertNonscaling Completed + +func (c BfInsertNonscaling) Items() BfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (BfInsertItems)(c) +} + +type BfLoadchunk Completed + +func (b Builder) BfLoadchunk() (c BfLoadchunk) { + c = BfLoadchunk{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BF.LOADCHUNK") + return c +} + +func (c BfLoadchunk) Key(key string) BfLoadchunkKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfLoadchunkKey)(c) +} + +type BfLoadchunkData Completed + +func (c BfLoadchunkData) Build() Completed { + return Completed(c) +} + +type BfLoadchunkIterator Completed + +func (c BfLoadchunkIterator) Data(data string) BfLoadchunkData { + c.cs.s = append(c.cs.s, data) + return (BfLoadchunkData)(c) +} + +type BfLoadchunkKey Completed + +func (c BfLoadchunkKey) Iterator(iterator int64) BfLoadchunkIterator { + c.cs.s = append(c.cs.s, strconv.FormatInt(iterator, 10)) + return (BfLoadchunkIterator)(c) +} + +type BfMadd Completed + +func (b Builder) BfMadd() (c BfMadd) { + c = BfMadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BF.MADD") + return c +} + +func (c BfMadd) Key(key string) BfMaddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfMaddKey)(c) +} + +type BfMaddItem Completed + +func (c BfMaddItem) Item(item ...string) BfMaddItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c BfMaddItem) Build() Completed { + return Completed(c) +} + +type BfMaddKey Completed + +func (c BfMaddKey) Item(item ...string) BfMaddItem { + c.cs.s = append(c.cs.s, item...) 
+ return (BfMaddItem)(c) +} + +type BfMexists Completed + +func (b Builder) BfMexists() (c BfMexists) { + c = BfMexists{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BF.MEXISTS") + return c +} + +func (c BfMexists) Key(key string) BfMexistsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfMexistsKey)(c) +} + +type BfMexistsItem Completed + +func (c BfMexistsItem) Item(item ...string) BfMexistsItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c BfMexistsItem) Build() Completed { + return Completed(c) +} + +type BfMexistsKey Completed + +func (c BfMexistsKey) Item(item ...string) BfMexistsItem { + c.cs.s = append(c.cs.s, item...) + return (BfMexistsItem)(c) +} + +type BfReserve Completed + +func (b Builder) BfReserve() (c BfReserve) { + c = BfReserve{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BF.RESERVE") + return c +} + +func (c BfReserve) Key(key string) BfReserveKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfReserveKey)(c) +} + +type BfReserveCapacity Completed + +func (c BfReserveCapacity) Expansion(expansion int64) BfReserveExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (BfReserveExpansion)(c) +} + +func (c BfReserveCapacity) Nonscaling() BfReserveNonscaling { + c.cs.s = append(c.cs.s, "NONSCALING") + return (BfReserveNonscaling)(c) +} + +func (c BfReserveCapacity) Build() Completed { + return Completed(c) +} + +type BfReserveErrorRate Completed + +func (c BfReserveErrorRate) Capacity(capacity int64) BfReserveCapacity { + c.cs.s = append(c.cs.s, strconv.FormatInt(capacity, 10)) + return (BfReserveCapacity)(c) +} + +type BfReserveExpansion Completed + +func (c BfReserveExpansion) Nonscaling() BfReserveNonscaling { + c.cs.s = append(c.cs.s, "NONSCALING") + return (BfReserveNonscaling)(c) +} + +func (c BfReserveExpansion) Build() Completed { + return Completed(c) +} + +type BfReserveKey Completed + +func (c BfReserveKey) ErrorRate(errorRate float64) BfReserveErrorRate { + c.cs.s = append(c.cs.s, strconv.FormatFloat(errorRate, 'f', -1, 64)) + return (BfReserveErrorRate)(c) +} + +type BfReserveNonscaling Completed + +func (c BfReserveNonscaling) Build() Completed { + return Completed(c) +} + +type BfScandump Completed + +func (b Builder) BfScandump() (c BfScandump) { + c = BfScandump{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BF.SCANDUMP") + return c +} + +func (c BfScandump) Key(key string) BfScandumpKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BfScandumpKey)(c) +} + +type BfScandumpIterator Completed + +func (c BfScandumpIterator) Build() Completed { + return Completed(c) +} + +type BfScandumpKey Completed + +func (c BfScandumpKey) Iterator(iterator int64) BfScandumpIterator { + c.cs.s = append(c.cs.s, strconv.FormatInt(iterator, 10)) + return (BfScandumpIterator)(c) +} + +type Bgrewriteaof Completed + +func (b Builder) Bgrewriteaof() (c Bgrewriteaof) { + c = Bgrewriteaof{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BGREWRITEAOF") + return c +} + +func (c Bgrewriteaof) Build() Completed { + return Completed(c) +} + +type Bgsave Completed + +func (b Builder) Bgsave() (c Bgsave) { + c = Bgsave{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BGSAVE") + return c +} + +func (c 
Bgsave) Schedule() BgsaveSchedule { + c.cs.s = append(c.cs.s, "SCHEDULE") + return (BgsaveSchedule)(c) +} + +func (c Bgsave) Build() Completed { + return Completed(c) +} + +type BgsaveSchedule Completed + +func (c BgsaveSchedule) Build() Completed { + return Completed(c) +} + +type Bitcount Completed + +func (b Builder) Bitcount() (c Bitcount) { + c = Bitcount{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BITCOUNT") + return c +} + +func (c Bitcount) Key(key string) BitcountKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BitcountKey)(c) +} + +type BitcountIndexEnd Completed + +func (c BitcountIndexEnd) Byte() BitcountIndexIndexUnitByte { + c.cs.s = append(c.cs.s, "BYTE") + return (BitcountIndexIndexUnitByte)(c) +} + +func (c BitcountIndexEnd) Bit() BitcountIndexIndexUnitBit { + c.cs.s = append(c.cs.s, "BIT") + return (BitcountIndexIndexUnitBit)(c) +} + +func (c BitcountIndexEnd) Build() Completed { + return Completed(c) +} + +func (c BitcountIndexEnd) Cache() Cacheable { + return Cacheable(c) +} + +type BitcountIndexIndexUnitBit Completed + +func (c BitcountIndexIndexUnitBit) Build() Completed { + return Completed(c) +} + +func (c BitcountIndexIndexUnitBit) Cache() Cacheable { + return Cacheable(c) +} + +type BitcountIndexIndexUnitByte Completed + +func (c BitcountIndexIndexUnitByte) Build() Completed { + return Completed(c) +} + +func (c BitcountIndexIndexUnitByte) Cache() Cacheable { + return Cacheable(c) +} + +type BitcountIndexStart Completed + +func (c BitcountIndexStart) End(end int64) BitcountIndexEnd { + c.cs.s = append(c.cs.s, strconv.FormatInt(end, 10)) + return (BitcountIndexEnd)(c) +} + +type BitcountKey Completed + +func (c BitcountKey) Start(start int64) BitcountIndexStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (BitcountIndexStart)(c) +} + +func (c BitcountKey) Build() Completed { + return Completed(c) +} + +func (c BitcountKey) Cache() Cacheable { + return Cacheable(c) +} + +type Bitfield Completed + +func (b Builder) Bitfield() (c Bitfield) { + c = Bitfield{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BITFIELD") + return c +} + +func (c Bitfield) Key(key string) BitfieldKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BitfieldKey)(c) +} + +type BitfieldKey Completed + +func (c BitfieldKey) Get(encoding string, offset int64) BitfieldOperationGet { + c.cs.s = append(c.cs.s, "GET", encoding, strconv.FormatInt(offset, 10)) + return (BitfieldOperationGet)(c) +} + +func (c BitfieldKey) OverflowWrap() BitfieldOperationWriteOverflowWrap { + c.cs.s = append(c.cs.s, "OVERFLOW", "WRAP") + return (BitfieldOperationWriteOverflowWrap)(c) +} + +func (c BitfieldKey) OverflowSat() BitfieldOperationWriteOverflowSat { + c.cs.s = append(c.cs.s, "OVERFLOW", "SAT") + return (BitfieldOperationWriteOverflowSat)(c) +} + +func (c BitfieldKey) OverflowFail() BitfieldOperationWriteOverflowFail { + c.cs.s = append(c.cs.s, "OVERFLOW", "FAIL") + return (BitfieldOperationWriteOverflowFail)(c) +} + +func (c BitfieldKey) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return (BitfieldOperationWriteSetSet)(c) +} + +func (c BitfieldKey) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + 
c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return (BitfieldOperationWriteSetIncrby)(c) +} + +type BitfieldOperationGet Completed + +func (c BitfieldOperationGet) OverflowWrap() BitfieldOperationWriteOverflowWrap { + c.cs.s = append(c.cs.s, "OVERFLOW", "WRAP") + return (BitfieldOperationWriteOverflowWrap)(c) +} + +func (c BitfieldOperationGet) OverflowSat() BitfieldOperationWriteOverflowSat { + c.cs.s = append(c.cs.s, "OVERFLOW", "SAT") + return (BitfieldOperationWriteOverflowSat)(c) +} + +func (c BitfieldOperationGet) OverflowFail() BitfieldOperationWriteOverflowFail { + c.cs.s = append(c.cs.s, "OVERFLOW", "FAIL") + return (BitfieldOperationWriteOverflowFail)(c) +} + +func (c BitfieldOperationGet) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return (BitfieldOperationWriteSetSet)(c) +} + +func (c BitfieldOperationGet) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return (BitfieldOperationWriteSetIncrby)(c) +} + +type BitfieldOperationWriteOverflowFail Completed + +func (c BitfieldOperationWriteOverflowFail) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return (BitfieldOperationWriteSetSet)(c) +} + +func (c BitfieldOperationWriteOverflowFail) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return (BitfieldOperationWriteSetIncrby)(c) +} + +type BitfieldOperationWriteOverflowSat Completed + +func (c BitfieldOperationWriteOverflowSat) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return (BitfieldOperationWriteSetSet)(c) +} + +func (c BitfieldOperationWriteOverflowSat) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return (BitfieldOperationWriteSetIncrby)(c) +} + +type BitfieldOperationWriteOverflowWrap Completed + +func (c BitfieldOperationWriteOverflowWrap) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return (BitfieldOperationWriteSetSet)(c) +} + +func (c BitfieldOperationWriteOverflowWrap) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return (BitfieldOperationWriteSetIncrby)(c) +} + +type BitfieldOperationWriteSetIncrby Completed + +func (c BitfieldOperationWriteSetIncrby) Get(encoding string, offset int64) BitfieldOperationGet { + c.cs.s = append(c.cs.s, "GET", encoding, strconv.FormatInt(offset, 10)) + return (BitfieldOperationGet)(c) +} + +func (c BitfieldOperationWriteSetIncrby) OverflowWrap() BitfieldOperationWriteOverflowWrap { + c.cs.s = append(c.cs.s, 
"OVERFLOW", "WRAP") + return (BitfieldOperationWriteOverflowWrap)(c) +} + +func (c BitfieldOperationWriteSetIncrby) OverflowSat() BitfieldOperationWriteOverflowSat { + c.cs.s = append(c.cs.s, "OVERFLOW", "SAT") + return (BitfieldOperationWriteOverflowSat)(c) +} + +func (c BitfieldOperationWriteSetIncrby) OverflowFail() BitfieldOperationWriteOverflowFail { + c.cs.s = append(c.cs.s, "OVERFLOW", "FAIL") + return (BitfieldOperationWriteOverflowFail)(c) +} + +func (c BitfieldOperationWriteSetIncrby) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return (BitfieldOperationWriteSetSet)(c) +} + +func (c BitfieldOperationWriteSetIncrby) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return c +} + +func (c BitfieldOperationWriteSetIncrby) Build() Completed { + return Completed(c) +} + +type BitfieldOperationWriteSetSet Completed + +func (c BitfieldOperationWriteSetSet) Incrby(encoding string, offset int64, increment int64) BitfieldOperationWriteSetIncrby { + c.cs.s = append(c.cs.s, "INCRBY", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(increment, 10)) + return (BitfieldOperationWriteSetIncrby)(c) +} + +func (c BitfieldOperationWriteSetSet) Get(encoding string, offset int64) BitfieldOperationGet { + c.cs.s = append(c.cs.s, "GET", encoding, strconv.FormatInt(offset, 10)) + return (BitfieldOperationGet)(c) +} + +func (c BitfieldOperationWriteSetSet) OverflowWrap() BitfieldOperationWriteOverflowWrap { + c.cs.s = append(c.cs.s, "OVERFLOW", "WRAP") + return (BitfieldOperationWriteOverflowWrap)(c) +} + +func (c BitfieldOperationWriteSetSet) OverflowSat() BitfieldOperationWriteOverflowSat { + c.cs.s = append(c.cs.s, "OVERFLOW", "SAT") + return (BitfieldOperationWriteOverflowSat)(c) +} + +func (c BitfieldOperationWriteSetSet) OverflowFail() BitfieldOperationWriteOverflowFail { + c.cs.s = append(c.cs.s, "OVERFLOW", "FAIL") + return (BitfieldOperationWriteOverflowFail)(c) +} + +func (c BitfieldOperationWriteSetSet) Set(encoding string, offset int64, value int64) BitfieldOperationWriteSetSet { + c.cs.s = append(c.cs.s, "SET", encoding, strconv.FormatInt(offset, 10), strconv.FormatInt(value, 10)) + return c +} + +func (c BitfieldOperationWriteSetSet) Build() Completed { + return Completed(c) +} + +type BitfieldRo Completed + +func (b Builder) BitfieldRo() (c BitfieldRo) { + c = BitfieldRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BITFIELD_RO") + return c +} + +func (c BitfieldRo) Key(key string) BitfieldRoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BitfieldRoKey)(c) +} + +type BitfieldRoGet Completed + +func (c BitfieldRoGet) Get(encoding string, offset int64) BitfieldRoGet { + c.cs.s = append(c.cs.s, "GET", encoding, strconv.FormatInt(offset, 10)) + return c +} + +func (c BitfieldRoGet) Build() Completed { + return Completed(c) +} + +func (c BitfieldRoGet) Cache() Cacheable { + return Cacheable(c) +} + +type BitfieldRoKey Completed + +func (c BitfieldRoKey) Get() BitfieldRoGet { + return (BitfieldRoGet)(c) +} + +type Bitop Completed + +func (b Builder) Bitop() (c Bitop) { + c = Bitop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "BITOP") + return c +} + +func (c Bitop) 
Operation(operation string) BitopOperation { + c.cs.s = append(c.cs.s, operation) + return (BitopOperation)(c) +} + +type BitopDestkey Completed + +func (c BitopDestkey) Key(key ...string) BitopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (BitopKey)(c) +} + +type BitopKey Completed + +func (c BitopKey) Key(key ...string) BitopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c BitopKey) Build() Completed { + return Completed(c) +} + +type BitopOperation Completed + +func (c BitopOperation) Destkey(destkey string) BitopDestkey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destkey) + } else { + c.ks = check(c.ks, slot(destkey)) + } + c.cs.s = append(c.cs.s, destkey) + return (BitopDestkey)(c) +} + +type Bitpos Completed + +func (b Builder) Bitpos() (c Bitpos) { + c = Bitpos{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "BITPOS") + return c +} + +func (c Bitpos) Key(key string) BitposKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (BitposKey)(c) +} + +type BitposBit Completed + +func (c BitposBit) Start(start int64) BitposIndexStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (BitposIndexStart)(c) +} + +func (c BitposBit) Build() Completed { + return Completed(c) +} + +func (c BitposBit) Cache() Cacheable { + return Cacheable(c) +} + +type BitposIndexEndIndexEnd Completed + +func (c BitposIndexEndIndexEnd) Byte() BitposIndexEndIndexIndexUnitByte { + c.cs.s = append(c.cs.s, "BYTE") + return (BitposIndexEndIndexIndexUnitByte)(c) +} + +func (c BitposIndexEndIndexEnd) Bit() BitposIndexEndIndexIndexUnitBit { + c.cs.s = append(c.cs.s, "BIT") + return (BitposIndexEndIndexIndexUnitBit)(c) +} + +func (c BitposIndexEndIndexEnd) Build() Completed { + return Completed(c) +} + +func (c BitposIndexEndIndexEnd) Cache() Cacheable { + return Cacheable(c) +} + +type BitposIndexEndIndexIndexUnitBit Completed + +func (c BitposIndexEndIndexIndexUnitBit) Build() Completed { + return Completed(c) +} + +func (c BitposIndexEndIndexIndexUnitBit) Cache() Cacheable { + return Cacheable(c) +} + +type BitposIndexEndIndexIndexUnitByte Completed + +func (c BitposIndexEndIndexIndexUnitByte) Build() Completed { + return Completed(c) +} + +func (c BitposIndexEndIndexIndexUnitByte) Cache() Cacheable { + return Cacheable(c) +} + +type BitposIndexStart Completed + +func (c BitposIndexStart) End(end int64) BitposIndexEndIndexEnd { + c.cs.s = append(c.cs.s, strconv.FormatInt(end, 10)) + return (BitposIndexEndIndexEnd)(c) +} + +func (c BitposIndexStart) Build() Completed { + return Completed(c) +} + +func (c BitposIndexStart) Cache() Cacheable { + return Cacheable(c) +} + +type BitposKey Completed + +func (c BitposKey) Bit(bit int64) BitposBit { + c.cs.s = append(c.cs.s, strconv.FormatInt(bit, 10)) + return (BitposBit)(c) +} + +type Blmove Completed + +func (b Builder) Blmove() (c Blmove) { + c = Blmove{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BLMOVE") + return c +} + +func (c Blmove) Source(source string) BlmoveSource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, 
slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (BlmoveSource)(c) +} + +type BlmoveDestination Completed + +func (c BlmoveDestination) Left() BlmoveWherefromLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (BlmoveWherefromLeft)(c) +} + +func (c BlmoveDestination) Right() BlmoveWherefromRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (BlmoveWherefromRight)(c) +} + +type BlmoveSource Completed + +func (c BlmoveSource) Destination(destination string) BlmoveDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (BlmoveDestination)(c) +} + +type BlmoveTimeout Completed + +func (c BlmoveTimeout) Build() Completed { + return Completed(c) +} + +type BlmoveWherefromLeft Completed + +func (c BlmoveWherefromLeft) Left() BlmoveWheretoLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (BlmoveWheretoLeft)(c) +} + +func (c BlmoveWherefromLeft) Right() BlmoveWheretoRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (BlmoveWheretoRight)(c) +} + +type BlmoveWherefromRight Completed + +func (c BlmoveWherefromRight) Left() BlmoveWheretoLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (BlmoveWheretoLeft)(c) +} + +func (c BlmoveWherefromRight) Right() BlmoveWheretoRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (BlmoveWheretoRight)(c) +} + +type BlmoveWheretoLeft Completed + +func (c BlmoveWheretoLeft) Timeout(timeout float64) BlmoveTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BlmoveTimeout)(c) +} + +type BlmoveWheretoRight Completed + +func (c BlmoveWheretoRight) Timeout(timeout float64) BlmoveTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BlmoveTimeout)(c) +} + +type Blmpop Completed + +func (b Builder) Blmpop() (c Blmpop) { + c = Blmpop{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BLMPOP") + return c +} + +func (c Blmpop) Timeout(timeout float64) BlmpopTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BlmpopTimeout)(c) +} + +type BlmpopCount Completed + +func (c BlmpopCount) Build() Completed { + return Completed(c) +} + +type BlmpopKey Completed + +func (c BlmpopKey) Key(key ...string) BlmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c BlmpopKey) Left() BlmpopWhereLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (BlmpopWhereLeft)(c) +} + +func (c BlmpopKey) Right() BlmpopWhereRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (BlmpopWhereRight)(c) +} + +type BlmpopNumkeys Completed + +func (c BlmpopNumkeys) Key(key ...string) BlmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (BlmpopKey)(c) +} + +type BlmpopTimeout Completed + +func (c BlmpopTimeout) Numkeys(numkeys int64) BlmpopNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (BlmpopNumkeys)(c) +} + +type BlmpopWhereLeft Completed + +func (c BlmpopWhereLeft) Count(count int64) BlmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (BlmpopCount)(c) +} + +func (c BlmpopWhereLeft) Build() Completed { + return Completed(c) +} + +type BlmpopWhereRight Completed + +func (c BlmpopWhereRight) Count(count int64) BlmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (BlmpopCount)(c) +} + +func (c BlmpopWhereRight) Build() Completed { + return Completed(c) +} + +type Blpop Completed + +func (b Builder) Blpop() (c Blpop) { + c = Blpop{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BLPOP") + return c +} + +func (c Blpop) Key(key ...string) BlpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (BlpopKey)(c) +} + +type BlpopKey Completed + +func (c BlpopKey) Key(key ...string) BlpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c BlpopKey) Timeout(timeout float64) BlpopTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BlpopTimeout)(c) +} + +type BlpopTimeout Completed + +func (c BlpopTimeout) Build() Completed { + return Completed(c) +} + +type Brpop Completed + +func (b Builder) Brpop() (c Brpop) { + c = Brpop{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BRPOP") + return c +} + +func (c Brpop) Key(key ...string) BrpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (BrpopKey)(c) +} + +type BrpopKey Completed + +func (c BrpopKey) Key(key ...string) BrpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return c +} + +func (c BrpopKey) Timeout(timeout float64) BrpopTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BrpopTimeout)(c) +} + +type BrpopTimeout Completed + +func (c BrpopTimeout) Build() Completed { + return Completed(c) +} + +type Brpoplpush Completed + +func (b Builder) Brpoplpush() (c Brpoplpush) { + c = Brpoplpush{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BRPOPLPUSH") + return c +} + +func (c Brpoplpush) Source(source string) BrpoplpushSource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (BrpoplpushSource)(c) +} + +type BrpoplpushDestination Completed + +func (c BrpoplpushDestination) Timeout(timeout float64) BrpoplpushTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BrpoplpushTimeout)(c) +} + +type BrpoplpushSource Completed + +func (c BrpoplpushSource) Destination(destination string) BrpoplpushDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (BrpoplpushDestination)(c) +} + +type BrpoplpushTimeout Completed + +func (c BrpoplpushTimeout) Build() Completed { + return Completed(c) +} + +type Bzmpop Completed + +func (b Builder) Bzmpop() (c Bzmpop) { + c = Bzmpop{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BZMPOP") + return c +} + +func (c Bzmpop) Timeout(timeout float64) BzmpopTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BzmpopTimeout)(c) +} + +type BzmpopCount Completed + +func (c BzmpopCount) Build() Completed { + return Completed(c) +} + +type BzmpopKey Completed + +func (c BzmpopKey) Key(key ...string) BzmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c BzmpopKey) Min() BzmpopWhereMin { + c.cs.s = append(c.cs.s, "MIN") + return (BzmpopWhereMin)(c) +} + +func (c BzmpopKey) Max() BzmpopWhereMax { + c.cs.s = append(c.cs.s, "MAX") + return (BzmpopWhereMax)(c) +} + +type BzmpopNumkeys Completed + +func (c BzmpopNumkeys) Key(key ...string) BzmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (BzmpopKey)(c) +} + +type BzmpopTimeout Completed + +func (c BzmpopTimeout) Numkeys(numkeys int64) BzmpopNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (BzmpopNumkeys)(c) +} + +type BzmpopWhereMax Completed + +func (c BzmpopWhereMax) Count(count int64) BzmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (BzmpopCount)(c) +} + +func (c BzmpopWhereMax) Build() Completed { + return Completed(c) +} + +type BzmpopWhereMin Completed + +func (c BzmpopWhereMin) Count(count int64) BzmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (BzmpopCount)(c) +} + +func (c BzmpopWhereMin) Build() Completed { + return Completed(c) +} + +type Bzpopmax Completed + +func (b Builder) Bzpopmax() (c Bzpopmax) { + c = Bzpopmax{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BZPOPMAX") + return c +} + +func (c Bzpopmax) Key(key ...string) BzpopmaxKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (BzpopmaxKey)(c) +} + +type BzpopmaxKey Completed + +func (c BzpopmaxKey) Key(key ...string) BzpopmaxKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c BzpopmaxKey) Timeout(timeout float64) BzpopmaxTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BzpopmaxTimeout)(c) +} + +type BzpopmaxTimeout Completed + +func (c BzpopmaxTimeout) Build() Completed { + return Completed(c) +} + +type Bzpopmin Completed + +func (b Builder) Bzpopmin() (c Bzpopmin) { + c = Bzpopmin{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "BZPOPMIN") + return c +} + +func (c Bzpopmin) Key(key ...string) BzpopminKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (BzpopminKey)(c) +} + +type BzpopminKey Completed + +func (c BzpopminKey) Key(key ...string) BzpopminKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return c +} + +func (c BzpopminKey) Timeout(timeout float64) BzpopminTimeout { + c.cs.s = append(c.cs.s, strconv.FormatFloat(timeout, 'f', -1, 64)) + return (BzpopminTimeout)(c) +} + +type BzpopminTimeout Completed + +func (c BzpopminTimeout) Build() Completed { + return Completed(c) +} + +type CfAdd Completed + +func (b Builder) CfAdd() (c CfAdd) { + c = CfAdd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.ADD") + return c +} + +func (c CfAdd) Key(key string) CfAddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfAddKey)(c) +} + +type CfAddItem Completed + +func (c CfAddItem) Build() Completed { + return Completed(c) +} + +type CfAddKey Completed + +func (c CfAddKey) Item(item string) CfAddItem { + c.cs.s = append(c.cs.s, item) + return (CfAddItem)(c) +} + +type CfAddnx Completed + +func (b Builder) CfAddnx() (c CfAddnx) { + c = CfAddnx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.ADDNX") + return c +} + +func (c CfAddnx) Key(key string) CfAddnxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfAddnxKey)(c) +} + +type CfAddnxItem Completed + +func (c CfAddnxItem) Build() Completed { + return Completed(c) +} + +type CfAddnxKey Completed + +func (c CfAddnxKey) Item(item string) CfAddnxItem { + c.cs.s = append(c.cs.s, item) + return (CfAddnxItem)(c) +} + +type CfCount Completed + +func (b Builder) CfCount() (c CfCount) { + c = CfCount{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "CF.COUNT") + return c +} + +func (c CfCount) Key(key string) CfCountKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfCountKey)(c) +} + +type CfCountItem Completed + +func (c CfCountItem) Build() Completed { + return Completed(c) +} + +func (c CfCountItem) Cache() Cacheable { + return Cacheable(c) +} + +type CfCountKey Completed + +func (c CfCountKey) Item(item string) CfCountItem { + c.cs.s = append(c.cs.s, item) + return (CfCountItem)(c) +} + +type CfDel Completed + +func (b Builder) CfDel() (c CfDel) { + c = CfDel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.DEL") + return c +} + +func (c CfDel) Key(key string) CfDelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfDelKey)(c) +} + +type CfDelItem Completed + +func (c CfDelItem) Build() Completed { + return Completed(c) +} + +type CfDelKey Completed + +func (c CfDelKey) Item(item string) CfDelItem { + c.cs.s = append(c.cs.s, item) + return (CfDelItem)(c) +} + +type CfExists Completed + +func (b Builder) CfExists() (c CfExists) { + c = CfExists{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "CF.EXISTS") + return c +} + +func (c CfExists) Key(key string) CfExistsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfExistsKey)(c) +} + +type CfExistsItem Completed + +func (c CfExistsItem) Build() Completed { + return Completed(c) +} + +func (c CfExistsItem) Cache() Cacheable { + return Cacheable(c) +} + +type CfExistsKey Completed + +func (c CfExistsKey) Item(item string) CfExistsItem { + c.cs.s = append(c.cs.s, item) + return (CfExistsItem)(c) +} + +type CfInfo Completed + +func (b Builder) CfInfo() (c CfInfo) { + c = 
CfInfo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "CF.INFO") + return c +} + +func (c CfInfo) Key(key string) CfInfoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfInfoKey)(c) +} + +type CfInfoKey Completed + +func (c CfInfoKey) Build() Completed { + return Completed(c) +} + +func (c CfInfoKey) Cache() Cacheable { + return Cacheable(c) +} + +type CfInsert Completed + +func (b Builder) CfInsert() (c CfInsert) { + c = CfInsert{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.INSERT") + return c +} + +func (c CfInsert) Key(key string) CfInsertKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfInsertKey)(c) +} + +type CfInsertCapacity Completed + +func (c CfInsertCapacity) Nocreate() CfInsertNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (CfInsertNocreate)(c) +} + +func (c CfInsertCapacity) Items() CfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (CfInsertItems)(c) +} + +type CfInsertItem Completed + +func (c CfInsertItem) Item(item ...string) CfInsertItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c CfInsertItem) Build() Completed { + return Completed(c) +} + +type CfInsertItems Completed + +func (c CfInsertItems) Item(item ...string) CfInsertItem { + c.cs.s = append(c.cs.s, item...) + return (CfInsertItem)(c) +} + +type CfInsertKey Completed + +func (c CfInsertKey) Capacity(capacity int64) CfInsertCapacity { + c.cs.s = append(c.cs.s, "CAPACITY", strconv.FormatInt(capacity, 10)) + return (CfInsertCapacity)(c) +} + +func (c CfInsertKey) Nocreate() CfInsertNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (CfInsertNocreate)(c) +} + +func (c CfInsertKey) Items() CfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (CfInsertItems)(c) +} + +type CfInsertNocreate Completed + +func (c CfInsertNocreate) Items() CfInsertItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (CfInsertItems)(c) +} + +type CfInsertnx Completed + +func (b Builder) CfInsertnx() (c CfInsertnx) { + c = CfInsertnx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.INSERTNX") + return c +} + +func (c CfInsertnx) Key(key string) CfInsertnxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfInsertnxKey)(c) +} + +type CfInsertnxCapacity Completed + +func (c CfInsertnxCapacity) Nocreate() CfInsertnxNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (CfInsertnxNocreate)(c) +} + +func (c CfInsertnxCapacity) Items() CfInsertnxItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (CfInsertnxItems)(c) +} + +type CfInsertnxItem Completed + +func (c CfInsertnxItem) Item(item ...string) CfInsertnxItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c CfInsertnxItem) Build() Completed { + return Completed(c) +} + +type CfInsertnxItems Completed + +func (c CfInsertnxItems) Item(item ...string) CfInsertnxItem { + c.cs.s = append(c.cs.s, item...) 
+ return (CfInsertnxItem)(c) +} + +type CfInsertnxKey Completed + +func (c CfInsertnxKey) Capacity(capacity int64) CfInsertnxCapacity { + c.cs.s = append(c.cs.s, "CAPACITY", strconv.FormatInt(capacity, 10)) + return (CfInsertnxCapacity)(c) +} + +func (c CfInsertnxKey) Nocreate() CfInsertnxNocreate { + c.cs.s = append(c.cs.s, "NOCREATE") + return (CfInsertnxNocreate)(c) +} + +func (c CfInsertnxKey) Items() CfInsertnxItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (CfInsertnxItems)(c) +} + +type CfInsertnxNocreate Completed + +func (c CfInsertnxNocreate) Items() CfInsertnxItems { + c.cs.s = append(c.cs.s, "ITEMS") + return (CfInsertnxItems)(c) +} + +type CfLoadchunk Completed + +func (b Builder) CfLoadchunk() (c CfLoadchunk) { + c = CfLoadchunk{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.LOADCHUNK") + return c +} + +func (c CfLoadchunk) Key(key string) CfLoadchunkKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfLoadchunkKey)(c) +} + +type CfLoadchunkData Completed + +func (c CfLoadchunkData) Build() Completed { + return Completed(c) +} + +type CfLoadchunkIterator Completed + +func (c CfLoadchunkIterator) Data(data string) CfLoadchunkData { + c.cs.s = append(c.cs.s, data) + return (CfLoadchunkData)(c) +} + +type CfLoadchunkKey Completed + +func (c CfLoadchunkKey) Iterator(iterator int64) CfLoadchunkIterator { + c.cs.s = append(c.cs.s, strconv.FormatInt(iterator, 10)) + return (CfLoadchunkIterator)(c) +} + +type CfMexists Completed + +func (b Builder) CfMexists() (c CfMexists) { + c = CfMexists{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.MEXISTS") + return c +} + +func (c CfMexists) Key(key string) CfMexistsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfMexistsKey)(c) +} + +type CfMexistsItem Completed + +func (c CfMexistsItem) Item(item ...string) CfMexistsItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c CfMexistsItem) Build() Completed { + return Completed(c) +} + +type CfMexistsKey Completed + +func (c CfMexistsKey) Item(item ...string) CfMexistsItem { + c.cs.s = append(c.cs.s, item...) 
+ return (CfMexistsItem)(c) +} + +type CfReserve Completed + +func (b Builder) CfReserve() (c CfReserve) { + c = CfReserve{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CF.RESERVE") + return c +} + +func (c CfReserve) Key(key string) CfReserveKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfReserveKey)(c) +} + +type CfReserveBucketsize Completed + +func (c CfReserveBucketsize) Maxiterations(maxiterations int64) CfReserveMaxiterations { + c.cs.s = append(c.cs.s, "MAXITERATIONS", strconv.FormatInt(maxiterations, 10)) + return (CfReserveMaxiterations)(c) +} + +func (c CfReserveBucketsize) Expansion(expansion int64) CfReserveExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (CfReserveExpansion)(c) +} + +func (c CfReserveBucketsize) Build() Completed { + return Completed(c) +} + +type CfReserveCapacity Completed + +func (c CfReserveCapacity) Bucketsize(bucketsize int64) CfReserveBucketsize { + c.cs.s = append(c.cs.s, "BUCKETSIZE", strconv.FormatInt(bucketsize, 10)) + return (CfReserveBucketsize)(c) +} + +func (c CfReserveCapacity) Maxiterations(maxiterations int64) CfReserveMaxiterations { + c.cs.s = append(c.cs.s, "MAXITERATIONS", strconv.FormatInt(maxiterations, 10)) + return (CfReserveMaxiterations)(c) +} + +func (c CfReserveCapacity) Expansion(expansion int64) CfReserveExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (CfReserveExpansion)(c) +} + +func (c CfReserveCapacity) Build() Completed { + return Completed(c) +} + +type CfReserveExpansion Completed + +func (c CfReserveExpansion) Build() Completed { + return Completed(c) +} + +type CfReserveKey Completed + +func (c CfReserveKey) Capacity(capacity int64) CfReserveCapacity { + c.cs.s = append(c.cs.s, strconv.FormatInt(capacity, 10)) + return (CfReserveCapacity)(c) +} + +type CfReserveMaxiterations Completed + +func (c CfReserveMaxiterations) Expansion(expansion int64) CfReserveExpansion { + c.cs.s = append(c.cs.s, "EXPANSION", strconv.FormatInt(expansion, 10)) + return (CfReserveExpansion)(c) +} + +func (c CfReserveMaxiterations) Build() Completed { + return Completed(c) +} + +type CfScandump Completed + +func (b Builder) CfScandump() (c CfScandump) { + c = CfScandump{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "CF.SCANDUMP") + return c +} + +func (c CfScandump) Key(key string) CfScandumpKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CfScandumpKey)(c) +} + +type CfScandumpIterator Completed + +func (c CfScandumpIterator) Build() Completed { + return Completed(c) +} + +type CfScandumpKey Completed + +func (c CfScandumpKey) Iterator(iterator int64) CfScandumpIterator { + c.cs.s = append(c.cs.s, strconv.FormatInt(iterator, 10)) + return (CfScandumpIterator)(c) +} + +type ClientCaching Completed + +func (b Builder) ClientCaching() (c ClientCaching) { + c = ClientCaching{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "CACHING") + return c +} + +func (c ClientCaching) Yes() ClientCachingModeYes { + c.cs.s = append(c.cs.s, "YES") + return (ClientCachingModeYes)(c) +} + +func (c ClientCaching) No() ClientCachingModeNo { + c.cs.s = append(c.cs.s, "NO") + return (ClientCachingModeNo)(c) +} + +type ClientCachingModeNo Completed + +func (c ClientCachingModeNo) Build() Completed { + return Completed(c) +} + +type 
ClientCachingModeYes Completed + +func (c ClientCachingModeYes) Build() Completed { + return Completed(c) +} + +type ClientGetname Completed + +func (b Builder) ClientGetname() (c ClientGetname) { + c = ClientGetname{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "GETNAME") + return c +} + +func (c ClientGetname) Build() Completed { + return Completed(c) +} + +type ClientGetredir Completed + +func (b Builder) ClientGetredir() (c ClientGetredir) { + c = ClientGetredir{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "GETREDIR") + return c +} + +func (c ClientGetredir) Build() Completed { + return Completed(c) +} + +type ClientId Completed + +func (b Builder) ClientId() (c ClientId) { + c = ClientId{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "ID") + return c +} + +func (c ClientId) Build() Completed { + return Completed(c) +} + +type ClientInfo Completed + +func (b Builder) ClientInfo() (c ClientInfo) { + c = ClientInfo{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "INFO") + return c +} + +func (c ClientInfo) Build() Completed { + return Completed(c) +} + +type ClientKill Completed + +func (b Builder) ClientKill() (c ClientKill) { + c = ClientKill{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "KILL") + return c +} + +func (c ClientKill) IpPort(ipPort string) ClientKillIpPort { + c.cs.s = append(c.cs.s, ipPort) + return (ClientKillIpPort)(c) +} + +func (c ClientKill) Id(clientId int64) ClientKillId { + c.cs.s = append(c.cs.s, "ID", strconv.FormatInt(clientId, 10)) + return (ClientKillId)(c) +} + +func (c ClientKill) TypeNormal() ClientKillTypeNormal { + c.cs.s = append(c.cs.s, "TYPE", "NORMAL") + return (ClientKillTypeNormal)(c) +} + +func (c ClientKill) TypeMaster() ClientKillTypeMaster { + c.cs.s = append(c.cs.s, "TYPE", "MASTER") + return (ClientKillTypeMaster)(c) +} + +func (c ClientKill) TypeReplica() ClientKillTypeReplica { + c.cs.s = append(c.cs.s, "TYPE", "REPLICA") + return (ClientKillTypeReplica)(c) +} + +func (c ClientKill) TypePubsub() ClientKillTypePubsub { + c.cs.s = append(c.cs.s, "TYPE", "PUBSUB") + return (ClientKillTypePubsub)(c) +} + +func (c ClientKill) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKill) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKill) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKill) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKill) Build() Completed { + return Completed(c) +} + +type ClientKillAddr Completed + +func (c ClientKillAddr) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillAddr) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillAddr) Build() Completed { + return Completed(c) +} + +type ClientKillId Completed + +func (c ClientKillId) TypeNormal() ClientKillTypeNormal { + c.cs.s = append(c.cs.s, "TYPE", "NORMAL") + return (ClientKillTypeNormal)(c) +} + +func (c ClientKillId) TypeMaster() ClientKillTypeMaster { + c.cs.s = append(c.cs.s, "TYPE", "MASTER") + return (ClientKillTypeMaster)(c) +} + +func (c ClientKillId) TypeReplica() ClientKillTypeReplica { + c.cs.s = append(c.cs.s, 
"TYPE", "REPLICA") + return (ClientKillTypeReplica)(c) +} + +func (c ClientKillId) TypePubsub() ClientKillTypePubsub { + c.cs.s = append(c.cs.s, "TYPE", "PUBSUB") + return (ClientKillTypePubsub)(c) +} + +func (c ClientKillId) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKillId) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillId) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillId) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillId) Build() Completed { + return Completed(c) +} + +type ClientKillIpPort Completed + +func (c ClientKillIpPort) Id(clientId int64) ClientKillId { + c.cs.s = append(c.cs.s, "ID", strconv.FormatInt(clientId, 10)) + return (ClientKillId)(c) +} + +func (c ClientKillIpPort) TypeNormal() ClientKillTypeNormal { + c.cs.s = append(c.cs.s, "TYPE", "NORMAL") + return (ClientKillTypeNormal)(c) +} + +func (c ClientKillIpPort) TypeMaster() ClientKillTypeMaster { + c.cs.s = append(c.cs.s, "TYPE", "MASTER") + return (ClientKillTypeMaster)(c) +} + +func (c ClientKillIpPort) TypeReplica() ClientKillTypeReplica { + c.cs.s = append(c.cs.s, "TYPE", "REPLICA") + return (ClientKillTypeReplica)(c) +} + +func (c ClientKillIpPort) TypePubsub() ClientKillTypePubsub { + c.cs.s = append(c.cs.s, "TYPE", "PUBSUB") + return (ClientKillTypePubsub)(c) +} + +func (c ClientKillIpPort) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKillIpPort) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillIpPort) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillIpPort) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillIpPort) Build() Completed { + return Completed(c) +} + +type ClientKillLaddr Completed + +func (c ClientKillLaddr) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillLaddr) Build() Completed { + return Completed(c) +} + +type ClientKillSkipme Completed + +func (c ClientKillSkipme) Build() Completed { + return Completed(c) +} + +type ClientKillTypeMaster Completed + +func (c ClientKillTypeMaster) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKillTypeMaster) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillTypeMaster) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillTypeMaster) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillTypeMaster) Build() Completed { + return Completed(c) +} + +type ClientKillTypeNormal Completed + +func (c ClientKillTypeNormal) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKillTypeNormal) 
Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillTypeNormal) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillTypeNormal) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillTypeNormal) Build() Completed { + return Completed(c) +} + +type ClientKillTypePubsub Completed + +func (c ClientKillTypePubsub) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKillTypePubsub) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillTypePubsub) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillTypePubsub) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillTypePubsub) Build() Completed { + return Completed(c) +} + +type ClientKillTypeReplica Completed + +func (c ClientKillTypeReplica) User(username string) ClientKillUser { + c.cs.s = append(c.cs.s, "USER", username) + return (ClientKillUser)(c) +} + +func (c ClientKillTypeReplica) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillTypeReplica) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillTypeReplica) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillTypeReplica) Build() Completed { + return Completed(c) +} + +type ClientKillUser Completed + +func (c ClientKillUser) Addr(ipPort string) ClientKillAddr { + c.cs.s = append(c.cs.s, "ADDR", ipPort) + return (ClientKillAddr)(c) +} + +func (c ClientKillUser) Laddr(ipPort string) ClientKillLaddr { + c.cs.s = append(c.cs.s, "LADDR", ipPort) + return (ClientKillLaddr)(c) +} + +func (c ClientKillUser) Skipme(yesNo string) ClientKillSkipme { + c.cs.s = append(c.cs.s, "SKIPME", yesNo) + return (ClientKillSkipme)(c) +} + +func (c ClientKillUser) Build() Completed { + return Completed(c) +} + +type ClientList Completed + +func (b Builder) ClientList() (c ClientList) { + c = ClientList{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "LIST") + return c +} + +func (c ClientList) TypeNormal() ClientListTypeNormal { + c.cs.s = append(c.cs.s, "TYPE", "NORMAL") + return (ClientListTypeNormal)(c) +} + +func (c ClientList) TypeMaster() ClientListTypeMaster { + c.cs.s = append(c.cs.s, "TYPE", "MASTER") + return (ClientListTypeMaster)(c) +} + +func (c ClientList) TypeReplica() ClientListTypeReplica { + c.cs.s = append(c.cs.s, "TYPE", "REPLICA") + return (ClientListTypeReplica)(c) +} + +func (c ClientList) TypePubsub() ClientListTypePubsub { + c.cs.s = append(c.cs.s, "TYPE", "PUBSUB") + return (ClientListTypePubsub)(c) +} + +func (c ClientList) Id() ClientListIdId { + c.cs.s = append(c.cs.s, "ID") + return (ClientListIdId)(c) +} + +func (c ClientList) Build() Completed { + return Completed(c) +} + +type ClientListIdClientId Completed + +func (c ClientListIdClientId) ClientId(clientId ...int64) ClientListIdClientId { + for _, n := range clientId { + c.cs.s = append(c.cs.s, 
strconv.FormatInt(n, 10)) + } + return c +} + +func (c ClientListIdClientId) Build() Completed { + return Completed(c) +} + +type ClientListIdId Completed + +func (c ClientListIdId) ClientId(clientId ...int64) ClientListIdClientId { + for _, n := range clientId { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ClientListIdClientId)(c) +} + +type ClientListTypeMaster Completed + +func (c ClientListTypeMaster) Id() ClientListIdId { + c.cs.s = append(c.cs.s, "ID") + return (ClientListIdId)(c) +} + +func (c ClientListTypeMaster) Build() Completed { + return Completed(c) +} + +type ClientListTypeNormal Completed + +func (c ClientListTypeNormal) Id() ClientListIdId { + c.cs.s = append(c.cs.s, "ID") + return (ClientListIdId)(c) +} + +func (c ClientListTypeNormal) Build() Completed { + return Completed(c) +} + +type ClientListTypePubsub Completed + +func (c ClientListTypePubsub) Id() ClientListIdId { + c.cs.s = append(c.cs.s, "ID") + return (ClientListIdId)(c) +} + +func (c ClientListTypePubsub) Build() Completed { + return Completed(c) +} + +type ClientListTypeReplica Completed + +func (c ClientListTypeReplica) Id() ClientListIdId { + c.cs.s = append(c.cs.s, "ID") + return (ClientListIdId)(c) +} + +func (c ClientListTypeReplica) Build() Completed { + return Completed(c) +} + +type ClientNoEvict Completed + +func (b Builder) ClientNoEvict() (c ClientNoEvict) { + c = ClientNoEvict{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "NO-EVICT") + return c +} + +func (c ClientNoEvict) On() ClientNoEvictEnabledOn { + c.cs.s = append(c.cs.s, "ON") + return (ClientNoEvictEnabledOn)(c) +} + +func (c ClientNoEvict) Off() ClientNoEvictEnabledOff { + c.cs.s = append(c.cs.s, "OFF") + return (ClientNoEvictEnabledOff)(c) +} + +type ClientNoEvictEnabledOff Completed + +func (c ClientNoEvictEnabledOff) Build() Completed { + return Completed(c) +} + +type ClientNoEvictEnabledOn Completed + +func (c ClientNoEvictEnabledOn) Build() Completed { + return Completed(c) +} + +type ClientPause Completed + +func (b Builder) ClientPause() (c ClientPause) { + c = ClientPause{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "CLIENT", "PAUSE") + return c +} + +func (c ClientPause) Timeout(timeout int64) ClientPauseTimeout { + c.cs.s = append(c.cs.s, strconv.FormatInt(timeout, 10)) + return (ClientPauseTimeout)(c) +} + +type ClientPauseModeAll Completed + +func (c ClientPauseModeAll) Build() Completed { + return Completed(c) +} + +type ClientPauseModeWrite Completed + +func (c ClientPauseModeWrite) Build() Completed { + return Completed(c) +} + +type ClientPauseTimeout Completed + +func (c ClientPauseTimeout) Write() ClientPauseModeWrite { + c.cs.s = append(c.cs.s, "WRITE") + return (ClientPauseModeWrite)(c) +} + +func (c ClientPauseTimeout) All() ClientPauseModeAll { + c.cs.s = append(c.cs.s, "ALL") + return (ClientPauseModeAll)(c) +} + +func (c ClientPauseTimeout) Build() Completed { + return Completed(c) +} + +type ClientReply Completed + +func (b Builder) ClientReply() (c ClientReply) { + c = ClientReply{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "REPLY") + return c +} + +func (c ClientReply) On() ClientReplyReplyModeOn { + c.cs.s = append(c.cs.s, "ON") + return (ClientReplyReplyModeOn)(c) +} + +func (c ClientReply) Off() ClientReplyReplyModeOff { + c.cs.s = append(c.cs.s, "OFF") + return (ClientReplyReplyModeOff)(c) +} + +func (c ClientReply) Skip() ClientReplyReplyModeSkip { + c.cs.s = append(c.cs.s, "SKIP") + return (ClientReplyReplyModeSkip)(c) +} + +type 
ClientReplyReplyModeOff Completed + +func (c ClientReplyReplyModeOff) Build() Completed { + return Completed(c) +} + +type ClientReplyReplyModeOn Completed + +func (c ClientReplyReplyModeOn) Build() Completed { + return Completed(c) +} + +type ClientReplyReplyModeSkip Completed + +func (c ClientReplyReplyModeSkip) Build() Completed { + return Completed(c) +} + +type ClientSetname Completed + +func (b Builder) ClientSetname() (c ClientSetname) { + c = ClientSetname{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "SETNAME") + return c +} + +func (c ClientSetname) ConnectionName(connectionName string) ClientSetnameConnectionName { + c.cs.s = append(c.cs.s, connectionName) + return (ClientSetnameConnectionName)(c) +} + +type ClientSetnameConnectionName Completed + +func (c ClientSetnameConnectionName) Build() Completed { + return Completed(c) +} + +type ClientTracking Completed + +func (b Builder) ClientTracking() (c ClientTracking) { + c = ClientTracking{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "TRACKING") + return c +} + +func (c ClientTracking) On() ClientTrackingStatusOn { + c.cs.s = append(c.cs.s, "ON") + return (ClientTrackingStatusOn)(c) +} + +func (c ClientTracking) Off() ClientTrackingStatusOff { + c.cs.s = append(c.cs.s, "OFF") + return (ClientTrackingStatusOff)(c) +} + +type ClientTrackingBcast Completed + +func (c ClientTrackingBcast) Optin() ClientTrackingOptin { + c.cs.s = append(c.cs.s, "OPTIN") + return (ClientTrackingOptin)(c) +} + +func (c ClientTrackingBcast) Optout() ClientTrackingOptout { + c.cs.s = append(c.cs.s, "OPTOUT") + return (ClientTrackingOptout)(c) +} + +func (c ClientTrackingBcast) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingBcast) Build() Completed { + return Completed(c) +} + +type ClientTrackingNoloop Completed + +func (c ClientTrackingNoloop) Build() Completed { + return Completed(c) +} + +type ClientTrackingOptin Completed + +func (c ClientTrackingOptin) Optout() ClientTrackingOptout { + c.cs.s = append(c.cs.s, "OPTOUT") + return (ClientTrackingOptout)(c) +} + +func (c ClientTrackingOptin) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingOptin) Build() Completed { + return Completed(c) +} + +type ClientTrackingOptout Completed + +func (c ClientTrackingOptout) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingOptout) Build() Completed { + return Completed(c) +} + +type ClientTrackingPrefix Completed + +func (c ClientTrackingPrefix) Prefix(prefix string) ClientTrackingPrefix { + c.cs.s = append(c.cs.s, "PREFIX", prefix) + return c +} + +func (c ClientTrackingPrefix) Bcast() ClientTrackingBcast { + c.cs.s = append(c.cs.s, "BCAST") + return (ClientTrackingBcast)(c) +} + +func (c ClientTrackingPrefix) Optin() ClientTrackingOptin { + c.cs.s = append(c.cs.s, "OPTIN") + return (ClientTrackingOptin)(c) +} + +func (c ClientTrackingPrefix) Optout() ClientTrackingOptout { + c.cs.s = append(c.cs.s, "OPTOUT") + return (ClientTrackingOptout)(c) +} + +func (c ClientTrackingPrefix) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingPrefix) Build() Completed { + return Completed(c) +} + +type ClientTrackingRedirect Completed + +func (c ClientTrackingRedirect) Prefix() ClientTrackingPrefix { + return (ClientTrackingPrefix)(c) 
+} + +func (c ClientTrackingRedirect) Bcast() ClientTrackingBcast { + c.cs.s = append(c.cs.s, "BCAST") + return (ClientTrackingBcast)(c) +} + +func (c ClientTrackingRedirect) Optin() ClientTrackingOptin { + c.cs.s = append(c.cs.s, "OPTIN") + return (ClientTrackingOptin)(c) +} + +func (c ClientTrackingRedirect) Optout() ClientTrackingOptout { + c.cs.s = append(c.cs.s, "OPTOUT") + return (ClientTrackingOptout)(c) +} + +func (c ClientTrackingRedirect) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingRedirect) Build() Completed { + return Completed(c) +} + +type ClientTrackingStatusOff Completed + +func (c ClientTrackingStatusOff) Redirect(clientId int64) ClientTrackingRedirect { + c.cs.s = append(c.cs.s, "REDIRECT", strconv.FormatInt(clientId, 10)) + return (ClientTrackingRedirect)(c) +} + +func (c ClientTrackingStatusOff) Prefix() ClientTrackingPrefix { + return (ClientTrackingPrefix)(c) +} + +func (c ClientTrackingStatusOff) Bcast() ClientTrackingBcast { + c.cs.s = append(c.cs.s, "BCAST") + return (ClientTrackingBcast)(c) +} + +func (c ClientTrackingStatusOff) Optin() ClientTrackingOptin { + c.cs.s = append(c.cs.s, "OPTIN") + return (ClientTrackingOptin)(c) +} + +func (c ClientTrackingStatusOff) Optout() ClientTrackingOptout { + c.cs.s = append(c.cs.s, "OPTOUT") + return (ClientTrackingOptout)(c) +} + +func (c ClientTrackingStatusOff) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingStatusOff) Build() Completed { + return Completed(c) +} + +type ClientTrackingStatusOn Completed + +func (c ClientTrackingStatusOn) Redirect(clientId int64) ClientTrackingRedirect { + c.cs.s = append(c.cs.s, "REDIRECT", strconv.FormatInt(clientId, 10)) + return (ClientTrackingRedirect)(c) +} + +func (c ClientTrackingStatusOn) Prefix() ClientTrackingPrefix { + return (ClientTrackingPrefix)(c) +} + +func (c ClientTrackingStatusOn) Bcast() ClientTrackingBcast { + c.cs.s = append(c.cs.s, "BCAST") + return (ClientTrackingBcast)(c) +} + +func (c ClientTrackingStatusOn) Optin() ClientTrackingOptin { + c.cs.s = append(c.cs.s, "OPTIN") + return (ClientTrackingOptin)(c) +} + +func (c ClientTrackingStatusOn) Optout() ClientTrackingOptout { + c.cs.s = append(c.cs.s, "OPTOUT") + return (ClientTrackingOptout)(c) +} + +func (c ClientTrackingStatusOn) Noloop() ClientTrackingNoloop { + c.cs.s = append(c.cs.s, "NOLOOP") + return (ClientTrackingNoloop)(c) +} + +func (c ClientTrackingStatusOn) Build() Completed { + return Completed(c) +} + +type ClientTrackinginfo Completed + +func (b Builder) ClientTrackinginfo() (c ClientTrackinginfo) { + c = ClientTrackinginfo{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "TRACKINGINFO") + return c +} + +func (c ClientTrackinginfo) Build() Completed { + return Completed(c) +} + +type ClientUnblock Completed + +func (b Builder) ClientUnblock() (c ClientUnblock) { + c = ClientUnblock{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "UNBLOCK") + return c +} + +func (c ClientUnblock) ClientId(clientId int64) ClientUnblockClientId { + c.cs.s = append(c.cs.s, strconv.FormatInt(clientId, 10)) + return (ClientUnblockClientId)(c) +} + +type ClientUnblockClientId Completed + +func (c ClientUnblockClientId) Timeout() ClientUnblockUnblockTypeTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT") + return (ClientUnblockUnblockTypeTimeout)(c) +} + +func (c ClientUnblockClientId) Error() ClientUnblockUnblockTypeError { + c.cs.s = 
append(c.cs.s, "ERROR") + return (ClientUnblockUnblockTypeError)(c) +} + +func (c ClientUnblockClientId) Build() Completed { + return Completed(c) +} + +type ClientUnblockUnblockTypeError Completed + +func (c ClientUnblockUnblockTypeError) Build() Completed { + return Completed(c) +} + +type ClientUnblockUnblockTypeTimeout Completed + +func (c ClientUnblockUnblockTypeTimeout) Build() Completed { + return Completed(c) +} + +type ClientUnpause Completed + +func (b Builder) ClientUnpause() (c ClientUnpause) { + c = ClientUnpause{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLIENT", "UNPAUSE") + return c +} + +func (c ClientUnpause) Build() Completed { + return Completed(c) +} + +type ClusterAddslots Completed + +func (b Builder) ClusterAddslots() (c ClusterAddslots) { + c = ClusterAddslots{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "ADDSLOTS") + return c +} + +func (c ClusterAddslots) Slot(slot ...int64) ClusterAddslotsSlot { + for _, n := range slot { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ClusterAddslotsSlot)(c) +} + +type ClusterAddslotsSlot Completed + +func (c ClusterAddslotsSlot) Slot(slot ...int64) ClusterAddslotsSlot { + for _, n := range slot { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c ClusterAddslotsSlot) Build() Completed { + return Completed(c) +} + +type ClusterAddslotsrange Completed + +func (b Builder) ClusterAddslotsrange() (c ClusterAddslotsrange) { + c = ClusterAddslotsrange{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "ADDSLOTSRANGE") + return c +} + +func (c ClusterAddslotsrange) StartSlotEndSlot() ClusterAddslotsrangeStartSlotEndSlot { + return (ClusterAddslotsrangeStartSlotEndSlot)(c) +} + +type ClusterAddslotsrangeStartSlotEndSlot Completed + +func (c ClusterAddslotsrangeStartSlotEndSlot) StartSlotEndSlot(startSlot int64, endSlot int64) ClusterAddslotsrangeStartSlotEndSlot { + c.cs.s = append(c.cs.s, strconv.FormatInt(startSlot, 10), strconv.FormatInt(endSlot, 10)) + return c +} + +func (c ClusterAddslotsrangeStartSlotEndSlot) Build() Completed { + return Completed(c) +} + +type ClusterBumpepoch Completed + +func (b Builder) ClusterBumpepoch() (c ClusterBumpepoch) { + c = ClusterBumpepoch{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "BUMPEPOCH") + return c +} + +func (c ClusterBumpepoch) Build() Completed { + return Completed(c) +} + +type ClusterCountFailureReports Completed + +func (b Builder) ClusterCountFailureReports() (c ClusterCountFailureReports) { + c = ClusterCountFailureReports{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "COUNT-FAILURE-REPORTS") + return c +} + +func (c ClusterCountFailureReports) NodeId(nodeId string) ClusterCountFailureReportsNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterCountFailureReportsNodeId)(c) +} + +type ClusterCountFailureReportsNodeId Completed + +func (c ClusterCountFailureReportsNodeId) Build() Completed { + return Completed(c) +} + +type ClusterCountkeysinslot Completed + +func (b Builder) ClusterCountkeysinslot() (c ClusterCountkeysinslot) { + c = ClusterCountkeysinslot{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "COUNTKEYSINSLOT") + return c +} + +func (c ClusterCountkeysinslot) Slot(slot int64) ClusterCountkeysinslotSlot { + c.cs.s = append(c.cs.s, strconv.FormatInt(slot, 10)) + return (ClusterCountkeysinslotSlot)(c) +} + +type ClusterCountkeysinslotSlot Completed + +func (c ClusterCountkeysinslotSlot) Build() Completed { + return Completed(c) +} + +type ClusterDelslots 
Completed + +func (b Builder) ClusterDelslots() (c ClusterDelslots) { + c = ClusterDelslots{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "DELSLOTS") + return c +} + +func (c ClusterDelslots) Slot(slot ...int64) ClusterDelslotsSlot { + for _, n := range slot { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ClusterDelslotsSlot)(c) +} + +type ClusterDelslotsSlot Completed + +func (c ClusterDelslotsSlot) Slot(slot ...int64) ClusterDelslotsSlot { + for _, n := range slot { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c ClusterDelslotsSlot) Build() Completed { + return Completed(c) +} + +type ClusterDelslotsrange Completed + +func (b Builder) ClusterDelslotsrange() (c ClusterDelslotsrange) { + c = ClusterDelslotsrange{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "DELSLOTSRANGE") + return c +} + +func (c ClusterDelslotsrange) StartSlotEndSlot() ClusterDelslotsrangeStartSlotEndSlot { + return (ClusterDelslotsrangeStartSlotEndSlot)(c) +} + +type ClusterDelslotsrangeStartSlotEndSlot Completed + +func (c ClusterDelslotsrangeStartSlotEndSlot) StartSlotEndSlot(startSlot int64, endSlot int64) ClusterDelslotsrangeStartSlotEndSlot { + c.cs.s = append(c.cs.s, strconv.FormatInt(startSlot, 10), strconv.FormatInt(endSlot, 10)) + return c +} + +func (c ClusterDelslotsrangeStartSlotEndSlot) Build() Completed { + return Completed(c) +} + +type ClusterFailover Completed + +func (b Builder) ClusterFailover() (c ClusterFailover) { + c = ClusterFailover{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "FAILOVER") + return c +} + +func (c ClusterFailover) Force() ClusterFailoverOptionsForce { + c.cs.s = append(c.cs.s, "FORCE") + return (ClusterFailoverOptionsForce)(c) +} + +func (c ClusterFailover) Takeover() ClusterFailoverOptionsTakeover { + c.cs.s = append(c.cs.s, "TAKEOVER") + return (ClusterFailoverOptionsTakeover)(c) +} + +func (c ClusterFailover) Build() Completed { + return Completed(c) +} + +type ClusterFailoverOptionsForce Completed + +func (c ClusterFailoverOptionsForce) Build() Completed { + return Completed(c) +} + +type ClusterFailoverOptionsTakeover Completed + +func (c ClusterFailoverOptionsTakeover) Build() Completed { + return Completed(c) +} + +type ClusterFlushslots Completed + +func (b Builder) ClusterFlushslots() (c ClusterFlushslots) { + c = ClusterFlushslots{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "FLUSHSLOTS") + return c +} + +func (c ClusterFlushslots) Build() Completed { + return Completed(c) +} + +type ClusterForget Completed + +func (b Builder) ClusterForget() (c ClusterForget) { + c = ClusterForget{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "FORGET") + return c +} + +func (c ClusterForget) NodeId(nodeId string) ClusterForgetNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterForgetNodeId)(c) +} + +type ClusterForgetNodeId Completed + +func (c ClusterForgetNodeId) Build() Completed { + return Completed(c) +} + +type ClusterGetkeysinslot Completed + +func (b Builder) ClusterGetkeysinslot() (c ClusterGetkeysinslot) { + c = ClusterGetkeysinslot{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "GETKEYSINSLOT") + return c +} + +func (c ClusterGetkeysinslot) Slot(slot int64) ClusterGetkeysinslotSlot { + c.cs.s = append(c.cs.s, strconv.FormatInt(slot, 10)) + return (ClusterGetkeysinslotSlot)(c) +} + +type ClusterGetkeysinslotCount Completed + +func (c ClusterGetkeysinslotCount) Build() Completed { + return Completed(c) +} + +type ClusterGetkeysinslotSlot 
Completed + +func (c ClusterGetkeysinslotSlot) Count(count int64) ClusterGetkeysinslotCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (ClusterGetkeysinslotCount)(c) +} + +type ClusterInfo Completed + +func (b Builder) ClusterInfo() (c ClusterInfo) { + c = ClusterInfo{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "INFO") + return c +} + +func (c ClusterInfo) Build() Completed { + return Completed(c) +} + +type ClusterKeyslot Completed + +func (b Builder) ClusterKeyslot() (c ClusterKeyslot) { + c = ClusterKeyslot{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "KEYSLOT") + return c +} + +func (c ClusterKeyslot) Key(key string) ClusterKeyslotKey { + c.cs.s = append(c.cs.s, key) + return (ClusterKeyslotKey)(c) +} + +type ClusterKeyslotKey Completed + +func (c ClusterKeyslotKey) Build() Completed { + return Completed(c) +} + +type ClusterLinks Completed + +func (b Builder) ClusterLinks() (c ClusterLinks) { + c = ClusterLinks{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "LINKS") + return c +} + +func (c ClusterLinks) Build() Completed { + return Completed(c) +} + +type ClusterMeet Completed + +func (b Builder) ClusterMeet() (c ClusterMeet) { + c = ClusterMeet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "MEET") + return c +} + +func (c ClusterMeet) Ip(ip string) ClusterMeetIp { + c.cs.s = append(c.cs.s, ip) + return (ClusterMeetIp)(c) +} + +type ClusterMeetClusterBusPort Completed + +func (c ClusterMeetClusterBusPort) Build() Completed { + return Completed(c) +} + +type ClusterMeetIp Completed + +func (c ClusterMeetIp) Port(port int64) ClusterMeetPort { + c.cs.s = append(c.cs.s, strconv.FormatInt(port, 10)) + return (ClusterMeetPort)(c) +} + +type ClusterMeetPort Completed + +func (c ClusterMeetPort) ClusterBusPort(clusterBusPort int64) ClusterMeetClusterBusPort { + c.cs.s = append(c.cs.s, strconv.FormatInt(clusterBusPort, 10)) + return (ClusterMeetClusterBusPort)(c) +} + +func (c ClusterMeetPort) Build() Completed { + return Completed(c) +} + +type ClusterMyid Completed + +func (b Builder) ClusterMyid() (c ClusterMyid) { + c = ClusterMyid{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "MYID") + return c +} + +func (c ClusterMyid) Build() Completed { + return Completed(c) +} + +type ClusterNodes Completed + +func (b Builder) ClusterNodes() (c ClusterNodes) { + c = ClusterNodes{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "NODES") + return c +} + +func (c ClusterNodes) Build() Completed { + return Completed(c) +} + +type ClusterReplicas Completed + +func (b Builder) ClusterReplicas() (c ClusterReplicas) { + c = ClusterReplicas{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "REPLICAS") + return c +} + +func (c ClusterReplicas) NodeId(nodeId string) ClusterReplicasNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterReplicasNodeId)(c) +} + +type ClusterReplicasNodeId Completed + +func (c ClusterReplicasNodeId) Build() Completed { + return Completed(c) +} + +type ClusterReplicate Completed + +func (b Builder) ClusterReplicate() (c ClusterReplicate) { + c = ClusterReplicate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "REPLICATE") + return c +} + +func (c ClusterReplicate) NodeId(nodeId string) ClusterReplicateNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterReplicateNodeId)(c) +} + +type ClusterReplicateNodeId Completed + +func (c ClusterReplicateNodeId) Build() Completed { + return Completed(c) +} + +type ClusterReset Completed + +func (b Builder) ClusterReset() 
(c ClusterReset) { + c = ClusterReset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "RESET") + return c +} + +func (c ClusterReset) Hard() ClusterResetResetTypeHard { + c.cs.s = append(c.cs.s, "HARD") + return (ClusterResetResetTypeHard)(c) +} + +func (c ClusterReset) Soft() ClusterResetResetTypeSoft { + c.cs.s = append(c.cs.s, "SOFT") + return (ClusterResetResetTypeSoft)(c) +} + +func (c ClusterReset) Build() Completed { + return Completed(c) +} + +type ClusterResetResetTypeHard Completed + +func (c ClusterResetResetTypeHard) Build() Completed { + return Completed(c) +} + +type ClusterResetResetTypeSoft Completed + +func (c ClusterResetResetTypeSoft) Build() Completed { + return Completed(c) +} + +type ClusterSaveconfig Completed + +func (b Builder) ClusterSaveconfig() (c ClusterSaveconfig) { + c = ClusterSaveconfig{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "SAVECONFIG") + return c +} + +func (c ClusterSaveconfig) Build() Completed { + return Completed(c) +} + +type ClusterSetConfigEpoch Completed + +func (b Builder) ClusterSetConfigEpoch() (c ClusterSetConfigEpoch) { + c = ClusterSetConfigEpoch{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "SET-CONFIG-EPOCH") + return c +} + +func (c ClusterSetConfigEpoch) ConfigEpoch(configEpoch int64) ClusterSetConfigEpochConfigEpoch { + c.cs.s = append(c.cs.s, strconv.FormatInt(configEpoch, 10)) + return (ClusterSetConfigEpochConfigEpoch)(c) +} + +type ClusterSetConfigEpochConfigEpoch Completed + +func (c ClusterSetConfigEpochConfigEpoch) Build() Completed { + return Completed(c) +} + +type ClusterSetslot Completed + +func (b Builder) ClusterSetslot() (c ClusterSetslot) { + c = ClusterSetslot{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "SETSLOT") + return c +} + +func (c ClusterSetslot) Slot(slot int64) ClusterSetslotSlot { + c.cs.s = append(c.cs.s, strconv.FormatInt(slot, 10)) + return (ClusterSetslotSlot)(c) +} + +type ClusterSetslotNodeId Completed + +func (c ClusterSetslotNodeId) Build() Completed { + return Completed(c) +} + +type ClusterSetslotSlot Completed + +func (c ClusterSetslotSlot) Importing() ClusterSetslotSubcommandImporting { + c.cs.s = append(c.cs.s, "IMPORTING") + return (ClusterSetslotSubcommandImporting)(c) +} + +func (c ClusterSetslotSlot) Migrating() ClusterSetslotSubcommandMigrating { + c.cs.s = append(c.cs.s, "MIGRATING") + return (ClusterSetslotSubcommandMigrating)(c) +} + +func (c ClusterSetslotSlot) Stable() ClusterSetslotSubcommandStable { + c.cs.s = append(c.cs.s, "STABLE") + return (ClusterSetslotSubcommandStable)(c) +} + +func (c ClusterSetslotSlot) Node() ClusterSetslotSubcommandNode { + c.cs.s = append(c.cs.s, "NODE") + return (ClusterSetslotSubcommandNode)(c) +} + +type ClusterSetslotSubcommandImporting Completed + +func (c ClusterSetslotSubcommandImporting) NodeId(nodeId string) ClusterSetslotNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterSetslotNodeId)(c) +} + +func (c ClusterSetslotSubcommandImporting) Build() Completed { + return Completed(c) +} + +type ClusterSetslotSubcommandMigrating Completed + +func (c ClusterSetslotSubcommandMigrating) NodeId(nodeId string) ClusterSetslotNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterSetslotNodeId)(c) +} + +func (c ClusterSetslotSubcommandMigrating) Build() Completed { + return Completed(c) +} + +type ClusterSetslotSubcommandNode Completed + +func (c ClusterSetslotSubcommandNode) NodeId(nodeId string) ClusterSetslotNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterSetslotNodeId)(c) +} + 
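+// Illustrative usage (example values; b is a Builder, not part of the generated source):
+// the fluent chain mirrors the Redis command syntax, e.g.
+//   b.ClusterSetslot().Slot(7).Node().NodeId("node-id").Build()
+// appends the tokens "CLUSTER", "SETSLOT", "7", "NODE", "node-id" in order.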
+func (c ClusterSetslotSubcommandNode) Build() Completed { + return Completed(c) +} + +type ClusterSetslotSubcommandStable Completed + +func (c ClusterSetslotSubcommandStable) NodeId(nodeId string) ClusterSetslotNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterSetslotNodeId)(c) +} + +func (c ClusterSetslotSubcommandStable) Build() Completed { + return Completed(c) +} + +type ClusterShards Completed + +func (b Builder) ClusterShards() (c ClusterShards) { + c = ClusterShards{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "SHARDS") + return c +} + +func (c ClusterShards) Build() Completed { + return Completed(c) +} + +type ClusterSlaves Completed + +func (b Builder) ClusterSlaves() (c ClusterSlaves) { + c = ClusterSlaves{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "SLAVES") + return c +} + +func (c ClusterSlaves) NodeId(nodeId string) ClusterSlavesNodeId { + c.cs.s = append(c.cs.s, nodeId) + return (ClusterSlavesNodeId)(c) +} + +type ClusterSlavesNodeId Completed + +func (c ClusterSlavesNodeId) Build() Completed { + return Completed(c) +} + +type ClusterSlots Completed + +func (b Builder) ClusterSlots() (c ClusterSlots) { + c = ClusterSlots{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CLUSTER", "SLOTS") + return c +} + +func (c ClusterSlots) Build() Completed { + return Completed(c) +} + +type CmsIncrby Completed + +func (b Builder) CmsIncrby() (c CmsIncrby) { + c = CmsIncrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CMS.INCRBY") + return c +} + +func (c CmsIncrby) Key(key string) CmsIncrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CmsIncrbyKey)(c) +} + +type CmsIncrbyItemsIncrement Completed + +func (c CmsIncrbyItemsIncrement) Item(item string) CmsIncrbyItemsItem { + c.cs.s = append(c.cs.s, item) + return (CmsIncrbyItemsItem)(c) +} + +func (c CmsIncrbyItemsIncrement) Build() Completed { + return Completed(c) +} + +type CmsIncrbyItemsItem Completed + +func (c CmsIncrbyItemsItem) Increment(increment int64) CmsIncrbyItemsIncrement { + c.cs.s = append(c.cs.s, strconv.FormatInt(increment, 10)) + return (CmsIncrbyItemsIncrement)(c) +} + +type CmsIncrbyKey Completed + +func (c CmsIncrbyKey) Item(item string) CmsIncrbyItemsItem { + c.cs.s = append(c.cs.s, item) + return (CmsIncrbyItemsItem)(c) +} + +type CmsInfo Completed + +func (b Builder) CmsInfo() (c CmsInfo) { + c = CmsInfo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "CMS.INFO") + return c +} + +func (c CmsInfo) Key(key string) CmsInfoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CmsInfoKey)(c) +} + +type CmsInfoKey Completed + +func (c CmsInfoKey) Build() Completed { + return Completed(c) +} + +func (c CmsInfoKey) Cache() Cacheable { + return Cacheable(c) +} + +type CmsInitbydim Completed + +func (b Builder) CmsInitbydim() (c CmsInitbydim) { + c = CmsInitbydim{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CMS.INITBYDIM") + return c +} + +func (c CmsInitbydim) Key(key string) CmsInitbydimKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CmsInitbydimKey)(c) +} + +type CmsInitbydimDepth Completed + +func (c CmsInitbydimDepth) Build() Completed { + return Completed(c) +} + +type CmsInitbydimKey Completed + +func (c CmsInitbydimKey) Width(width int64) CmsInitbydimWidth { + 
c.cs.s = append(c.cs.s, strconv.FormatInt(width, 10)) + return (CmsInitbydimWidth)(c) +} + +type CmsInitbydimWidth Completed + +func (c CmsInitbydimWidth) Depth(depth int64) CmsInitbydimDepth { + c.cs.s = append(c.cs.s, strconv.FormatInt(depth, 10)) + return (CmsInitbydimDepth)(c) +} + +type CmsInitbyprob Completed + +func (b Builder) CmsInitbyprob() (c CmsInitbyprob) { + c = CmsInitbyprob{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CMS.INITBYPROB") + return c +} + +func (c CmsInitbyprob) Key(key string) CmsInitbyprobKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CmsInitbyprobKey)(c) +} + +type CmsInitbyprobError Completed + +func (c CmsInitbyprobError) Probability(probability float64) CmsInitbyprobProbability { + c.cs.s = append(c.cs.s, strconv.FormatFloat(probability, 'f', -1, 64)) + return (CmsInitbyprobProbability)(c) +} + +type CmsInitbyprobKey Completed + +func (c CmsInitbyprobKey) Error(error float64) CmsInitbyprobError { + c.cs.s = append(c.cs.s, strconv.FormatFloat(error, 'f', -1, 64)) + return (CmsInitbyprobError)(c) +} + +type CmsInitbyprobProbability Completed + +func (c CmsInitbyprobProbability) Build() Completed { + return Completed(c) +} + +type CmsMerge Completed + +func (b Builder) CmsMerge() (c CmsMerge) { + c = CmsMerge{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CMS.MERGE") + return c +} + +func (c CmsMerge) Destination(destination string) CmsMergeDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (CmsMergeDestination)(c) +} + +type CmsMergeDestination Completed + +func (c CmsMergeDestination) Numkeys(numkeys int64) CmsMergeNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (CmsMergeNumkeys)(c) +} + +type CmsMergeNumkeys Completed + +func (c CmsMergeNumkeys) Source(source ...string) CmsMergeSource { + if c.ks&NoSlot == NoSlot { + for _, k := range source { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range source { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, source...) + return (CmsMergeSource)(c) +} + +type CmsMergeSource Completed + +func (c CmsMergeSource) Source(source ...string) CmsMergeSource { + if c.ks&NoSlot == NoSlot { + for _, k := range source { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range source { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, source...) 
+ return c +} + +func (c CmsMergeSource) Weights() CmsMergeWeightWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + return (CmsMergeWeightWeights)(c) +} + +func (c CmsMergeSource) Build() Completed { + return Completed(c) +} + +type CmsMergeWeightWeight Completed + +func (c CmsMergeWeightWeight) Weight(weight ...float64) CmsMergeWeightWeight { + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c CmsMergeWeightWeight) Build() Completed { + return Completed(c) +} + +type CmsMergeWeightWeights Completed + +func (c CmsMergeWeightWeights) Weight(weight ...float64) CmsMergeWeightWeight { + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (CmsMergeWeightWeight)(c) +} + +type CmsQuery Completed + +func (b Builder) CmsQuery() (c CmsQuery) { + c = CmsQuery{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "CMS.QUERY") + return c +} + +func (c CmsQuery) Key(key string) CmsQueryKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (CmsQueryKey)(c) +} + +type CmsQueryItem Completed + +func (c CmsQueryItem) Item(item ...string) CmsQueryItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c CmsQueryItem) Build() Completed { + return Completed(c) +} + +func (c CmsQueryItem) Cache() Cacheable { + return Cacheable(c) +} + +type CmsQueryKey Completed + +func (c CmsQueryKey) Item(item ...string) CmsQueryItem { + c.cs.s = append(c.cs.s, item...) + return (CmsQueryItem)(c) +} + +type Command Completed + +func (b Builder) Command() (c Command) { + c = Command{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND") + return c +} + +func (c Command) Build() Completed { + return Completed(c) +} + +type CommandCount Completed + +func (b Builder) CommandCount() (c CommandCount) { + c = CommandCount{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND", "COUNT") + return c +} + +func (c CommandCount) Build() Completed { + return Completed(c) +} + +type CommandDocs Completed + +func (b Builder) CommandDocs() (c CommandDocs) { + c = CommandDocs{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND", "DOCS") + return c +} + +func (c CommandDocs) CommandName(commandName ...string) CommandDocsCommandName { + c.cs.s = append(c.cs.s, commandName...) + return (CommandDocsCommandName)(c) +} + +func (c CommandDocs) Build() Completed { + return Completed(c) +} + +type CommandDocsCommandName Completed + +func (c CommandDocsCommandName) CommandName(commandName ...string) CommandDocsCommandName { + c.cs.s = append(c.cs.s, commandName...) 
+ return c +} + +func (c CommandDocsCommandName) Build() Completed { + return Completed(c) +} + +type CommandGetkeys Completed + +func (b Builder) CommandGetkeys() (c CommandGetkeys) { + c = CommandGetkeys{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND", "GETKEYS") + return c +} + +func (c CommandGetkeys) Build() Completed { + return Completed(c) +} + +type CommandGetkeysandflags Completed + +func (b Builder) CommandGetkeysandflags() (c CommandGetkeysandflags) { + c = CommandGetkeysandflags{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND", "GETKEYSANDFLAGS") + return c +} + +func (c CommandGetkeysandflags) Build() Completed { + return Completed(c) +} + +type CommandInfo Completed + +func (b Builder) CommandInfo() (c CommandInfo) { + c = CommandInfo{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND", "INFO") + return c +} + +func (c CommandInfo) CommandName(commandName ...string) CommandInfoCommandName { + c.cs.s = append(c.cs.s, commandName...) + return (CommandInfoCommandName)(c) +} + +func (c CommandInfo) Build() Completed { + return Completed(c) +} + +type CommandInfoCommandName Completed + +func (c CommandInfoCommandName) CommandName(commandName ...string) CommandInfoCommandName { + c.cs.s = append(c.cs.s, commandName...) + return c +} + +func (c CommandInfoCommandName) Build() Completed { + return Completed(c) +} + +type CommandList Completed + +func (b Builder) CommandList() (c CommandList) { + c = CommandList{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COMMAND", "LIST") + return c +} + +func (c CommandList) FilterbyModuleName(name string) CommandListFilterbyModuleName { + c.cs.s = append(c.cs.s, "FILTERBY", "MODULE", name) + return (CommandListFilterbyModuleName)(c) +} + +func (c CommandList) FilterbyAclcatCategory(category string) CommandListFilterbyAclcatCategory { + c.cs.s = append(c.cs.s, "FILTERBY", "ACLCAT", category) + return (CommandListFilterbyAclcatCategory)(c) +} + +func (c CommandList) FilterbyPatternPattern(pattern string) CommandListFilterbyPatternPattern { + c.cs.s = append(c.cs.s, "FILTERBY", "PATTERN", pattern) + return (CommandListFilterbyPatternPattern)(c) +} + +func (c CommandList) Build() Completed { + return Completed(c) +} + +type CommandListFilterbyAclcatCategory Completed + +func (c CommandListFilterbyAclcatCategory) Build() Completed { + return Completed(c) +} + +type CommandListFilterbyModuleName Completed + +func (c CommandListFilterbyModuleName) Build() Completed { + return Completed(c) +} + +type CommandListFilterbyPatternPattern Completed + +func (c CommandListFilterbyPatternPattern) Build() Completed { + return Completed(c) +} + +type ConfigGet Completed + +func (b Builder) ConfigGet() (c ConfigGet) { + c = ConfigGet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CONFIG", "GET") + return c +} + +func (c ConfigGet) Parameter(parameter ...string) ConfigGetParameter { + c.cs.s = append(c.cs.s, parameter...) + return (ConfigGetParameter)(c) +} + +type ConfigGetParameter Completed + +func (c ConfigGetParameter) Parameter(parameter ...string) ConfigGetParameter { + c.cs.s = append(c.cs.s, parameter...) 
+ return c +} + +func (c ConfigGetParameter) Build() Completed { + return Completed(c) +} + +type ConfigResetstat Completed + +func (b Builder) ConfigResetstat() (c ConfigResetstat) { + c = ConfigResetstat{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CONFIG", "RESETSTAT") + return c +} + +func (c ConfigResetstat) Build() Completed { + return Completed(c) +} + +type ConfigRewrite Completed + +func (b Builder) ConfigRewrite() (c ConfigRewrite) { + c = ConfigRewrite{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CONFIG", "REWRITE") + return c +} + +func (c ConfigRewrite) Build() Completed { + return Completed(c) +} + +type ConfigSet Completed + +func (b Builder) ConfigSet() (c ConfigSet) { + c = ConfigSet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "CONFIG", "SET") + return c +} + +func (c ConfigSet) ParameterValue() ConfigSetParameterValue { + return (ConfigSetParameterValue)(c) +} + +type ConfigSetParameterValue Completed + +func (c ConfigSetParameterValue) ParameterValue(parameter string, value string) ConfigSetParameterValue { + c.cs.s = append(c.cs.s, parameter, value) + return c +} + +func (c ConfigSetParameterValue) Build() Completed { + return Completed(c) +} + +type Copy Completed + +func (b Builder) Copy() (c Copy) { + c = Copy{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "COPY") + return c +} + +func (c Copy) Source(source string) CopySource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (CopySource)(c) +} + +type CopyDb Completed + +func (c CopyDb) Replace() CopyReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (CopyReplace)(c) +} + +func (c CopyDb) Build() Completed { + return Completed(c) +} + +type CopyDestination Completed + +func (c CopyDestination) Db(destinationDb int64) CopyDb { + c.cs.s = append(c.cs.s, "DB", strconv.FormatInt(destinationDb, 10)) + return (CopyDb)(c) +} + +func (c CopyDestination) Replace() CopyReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (CopyReplace)(c) +} + +func (c CopyDestination) Build() Completed { + return Completed(c) +} + +type CopyReplace Completed + +func (c CopyReplace) Build() Completed { + return Completed(c) +} + +type CopySource Completed + +func (c CopySource) Destination(destination string) CopyDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (CopyDestination)(c) +} + +type Dbsize Completed + +func (b Builder) Dbsize() (c Dbsize) { + c = Dbsize{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "DBSIZE") + return c +} + +func (c Dbsize) Build() Completed { + return Completed(c) +} + +type DebugObject Completed + +func (b Builder) DebugObject() (c DebugObject) { + c = DebugObject{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "DEBUG", "OBJECT") + return c +} + +func (c DebugObject) Key(key string) DebugObjectKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (DebugObjectKey)(c) +} + +type DebugObjectKey Completed + +func (c DebugObjectKey) Build() Completed { + return Completed(c) +} + +type DebugSegfault Completed + +func (b Builder) DebugSegfault() (c DebugSegfault) { + c = DebugSegfault{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "DEBUG", "SEGFAULT") + return c +} + +func (c DebugSegfault) Build() Completed { + return Completed(c) +} + +type Decr Completed + 
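+// Illustrative usage (example key; b is a Builder, not part of the generated source):
+//   b.Decr().Key("counter").Build()
+// appends "DECR", "counter" and records the key's hash slot in ks for cluster routing.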
+func (b Builder) Decr() (c Decr) { + c = Decr{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "DECR") + return c +} + +func (c Decr) Key(key string) DecrKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (DecrKey)(c) +} + +type DecrKey Completed + +func (c DecrKey) Build() Completed { + return Completed(c) +} + +type Decrby Completed + +func (b Builder) Decrby() (c Decrby) { + c = Decrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "DECRBY") + return c +} + +func (c Decrby) Key(key string) DecrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (DecrbyKey)(c) +} + +type DecrbyDecrement Completed + +func (c DecrbyDecrement) Build() Completed { + return Completed(c) +} + +type DecrbyKey Completed + +func (c DecrbyKey) Decrement(decrement int64) DecrbyDecrement { + c.cs.s = append(c.cs.s, strconv.FormatInt(decrement, 10)) + return (DecrbyDecrement)(c) +} + +type Del Completed + +func (b Builder) Del() (c Del) { + c = Del{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "DEL") + return c +} + +func (c Del) Key(key ...string) DelKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (DelKey)(c) +} + +type DelKey Completed + +func (c DelKey) Key(key ...string) DelKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c DelKey) Build() Completed { + return Completed(c) +} + +type Discard Completed + +func (b Builder) Discard() (c Discard) { + c = Discard{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "DISCARD") + return c +} + +func (c Discard) Build() Completed { + return Completed(c) +} + +type Dump Completed + +func (b Builder) Dump() (c Dump) { + c = Dump{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "DUMP") + return c +} + +func (c Dump) Key(key string) DumpKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (DumpKey)(c) +} + +type DumpKey Completed + +func (c DumpKey) Build() Completed { + return Completed(c) +} + +type Echo Completed + +func (b Builder) Echo() (c Echo) { + c = Echo{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ECHO") + return c +} + +func (c Echo) Message(message string) EchoMessage { + c.cs.s = append(c.cs.s, message) + return (EchoMessage)(c) +} + +type EchoMessage Completed + +func (c EchoMessage) Build() Completed { + return Completed(c) +} + +type Eval Completed + +func (b Builder) Eval() (c Eval) { + c = Eval{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "EVAL") + return c +} + +func (c Eval) Script(script string) EvalScript { + c.cs.s = append(c.cs.s, script) + return (EvalScript)(c) +} + +type EvalArg Completed + +func (c EvalArg) Arg(arg ...string) EvalArg { + c.cs.s = append(c.cs.s, arg...) 
+ return c +} + +func (c EvalArg) Build() Completed { + return Completed(c) +} + +type EvalKey Completed + +func (c EvalKey) Key(key ...string) EvalKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c EvalKey) Arg(arg ...string) EvalArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalArg)(c) +} + +func (c EvalKey) Build() Completed { + return Completed(c) +} + +type EvalNumkeys Completed + +func (c EvalNumkeys) Key(key ...string) EvalKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (EvalKey)(c) +} + +func (c EvalNumkeys) Arg(arg ...string) EvalArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalArg)(c) +} + +func (c EvalNumkeys) Build() Completed { + return Completed(c) +} + +type EvalRo Completed + +func (b Builder) EvalRo() (c EvalRo) { + c = EvalRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "EVAL_RO") + return c +} + +func (c EvalRo) Script(script string) EvalRoScript { + c.cs.s = append(c.cs.s, script) + return (EvalRoScript)(c) +} + +type EvalRoArg Completed + +func (c EvalRoArg) Arg(arg ...string) EvalRoArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c EvalRoArg) Build() Completed { + return Completed(c) +} + +type EvalRoKey Completed + +func (c EvalRoKey) Key(key ...string) EvalRoKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c EvalRoKey) Arg(arg ...string) EvalRoArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalRoArg)(c) +} + +func (c EvalRoKey) Build() Completed { + return Completed(c) +} + +type EvalRoNumkeys Completed + +func (c EvalRoNumkeys) Key(key ...string) EvalRoKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (EvalRoKey)(c) +} + +func (c EvalRoNumkeys) Arg(arg ...string) EvalRoArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalRoArg)(c) +} + +func (c EvalRoNumkeys) Build() Completed { + return Completed(c) +} + +type EvalRoScript Completed + +func (c EvalRoScript) Numkeys(numkeys int64) EvalRoNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (EvalRoNumkeys)(c) +} + +type EvalScript Completed + +func (c EvalScript) Numkeys(numkeys int64) EvalNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (EvalNumkeys)(c) +} + +type Evalsha Completed + +func (b Builder) Evalsha() (c Evalsha) { + c = Evalsha{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "EVALSHA") + return c +} + +func (c Evalsha) Sha1(sha1 string) EvalshaSha1 { + c.cs.s = append(c.cs.s, sha1) + return (EvalshaSha1)(c) +} + +type EvalshaArg Completed + +func (c EvalshaArg) Arg(arg ...string) EvalshaArg { + c.cs.s = append(c.cs.s, arg...) 
+ return c +} + +func (c EvalshaArg) Build() Completed { + return Completed(c) +} + +type EvalshaKey Completed + +func (c EvalshaKey) Key(key ...string) EvalshaKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c EvalshaKey) Arg(arg ...string) EvalshaArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalshaArg)(c) +} + +func (c EvalshaKey) Build() Completed { + return Completed(c) +} + +type EvalshaNumkeys Completed + +func (c EvalshaNumkeys) Key(key ...string) EvalshaKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (EvalshaKey)(c) +} + +func (c EvalshaNumkeys) Arg(arg ...string) EvalshaArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalshaArg)(c) +} + +func (c EvalshaNumkeys) Build() Completed { + return Completed(c) +} + +type EvalshaRo Completed + +func (b Builder) EvalshaRo() (c EvalshaRo) { + c = EvalshaRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "EVALSHA_RO") + return c +} + +func (c EvalshaRo) Sha1(sha1 string) EvalshaRoSha1 { + c.cs.s = append(c.cs.s, sha1) + return (EvalshaRoSha1)(c) +} + +type EvalshaRoArg Completed + +func (c EvalshaRoArg) Arg(arg ...string) EvalshaRoArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c EvalshaRoArg) Build() Completed { + return Completed(c) +} + +type EvalshaRoKey Completed + +func (c EvalshaRoKey) Key(key ...string) EvalshaRoKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c EvalshaRoKey) Arg(arg ...string) EvalshaRoArg { + c.cs.s = append(c.cs.s, arg...) + return (EvalshaRoArg)(c) +} + +func (c EvalshaRoKey) Build() Completed { + return Completed(c) +} + +type EvalshaRoNumkeys Completed + +func (c EvalshaRoNumkeys) Key(key ...string) EvalshaRoKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (EvalshaRoKey)(c) +} + +func (c EvalshaRoNumkeys) Arg(arg ...string) EvalshaRoArg { + c.cs.s = append(c.cs.s, arg...) 
+ return (EvalshaRoArg)(c) +} + +func (c EvalshaRoNumkeys) Build() Completed { + return Completed(c) +} + +type EvalshaRoSha1 Completed + +func (c EvalshaRoSha1) Numkeys(numkeys int64) EvalshaRoNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (EvalshaRoNumkeys)(c) +} + +type EvalshaSha1 Completed + +func (c EvalshaSha1) Numkeys(numkeys int64) EvalshaNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (EvalshaNumkeys)(c) +} + +type Exec Completed + +func (b Builder) Exec() (c Exec) { + c = Exec{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "EXEC") + return c +} + +func (c Exec) Build() Completed { + return Completed(c) +} + +type Exists Completed + +func (b Builder) Exists() (c Exists) { + c = Exists{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "EXISTS") + return c +} + +func (c Exists) Key(key ...string) ExistsKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (ExistsKey)(c) +} + +type ExistsKey Completed + +func (c ExistsKey) Key(key ...string) ExistsKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ExistsKey) Build() Completed { + return Completed(c) +} + +type Expire Completed + +func (b Builder) Expire() (c Expire) { + c = Expire{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "EXPIRE") + return c +} + +func (c Expire) Key(key string) ExpireKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ExpireKey)(c) +} + +type ExpireConditionGt Completed + +func (c ExpireConditionGt) Build() Completed { + return Completed(c) +} + +type ExpireConditionLt Completed + +func (c ExpireConditionLt) Build() Completed { + return Completed(c) +} + +type ExpireConditionNx Completed + +func (c ExpireConditionNx) Build() Completed { + return Completed(c) +} + +type ExpireConditionXx Completed + +func (c ExpireConditionXx) Build() Completed { + return Completed(c) +} + +type ExpireKey Completed + +func (c ExpireKey) Seconds(seconds int64) ExpireSeconds { + c.cs.s = append(c.cs.s, strconv.FormatInt(seconds, 10)) + return (ExpireSeconds)(c) +} + +type ExpireSeconds Completed + +func (c ExpireSeconds) Nx() ExpireConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (ExpireConditionNx)(c) +} + +func (c ExpireSeconds) Xx() ExpireConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (ExpireConditionXx)(c) +} + +func (c ExpireSeconds) Gt() ExpireConditionGt { + c.cs.s = append(c.cs.s, "GT") + return (ExpireConditionGt)(c) +} + +func (c ExpireSeconds) Lt() ExpireConditionLt { + c.cs.s = append(c.cs.s, "LT") + return (ExpireConditionLt)(c) +} + +func (c ExpireSeconds) Build() Completed { + return Completed(c) +} + +type Expireat Completed + +func (b Builder) Expireat() (c Expireat) { + c = Expireat{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "EXPIREAT") + return c +} + +func (c Expireat) Key(key string) ExpireatKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ExpireatKey)(c) +} + +type ExpireatConditionGt Completed + +func (c ExpireatConditionGt) Build() Completed { + return Completed(c) +} + +type 
ExpireatConditionLt Completed + +func (c ExpireatConditionLt) Build() Completed { + return Completed(c) +} + +type ExpireatConditionNx Completed + +func (c ExpireatConditionNx) Build() Completed { + return Completed(c) +} + +type ExpireatConditionXx Completed + +func (c ExpireatConditionXx) Build() Completed { + return Completed(c) +} + +type ExpireatKey Completed + +func (c ExpireatKey) Timestamp(timestamp int64) ExpireatTimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(timestamp, 10)) + return (ExpireatTimestamp)(c) +} + +type ExpireatTimestamp Completed + +func (c ExpireatTimestamp) Nx() ExpireatConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (ExpireatConditionNx)(c) +} + +func (c ExpireatTimestamp) Xx() ExpireatConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (ExpireatConditionXx)(c) +} + +func (c ExpireatTimestamp) Gt() ExpireatConditionGt { + c.cs.s = append(c.cs.s, "GT") + return (ExpireatConditionGt)(c) +} + +func (c ExpireatTimestamp) Lt() ExpireatConditionLt { + c.cs.s = append(c.cs.s, "LT") + return (ExpireatConditionLt)(c) +} + +func (c ExpireatTimestamp) Build() Completed { + return Completed(c) +} + +type Expiretime Completed + +func (b Builder) Expiretime() (c Expiretime) { + c = Expiretime{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "EXPIRETIME") + return c +} + +func (c Expiretime) Key(key string) ExpiretimeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ExpiretimeKey)(c) +} + +type ExpiretimeKey Completed + +func (c ExpiretimeKey) Build() Completed { + return Completed(c) +} + +func (c ExpiretimeKey) Cache() Cacheable { + return Cacheable(c) +} + +type Failover Completed + +func (b Builder) Failover() (c Failover) { + c = Failover{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FAILOVER") + return c +} + +func (c Failover) To() FailoverTargetTo { + c.cs.s = append(c.cs.s, "TO") + return (FailoverTargetTo)(c) +} + +func (c Failover) Abort() FailoverAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (FailoverAbort)(c) +} + +func (c Failover) Timeout(milliseconds int64) FailoverTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(milliseconds, 10)) + return (FailoverTimeout)(c) +} + +func (c Failover) Build() Completed { + return Completed(c) +} + +type FailoverAbort Completed + +func (c FailoverAbort) Timeout(milliseconds int64) FailoverTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(milliseconds, 10)) + return (FailoverTimeout)(c) +} + +func (c FailoverAbort) Build() Completed { + return Completed(c) +} + +type FailoverTargetForce Completed + +func (c FailoverTargetForce) Abort() FailoverAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (FailoverAbort)(c) +} + +func (c FailoverTargetForce) Timeout(milliseconds int64) FailoverTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(milliseconds, 10)) + return (FailoverTimeout)(c) +} + +func (c FailoverTargetForce) Build() Completed { + return Completed(c) +} + +type FailoverTargetHost Completed + +func (c FailoverTargetHost) Port(port int64) FailoverTargetPort { + c.cs.s = append(c.cs.s, strconv.FormatInt(port, 10)) + return (FailoverTargetPort)(c) +} + +type FailoverTargetPort Completed + +func (c FailoverTargetPort) Force() FailoverTargetForce { + c.cs.s = append(c.cs.s, "FORCE") + return (FailoverTargetForce)(c) +} + +func (c FailoverTargetPort) Abort() FailoverAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (FailoverAbort)(c) +} + +func (c 
FailoverTargetPort) Timeout(milliseconds int64) FailoverTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(milliseconds, 10)) + return (FailoverTimeout)(c) +} + +func (c FailoverTargetPort) Build() Completed { + return Completed(c) +} + +type FailoverTargetTo Completed + +func (c FailoverTargetTo) Host(host string) FailoverTargetHost { + c.cs.s = append(c.cs.s, host) + return (FailoverTargetHost)(c) +} + +type FailoverTimeout Completed + +func (c FailoverTimeout) Build() Completed { + return Completed(c) +} + +type Fcall Completed + +func (b Builder) Fcall() (c Fcall) { + c = Fcall{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FCALL") + return c +} + +func (c Fcall) Function(function string) FcallFunction { + c.cs.s = append(c.cs.s, function) + return (FcallFunction)(c) +} + +type FcallArg Completed + +func (c FcallArg) Arg(arg ...string) FcallArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c FcallArg) Build() Completed { + return Completed(c) +} + +type FcallFunction Completed + +func (c FcallFunction) Numkeys(numkeys int64) FcallNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (FcallNumkeys)(c) +} + +type FcallKey Completed + +func (c FcallKey) Key(key ...string) FcallKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c FcallKey) Arg(arg ...string) FcallArg { + c.cs.s = append(c.cs.s, arg...) + return (FcallArg)(c) +} + +func (c FcallKey) Build() Completed { + return Completed(c) +} + +type FcallNumkeys Completed + +func (c FcallNumkeys) Key(key ...string) FcallKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (FcallKey)(c) +} + +func (c FcallNumkeys) Arg(arg ...string) FcallArg { + c.cs.s = append(c.cs.s, arg...) + return (FcallArg)(c) +} + +func (c FcallNumkeys) Build() Completed { + return Completed(c) +} + +type FcallRo Completed + +func (b Builder) FcallRo() (c FcallRo) { + c = FcallRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "FCALL_RO") + return c +} + +func (c FcallRo) Function(function string) FcallRoFunction { + c.cs.s = append(c.cs.s, function) + return (FcallRoFunction)(c) +} + +type FcallRoArg Completed + +func (c FcallRoArg) Arg(arg ...string) FcallRoArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c FcallRoArg) Build() Completed { + return Completed(c) +} + +func (c FcallRoArg) Cache() Cacheable { + return Cacheable(c) +} + +type FcallRoFunction Completed + +func (c FcallRoFunction) Numkeys(numkeys int64) FcallRoNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (FcallRoNumkeys)(c) +} + +type FcallRoKey Completed + +func (c FcallRoKey) Key(key ...string) FcallRoKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c FcallRoKey) Arg(arg ...string) FcallRoArg { + c.cs.s = append(c.cs.s, arg...) 
+ return (FcallRoArg)(c) +} + +func (c FcallRoKey) Build() Completed { + return Completed(c) +} + +func (c FcallRoKey) Cache() Cacheable { + return Cacheable(c) +} + +type FcallRoNumkeys Completed + +func (c FcallRoNumkeys) Key(key ...string) FcallRoKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (FcallRoKey)(c) +} + +func (c FcallRoNumkeys) Arg(arg ...string) FcallRoArg { + c.cs.s = append(c.cs.s, arg...) + return (FcallRoArg)(c) +} + +func (c FcallRoNumkeys) Build() Completed { + return Completed(c) +} + +func (c FcallRoNumkeys) Cache() Cacheable { + return Cacheable(c) +} + +type Flushall Completed + +func (b Builder) Flushall() (c Flushall) { + c = Flushall{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FLUSHALL") + return c +} + +func (c Flushall) Async() FlushallAsync { + c.cs.s = append(c.cs.s, "ASYNC") + return (FlushallAsync)(c) +} + +func (c Flushall) Sync() FlushallAsyncSync { + c.cs.s = append(c.cs.s, "SYNC") + return (FlushallAsyncSync)(c) +} + +func (c Flushall) Build() Completed { + return Completed(c) +} + +type FlushallAsync Completed + +func (c FlushallAsync) Build() Completed { + return Completed(c) +} + +type FlushallAsyncSync Completed + +func (c FlushallAsyncSync) Build() Completed { + return Completed(c) +} + +type Flushdb Completed + +func (b Builder) Flushdb() (c Flushdb) { + c = Flushdb{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FLUSHDB") + return c +} + +func (c Flushdb) Async() FlushdbAsync { + c.cs.s = append(c.cs.s, "ASYNC") + return (FlushdbAsync)(c) +} + +func (c Flushdb) Sync() FlushdbAsyncSync { + c.cs.s = append(c.cs.s, "SYNC") + return (FlushdbAsyncSync)(c) +} + +func (c Flushdb) Build() Completed { + return Completed(c) +} + +type FlushdbAsync Completed + +func (c FlushdbAsync) Build() Completed { + return Completed(c) +} + +type FlushdbAsyncSync Completed + +func (c FlushdbAsyncSync) Build() Completed { + return Completed(c) +} + +type FtAggregate Completed + +func (b Builder) FtAggregate() (c FtAggregate) { + c = FtAggregate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.AGGREGATE") + return c +} + +func (c FtAggregate) Index(index string) FtAggregateIndex { + c.cs.s = append(c.cs.s, index) + return (FtAggregateIndex)(c) +} + +type FtAggregateCursorCount Completed + +func (c FtAggregateCursorCount) Maxidle(idleTime int64) FtAggregateCursorMaxidle { + c.cs.s = append(c.cs.s, "MAXIDLE", strconv.FormatInt(idleTime, 10)) + return (FtAggregateCursorMaxidle)(c) +} + +func (c FtAggregateCursorCount) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateCursorCount) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateCursorCount) Build() Completed { + return Completed(c) +} + +type FtAggregateCursorMaxidle Completed + +func (c FtAggregateCursorMaxidle) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateCursorMaxidle) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateCursorMaxidle) Build() Completed { + return Completed(c) +} + +type FtAggregateCursorWithcursor Completed + +func (c 
FtAggregateCursorWithcursor) Count(readSize int64) FtAggregateCursorCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(readSize, 10)) + return (FtAggregateCursorCount)(c) +} + +func (c FtAggregateCursorWithcursor) Maxidle(idleTime int64) FtAggregateCursorMaxidle { + c.cs.s = append(c.cs.s, "MAXIDLE", strconv.FormatInt(idleTime, 10)) + return (FtAggregateCursorMaxidle)(c) +} + +func (c FtAggregateCursorWithcursor) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateCursorWithcursor) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateCursorWithcursor) Build() Completed { + return Completed(c) +} + +type FtAggregateDialect Completed + +func (c FtAggregateDialect) Build() Completed { + return Completed(c) +} + +type FtAggregateIndex Completed + +func (c FtAggregateIndex) Query(query string) FtAggregateQuery { + c.cs.s = append(c.cs.s, query) + return (FtAggregateQuery)(c) +} + +type FtAggregateLoadField Completed + +func (c FtAggregateLoadField) Field(field ...string) FtAggregateLoadField { + c.cs.s = append(c.cs.s, field...) + return c +} + +func (c FtAggregateLoadField) Timeout(timeout int64) FtAggregateTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtAggregateTimeout)(c) +} + +func (c FtAggregateLoadField) LoadAll() FtAggregateLoadallLoadAll { + c.cs.s = append(c.cs.s, "LOAD", "*") + return (FtAggregateLoadallLoadAll)(c) +} + +func (c FtAggregateLoadField) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateLoadField) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateLoadField) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateLoadField) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateLoadField) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateLoadField) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateLoadField) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateLoadField) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateLoadField) Build() Completed { + return Completed(c) +} + +type FtAggregateLoadLoad Completed + +func (c FtAggregateLoadLoad) Field(field ...string) FtAggregateLoadField { + c.cs.s = append(c.cs.s, field...) 
+ return (FtAggregateLoadField)(c) +} + +type FtAggregateLoadallLoadAll Completed + +func (c FtAggregateLoadallLoadAll) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateLoadallLoadAll) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateLoadallLoadAll) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateLoadallLoadAll) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateLoadallLoadAll) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateLoadallLoadAll) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateLoadallLoadAll) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateLoadallLoadAll) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateLoadallLoadAll) Build() Completed { + return Completed(c) +} + +type FtAggregateOpApplyApply Completed + +func (c FtAggregateOpApplyApply) As(name string) FtAggregateOpApplyAs { + c.cs.s = append(c.cs.s, "AS", name) + return (FtAggregateOpApplyAs)(c) +} + +type FtAggregateOpApplyAs Completed + +func (c FtAggregateOpApplyAs) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpApplyAs) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpApplyAs) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpApplyAs) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpApplyAs) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpApplyAs) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpApplyAs) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpApplyAs) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpApplyAs) Build() Completed { + return Completed(c) +} + +type FtAggregateOpFilter Completed + +func (c FtAggregateOpFilter) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpFilter) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, 
"SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpFilter) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpFilter) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpFilter) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return c +} + +func (c FtAggregateOpFilter) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpFilter) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpFilter) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpFilter) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyGroupby Completed + +func (c FtAggregateOpGroupbyGroupby) Property(property ...string) FtAggregateOpGroupbyProperty { + c.cs.s = append(c.cs.s, property...) + return (FtAggregateOpGroupbyProperty)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return c +} + +func (c FtAggregateOpGroupbyGroupby) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyGroupby) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyProperty Completed + +func (c FtAggregateOpGroupbyProperty) Property(property ...string) FtAggregateOpGroupbyProperty { + c.cs.s = append(c.cs.s, property...) 
+ return c +} + +func (c FtAggregateOpGroupbyProperty) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyProperty) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyProperty) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyProperty) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyProperty) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyProperty) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyProperty) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyProperty) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyProperty) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyProperty) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceArg Completed + +func (c FtAggregateOpGroupbyReduceArg) Arg(arg ...string) FtAggregateOpGroupbyReduceArg { + c.cs.s = append(c.cs.s, arg...) 
+ return c +} + +func (c FtAggregateOpGroupbyReduceArg) As(name string) FtAggregateOpGroupbyReduceAs { + c.cs.s = append(c.cs.s, "AS", name) + return (FtAggregateOpGroupbyReduceAs)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) By(by string) FtAggregateOpGroupbyReduceBy { + c.cs.s = append(c.cs.s, "BY", by) + return (FtAggregateOpGroupbyReduceBy)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Asc() FtAggregateOpGroupbyReduceOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (FtAggregateOpGroupbyReduceOrderAsc)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Desc() FtAggregateOpGroupbyReduceOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (FtAggregateOpGroupbyReduceOrderDesc)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyReduceArg) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceAs Completed + +func (c FtAggregateOpGroupbyReduceAs) By(by string) FtAggregateOpGroupbyReduceBy { + c.cs.s = append(c.cs.s, "BY", by) + return (FtAggregateOpGroupbyReduceBy)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Asc() FtAggregateOpGroupbyReduceOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (FtAggregateOpGroupbyReduceOrderAsc)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Desc() FtAggregateOpGroupbyReduceOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (FtAggregateOpGroupbyReduceOrderDesc)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) 
+} + +func (c FtAggregateOpGroupbyReduceAs) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyReduceAs) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceBy Completed + +func (c FtAggregateOpGroupbyReduceBy) Asc() FtAggregateOpGroupbyReduceOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (FtAggregateOpGroupbyReduceOrderAsc)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Desc() FtAggregateOpGroupbyReduceOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (FtAggregateOpGroupbyReduceOrderDesc)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyReduceBy) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceNargs Completed + +func (c FtAggregateOpGroupbyReduceNargs) Arg(arg ...string) FtAggregateOpGroupbyReduceArg { + c.cs.s = append(c.cs.s, arg...) 
+ return (FtAggregateOpGroupbyReduceArg)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) As(name string) FtAggregateOpGroupbyReduceAs { + c.cs.s = append(c.cs.s, "AS", name) + return (FtAggregateOpGroupbyReduceAs)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) By(by string) FtAggregateOpGroupbyReduceBy { + c.cs.s = append(c.cs.s, "BY", by) + return (FtAggregateOpGroupbyReduceBy)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Asc() FtAggregateOpGroupbyReduceOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (FtAggregateOpGroupbyReduceOrderAsc)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Desc() FtAggregateOpGroupbyReduceOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (FtAggregateOpGroupbyReduceOrderDesc)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyReduceNargs) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceOrderAsc Completed + +func (c FtAggregateOpGroupbyReduceOrderAsc) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Filter(filter string) 
FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderAsc) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceOrderDesc Completed + +func (c FtAggregateOpGroupbyReduceOrderDesc) Reduce(function string) FtAggregateOpGroupbyReduceReduce { + c.cs.s = append(c.cs.s, "REDUCE", function) + return (FtAggregateOpGroupbyReduceReduce)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpGroupbyReduceOrderDesc) Build() Completed { + return Completed(c) +} + +type FtAggregateOpGroupbyReduceReduce Completed + +func (c FtAggregateOpGroupbyReduceReduce) Nargs(nargs int64) FtAggregateOpGroupbyReduceNargs { + c.cs.s = append(c.cs.s, strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyReduceNargs)(c) +} + +type FtAggregateOpLimitLimit Completed + +func (c FtAggregateOpLimitLimit) OffsetNum(offset int64, num int64) FtAggregateOpLimitOffsetNum { + c.cs.s = append(c.cs.s, strconv.FormatInt(offset, 10), strconv.FormatInt(num, 10)) + return (FtAggregateOpLimitOffsetNum)(c) +} + +type FtAggregateOpLimitOffsetNum Completed + +func (c FtAggregateOpLimitOffsetNum) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Sortby(nargs int64) 
FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpLimitOffsetNum) Build() Completed { + return Completed(c) +} + +type FtAggregateOpSortbyFieldsOrderAsc Completed + +func (c FtAggregateOpSortbyFieldsOrderAsc) Property(property string) FtAggregateOpSortbyFieldsProperty { + c.cs.s = append(c.cs.s, property) + return (FtAggregateOpSortbyFieldsProperty)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Max(num int64) FtAggregateOpSortbyMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(num, 10)) + return (FtAggregateOpSortbyMax)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderAsc) Build() Completed { + return Completed(c) +} + +type FtAggregateOpSortbyFieldsOrderDesc Completed + +func (c FtAggregateOpSortbyFieldsOrderDesc) Property(property string) FtAggregateOpSortbyFieldsProperty { + c.cs.s = append(c.cs.s, property) + return (FtAggregateOpSortbyFieldsProperty)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Max(num int64) FtAggregateOpSortbyMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(num, 10)) + return (FtAggregateOpSortbyMax)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Apply(expression string) 
FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpSortbyFieldsOrderDesc) Build() Completed { + return Completed(c) +} + +type FtAggregateOpSortbyFieldsProperty Completed + +func (c FtAggregateOpSortbyFieldsProperty) Asc() FtAggregateOpSortbyFieldsOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (FtAggregateOpSortbyFieldsOrderAsc)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Desc() FtAggregateOpSortbyFieldsOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (FtAggregateOpSortbyFieldsOrderDesc)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Property(property string) FtAggregateOpSortbyFieldsProperty { + c.cs.s = append(c.cs.s, property) + return c +} + +func (c FtAggregateOpSortbyFieldsProperty) Max(num int64) FtAggregateOpSortbyMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(num, 10)) + return (FtAggregateOpSortbyMax)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Dialect(dialect int64) 
FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpSortbyFieldsProperty) Build() Completed { + return Completed(c) +} + +type FtAggregateOpSortbyMax Completed + +func (c FtAggregateOpSortbyMax) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpSortbyMax) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpSortbyMax) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpSortbyMax) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpSortbyMax) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateOpSortbyMax) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpSortbyMax) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpSortbyMax) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpSortbyMax) Build() Completed { + return Completed(c) +} + +type FtAggregateOpSortbySortby Completed + +func (c FtAggregateOpSortbySortby) Property(property string) FtAggregateOpSortbyFieldsProperty { + c.cs.s = append(c.cs.s, property) + return (FtAggregateOpSortbyFieldsProperty)(c) +} + +func (c FtAggregateOpSortbySortby) Max(num int64) FtAggregateOpSortbyMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(num, 10)) + return (FtAggregateOpSortbyMax)(c) +} + +func (c FtAggregateOpSortbySortby) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateOpSortbySortby) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateOpSortbySortby) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateOpSortbySortby) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateOpSortbySortby) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return c +} + +func (c FtAggregateOpSortbySortby) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateOpSortbySortby) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateOpSortbySortby) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateOpSortbySortby) Build() Completed { + return Completed(c) +} + 
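+// Illustrative usage sketch (an assumption for readability, not part of the
+// generated command surface): each builder step above only appends RESP
+// tokens in command order, so a typical FT.AGGREGATE call composed from
+// these types looks like
+//
+//	cmd := b.FtAggregate().Index("idx").Query("*").
+//		Groupby(1).Property("@brand").
+//		Reduce("COUNT").Nargs(0).As("count").
+//		Sortby(2).Property("@count").Desc().
+//		Build()
+//
+// which yields the token sequence
+// "FT.AGGREGATE idx * GROUPBY 1 @brand REDUCE COUNT 0 AS count SORTBY 2 @count DESC",
+// where b is assumed to be a Builder value obtained from the client.
+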
+type FtAggregateParamsNameValue Completed + +func (c FtAggregateParamsNameValue) NameValue(name string, value string) FtAggregateParamsNameValue { + c.cs.s = append(c.cs.s, name, value) + return c +} + +func (c FtAggregateParamsNameValue) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateParamsNameValue) Build() Completed { + return Completed(c) +} + +type FtAggregateParamsNargs Completed + +func (c FtAggregateParamsNargs) NameValue() FtAggregateParamsNameValue { + return (FtAggregateParamsNameValue)(c) +} + +type FtAggregateParamsParams Completed + +func (c FtAggregateParamsParams) Nargs(nargs int64) FtAggregateParamsNargs { + c.cs.s = append(c.cs.s, strconv.FormatInt(nargs, 10)) + return (FtAggregateParamsNargs)(c) +} + +type FtAggregateQuery Completed + +func (c FtAggregateQuery) Verbatim() FtAggregateVerbatim { + c.cs.s = append(c.cs.s, "VERBATIM") + return (FtAggregateVerbatim)(c) +} + +func (c FtAggregateQuery) Load(count string) FtAggregateLoadLoad { + c.cs.s = append(c.cs.s, "LOAD", count) + return (FtAggregateLoadLoad)(c) +} + +func (c FtAggregateQuery) Timeout(timeout int64) FtAggregateTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtAggregateTimeout)(c) +} + +func (c FtAggregateQuery) LoadAll() FtAggregateLoadallLoadAll { + c.cs.s = append(c.cs.s, "LOAD", "*") + return (FtAggregateLoadallLoadAll)(c) +} + +func (c FtAggregateQuery) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateQuery) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateQuery) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateQuery) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateQuery) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateQuery) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateQuery) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateQuery) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateQuery) Build() Completed { + return Completed(c) +} + +type FtAggregateTimeout Completed + +func (c FtAggregateTimeout) LoadAll() FtAggregateLoadallLoadAll { + c.cs.s = append(c.cs.s, "LOAD", "*") + return (FtAggregateLoadallLoadAll)(c) +} + +func (c FtAggregateTimeout) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateTimeout) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateTimeout) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = 
append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateTimeout) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateTimeout) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateTimeout) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateTimeout) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateTimeout) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateTimeout) Build() Completed { + return Completed(c) +} + +type FtAggregateVerbatim Completed + +func (c FtAggregateVerbatim) Load(count string) FtAggregateLoadLoad { + c.cs.s = append(c.cs.s, "LOAD", count) + return (FtAggregateLoadLoad)(c) +} + +func (c FtAggregateVerbatim) Timeout(timeout int64) FtAggregateTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtAggregateTimeout)(c) +} + +func (c FtAggregateVerbatim) LoadAll() FtAggregateLoadallLoadAll { + c.cs.s = append(c.cs.s, "LOAD", "*") + return (FtAggregateLoadallLoadAll)(c) +} + +func (c FtAggregateVerbatim) Groupby(nargs int64) FtAggregateOpGroupbyGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpGroupbyGroupby)(c) +} + +func (c FtAggregateVerbatim) Sortby(nargs int64) FtAggregateOpSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", strconv.FormatInt(nargs, 10)) + return (FtAggregateOpSortbySortby)(c) +} + +func (c FtAggregateVerbatim) Apply(expression string) FtAggregateOpApplyApply { + c.cs.s = append(c.cs.s, "APPLY", expression) + return (FtAggregateOpApplyApply)(c) +} + +func (c FtAggregateVerbatim) Limit() FtAggregateOpLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtAggregateOpLimitLimit)(c) +} + +func (c FtAggregateVerbatim) Filter(filter string) FtAggregateOpFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtAggregateOpFilter)(c) +} + +func (c FtAggregateVerbatim) Withcursor() FtAggregateCursorWithcursor { + c.cs.s = append(c.cs.s, "WITHCURSOR") + return (FtAggregateCursorWithcursor)(c) +} + +func (c FtAggregateVerbatim) Params() FtAggregateParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtAggregateParamsParams)(c) +} + +func (c FtAggregateVerbatim) Dialect(dialect int64) FtAggregateDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtAggregateDialect)(c) +} + +func (c FtAggregateVerbatim) Build() Completed { + return Completed(c) +} + +type FtAliasadd Completed + +func (b Builder) FtAliasadd() (c FtAliasadd) { + c = FtAliasadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.ALIASADD") + return c +} + +func (c FtAliasadd) Alias(alias string) FtAliasaddAlias { + c.cs.s = append(c.cs.s, alias) + return (FtAliasaddAlias)(c) +} + +type FtAliasaddAlias Completed + +func (c FtAliasaddAlias) Index(index string) FtAliasaddIndex { + c.cs.s = append(c.cs.s, index) + return (FtAliasaddIndex)(c) +} + +type FtAliasaddIndex Completed + +func (c FtAliasaddIndex) Build() Completed { + return Completed(c) +} + +type FtAliasdel Completed + +func (b Builder) FtAliasdel() (c FtAliasdel) { + c = FtAliasdel{cs: get(), ks: 
b.ks} + c.cs.s = append(c.cs.s, "FT.ALIASDEL") + return c +} + +func (c FtAliasdel) Alias(alias string) FtAliasdelAlias { + c.cs.s = append(c.cs.s, alias) + return (FtAliasdelAlias)(c) +} + +type FtAliasdelAlias Completed + +func (c FtAliasdelAlias) Build() Completed { + return Completed(c) +} + +type FtAliasupdate Completed + +func (b Builder) FtAliasupdate() (c FtAliasupdate) { + c = FtAliasupdate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.ALIASUPDATE") + return c +} + +func (c FtAliasupdate) Alias(alias string) FtAliasupdateAlias { + c.cs.s = append(c.cs.s, alias) + return (FtAliasupdateAlias)(c) +} + +type FtAliasupdateAlias Completed + +func (c FtAliasupdateAlias) Index(index string) FtAliasupdateIndex { + c.cs.s = append(c.cs.s, index) + return (FtAliasupdateIndex)(c) +} + +type FtAliasupdateIndex Completed + +func (c FtAliasupdateIndex) Build() Completed { + return Completed(c) +} + +type FtAlter Completed + +func (b Builder) FtAlter() (c FtAlter) { + c = FtAlter{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.ALTER") + return c +} + +func (c FtAlter) Index(index string) FtAlterIndex { + c.cs.s = append(c.cs.s, index) + return (FtAlterIndex)(c) +} + +type FtAlterAdd Completed + +func (c FtAlterAdd) Field(field string) FtAlterField { + c.cs.s = append(c.cs.s, field) + return (FtAlterField)(c) +} + +type FtAlterField Completed + +func (c FtAlterField) Options(options string) FtAlterOptions { + c.cs.s = append(c.cs.s, options) + return (FtAlterOptions)(c) +} + +type FtAlterIndex Completed + +func (c FtAlterIndex) Skipinitialscan() FtAlterSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtAlterSkipinitialscan)(c) +} + +func (c FtAlterIndex) Schema() FtAlterSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtAlterSchema)(c) +} + +type FtAlterOptions Completed + +func (c FtAlterOptions) Build() Completed { + return Completed(c) +} + +type FtAlterSchema Completed + +func (c FtAlterSchema) Add() FtAlterAdd { + c.cs.s = append(c.cs.s, "ADD") + return (FtAlterAdd)(c) +} + +type FtAlterSkipinitialscan Completed + +func (c FtAlterSkipinitialscan) Schema() FtAlterSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtAlterSchema)(c) +} + +type FtConfigGet Completed + +func (b Builder) FtConfigGet() (c FtConfigGet) { + c = FtConfigGet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.CONFIG", "GET") + return c +} + +func (c FtConfigGet) Option(option string) FtConfigGetOption { + c.cs.s = append(c.cs.s, option) + return (FtConfigGetOption)(c) +} + +type FtConfigGetOption Completed + +func (c FtConfigGetOption) Build() Completed { + return Completed(c) +} + +type FtConfigHelp Completed + +func (b Builder) FtConfigHelp() (c FtConfigHelp) { + c = FtConfigHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.CONFIG", "HELP") + return c +} + +func (c FtConfigHelp) Option(option string) FtConfigHelpOption { + c.cs.s = append(c.cs.s, option) + return (FtConfigHelpOption)(c) +} + +type FtConfigHelpOption Completed + +func (c FtConfigHelpOption) Build() Completed { + return Completed(c) +} + +type FtConfigSet Completed + +func (b Builder) FtConfigSet() (c FtConfigSet) { + c = FtConfigSet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.CONFIG", "SET") + return c +} + +func (c FtConfigSet) Option(option string) FtConfigSetOption { + c.cs.s = append(c.cs.s, option) + return (FtConfigSetOption)(c) +} + +type FtConfigSetOption Completed + +func (c FtConfigSetOption) Value(value string) FtConfigSetValue { + c.cs.s = append(c.cs.s, value) + return 
(FtConfigSetValue)(c) +} + +type FtConfigSetValue Completed + +func (c FtConfigSetValue) Build() Completed { + return Completed(c) +} + +type FtCreate Completed + +func (b Builder) FtCreate() (c FtCreate) { + c = FtCreate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.CREATE") + return c +} + +func (c FtCreate) Index(index string) FtCreateIndex { + c.cs.s = append(c.cs.s, index) + return (FtCreateIndex)(c) +} + +type FtCreateFieldAs Completed + +func (c FtCreateFieldAs) Text() FtCreateFieldFieldTypeText { + c.cs.s = append(c.cs.s, "TEXT") + return (FtCreateFieldFieldTypeText)(c) +} + +func (c FtCreateFieldAs) Tag() FtCreateFieldFieldTypeTag { + c.cs.s = append(c.cs.s, "TAG") + return (FtCreateFieldFieldTypeTag)(c) +} + +func (c FtCreateFieldAs) Numeric() FtCreateFieldFieldTypeNumeric { + c.cs.s = append(c.cs.s, "NUMERIC") + return (FtCreateFieldFieldTypeNumeric)(c) +} + +func (c FtCreateFieldAs) Geo() FtCreateFieldFieldTypeGeo { + c.cs.s = append(c.cs.s, "GEO") + return (FtCreateFieldFieldTypeGeo)(c) +} + +func (c FtCreateFieldAs) Vector(algo string, nargs int64, args ...string) FtCreateFieldFieldTypeVector { + c.cs.s = append(c.cs.s, "VECTOR", algo, strconv.FormatInt(nargs, 10)) + c.cs.s = append(c.cs.s, args...) + return (FtCreateFieldFieldTypeVector)(c) +} + +type FtCreateFieldFieldName Completed + +func (c FtCreateFieldFieldName) As(alias string) FtCreateFieldAs { + c.cs.s = append(c.cs.s, "AS", alias) + return (FtCreateFieldAs)(c) +} + +func (c FtCreateFieldFieldName) Text() FtCreateFieldFieldTypeText { + c.cs.s = append(c.cs.s, "TEXT") + return (FtCreateFieldFieldTypeText)(c) +} + +func (c FtCreateFieldFieldName) Tag() FtCreateFieldFieldTypeTag { + c.cs.s = append(c.cs.s, "TAG") + return (FtCreateFieldFieldTypeTag)(c) +} + +func (c FtCreateFieldFieldName) Numeric() FtCreateFieldFieldTypeNumeric { + c.cs.s = append(c.cs.s, "NUMERIC") + return (FtCreateFieldFieldTypeNumeric)(c) +} + +func (c FtCreateFieldFieldName) Geo() FtCreateFieldFieldTypeGeo { + c.cs.s = append(c.cs.s, "GEO") + return (FtCreateFieldFieldTypeGeo)(c) +} + +func (c FtCreateFieldFieldName) Vector(algo string, nargs int64, args ...string) FtCreateFieldFieldTypeVector { + c.cs.s = append(c.cs.s, "VECTOR", algo, strconv.FormatInt(nargs, 10)) + c.cs.s = append(c.cs.s, args...) 
+ return (FtCreateFieldFieldTypeVector)(c) +} + +type FtCreateFieldFieldTypeGeo Completed + +func (c FtCreateFieldFieldTypeGeo) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldFieldTypeGeo) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldFieldTypeGeo) Build() Completed { + return Completed(c) +} + +type FtCreateFieldFieldTypeNumeric Completed + +func (c FtCreateFieldFieldTypeNumeric) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldFieldTypeNumeric) Build() Completed { + 
return Completed(c) +} + +type FtCreateFieldFieldTypeTag Completed + +func (c FtCreateFieldFieldTypeTag) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldFieldTypeTag) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldFieldTypeTag) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldFieldTypeTag) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldFieldTypeTag) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldFieldTypeTag) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldFieldTypeTag) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldFieldTypeTag) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldFieldTypeTag) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldFieldTypeTag) Build() Completed { + return Completed(c) +} + +type FtCreateFieldFieldTypeText Completed + +func (c FtCreateFieldFieldTypeText) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldFieldTypeText) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldFieldTypeText) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldFieldTypeText) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldFieldTypeText) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldFieldTypeText) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldFieldTypeText) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldFieldTypeText) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldFieldTypeText) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldFieldTypeText) Build() Completed { + return Completed(c) +} + +type 
FtCreateFieldFieldTypeVector Completed + +func (c FtCreateFieldFieldTypeVector) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldFieldTypeVector) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldFieldTypeVector) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldFieldTypeVector) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldFieldTypeVector) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldFieldTypeVector) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldFieldTypeVector) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldFieldTypeVector) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldFieldTypeVector) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldFieldTypeVector) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionCasesensitive Completed + +func (c FtCreateFieldOptionCasesensitive) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return c +} + +func (c FtCreateFieldOptionCasesensitive) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionCasesensitive) Build() Completed { + return Completed(c) +} + 
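+// Note on the generated FtCreateFieldOption* states in this region: each one
+// re-exposes the full set of per-field options (SORTABLE, NOINDEX, NOSTEM,
+// PHONETIC, WEIGHT, SEPARATOR, CASESENSITIVE, WITHSUFFIXTRIE) plus FieldName
+// and Build, so field options can be chained in any order before starting the
+// next field or finishing the command.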
+type FtCreateFieldOptionNoindex Completed + +func (c FtCreateFieldOptionNoindex) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionNoindex) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionNoindex) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionNoindex) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionNoindex) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionNoindex) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionNoindex) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionNoindex) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return c +} + +func (c FtCreateFieldOptionNoindex) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionNoindex) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionNostem Completed + +func (c FtCreateFieldOptionNostem) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionNostem) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionNostem) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionNostem) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionNostem) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionNostem) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionNostem) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionNostem) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return c +} + +func (c FtCreateFieldOptionNostem) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionNostem) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionPhonetic Completed + +func (c FtCreateFieldOptionPhonetic) Weight(weight float64) 
FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionPhonetic) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionPhonetic) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionPhonetic) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionPhonetic) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionPhonetic) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionPhonetic) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionPhonetic) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return c +} + +func (c FtCreateFieldOptionPhonetic) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionPhonetic) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionSeparator Completed + +func (c FtCreateFieldOptionSeparator) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionSeparator) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionSeparator) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionSeparator) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionSeparator) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionSeparator) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionSeparator) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionSeparator) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return c +} + +func (c FtCreateFieldOptionSeparator) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionSeparator) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionSortableSortable Completed + +func (c FtCreateFieldOptionSortableSortable) Unf() FtCreateFieldOptionSortableUnf { + c.cs.s = append(c.cs.s, "UNF") + return 
(FtCreateFieldOptionSortableUnf)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return c +} + +func (c FtCreateFieldOptionSortableSortable) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionSortableSortable) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionSortableUnf Completed + +func (c FtCreateFieldOptionSortableUnf) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionSortableUnf) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionSortableUnf) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionSortableUnf) 
Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionWeight Completed + +func (c FtCreateFieldOptionWeight) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionWeight) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionWeight) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return (FtCreateFieldOptionWithsuffixtrie)(c) +} + +func (c FtCreateFieldOptionWeight) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionWeight) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionWeight) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionWeight) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionWeight) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return c +} + +func (c FtCreateFieldOptionWeight) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionWeight) Build() Completed { + return Completed(c) +} + +type FtCreateFieldOptionWithsuffixtrie Completed + +func (c FtCreateFieldOptionWithsuffixtrie) Sortable() FtCreateFieldOptionSortableSortable { + c.cs.s = append(c.cs.s, "SORTABLE") + return (FtCreateFieldOptionSortableSortable)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Noindex() FtCreateFieldOptionNoindex { + c.cs.s = append(c.cs.s, "NOINDEX") + return (FtCreateFieldOptionNoindex)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Nostem() FtCreateFieldOptionNostem { + c.cs.s = append(c.cs.s, "NOSTEM") + return (FtCreateFieldOptionNostem)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Phonetic(phonetic string) FtCreateFieldOptionPhonetic { + c.cs.s = append(c.cs.s, "PHONETIC", phonetic) + return (FtCreateFieldOptionPhonetic)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Weight(weight float64) FtCreateFieldOptionWeight { + c.cs.s = append(c.cs.s, "WEIGHT", strconv.FormatFloat(weight, 'f', -1, 64)) + return (FtCreateFieldOptionWeight)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Separator(separator string) FtCreateFieldOptionSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtCreateFieldOptionSeparator)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Casesensitive() FtCreateFieldOptionCasesensitive { + c.cs.s = append(c.cs.s, "CASESENSITIVE") + return (FtCreateFieldOptionCasesensitive)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Withsuffixtrie() FtCreateFieldOptionWithsuffixtrie { + c.cs.s = append(c.cs.s, "WITHSUFFIXTRIE") + return c +} + +func (c FtCreateFieldOptionWithsuffixtrie) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +func (c FtCreateFieldOptionWithsuffixtrie) Build() Completed { + return Completed(c) +} + 
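+// Illustrative chain (a sketch, not part of the generated output; the index
+// name, prefix and field names below are placeholders, and b is assumed to be
+// a Builder value):
+//
+//	b.FtCreate().Index("idx").OnHash().
+//		Prefix(1).Prefix("doc:").
+//		Schema().
+//		FieldName("title").Text().Sortable().
+//		FieldName("body").Text().Weight(2).
+//		Build()
+//
+// This walks FtCreate -> FtCreateIndex -> FtCreateOnHash -> FtCreatePrefixCount ->
+// FtCreatePrefixPrefix -> FtCreateSchema -> FtCreateFieldFieldName and the field
+// option states, and yields a Completed FT.CREATE command.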
+type FtCreateFilter Completed + +func (c FtCreateFilter) Language(defaultLang string) FtCreateLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", defaultLang) + return (FtCreateLanguage)(c) +} + +func (c FtCreateFilter) LanguageField(langAttribute string) FtCreateLanguageField { + c.cs.s = append(c.cs.s, "LANGUAGE_FIELD", langAttribute) + return (FtCreateLanguageField)(c) +} + +func (c FtCreateFilter) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreateFilter) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreateFilter) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateFilter) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateFilter) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateFilter) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateFilter) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateFilter) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateFilter) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateFilter) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateFilter) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateFilter) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateIndex Completed + +func (c FtCreateIndex) OnHash() FtCreateOnHash { + c.cs.s = append(c.cs.s, "ON", "HASH") + return (FtCreateOnHash)(c) +} + +func (c FtCreateIndex) OnJson() FtCreateOnJson { + c.cs.s = append(c.cs.s, "ON", "JSON") + return (FtCreateOnJson)(c) +} + +func (c FtCreateIndex) Prefix(count int64) FtCreatePrefixCount { + c.cs.s = append(c.cs.s, "PREFIX", strconv.FormatInt(count, 10)) + return (FtCreatePrefixCount)(c) +} + +func (c FtCreateIndex) Filter(filter string) FtCreateFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtCreateFilter)(c) +} + +func (c FtCreateIndex) Language(defaultLang string) FtCreateLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", defaultLang) + return (FtCreateLanguage)(c) +} + +func (c FtCreateIndex) LanguageField(langAttribute string) FtCreateLanguageField { + c.cs.s = append(c.cs.s, "LANGUAGE_FIELD", langAttribute) + return (FtCreateLanguageField)(c) +} + +func (c FtCreateIndex) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreateIndex) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreateIndex) 
PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateIndex) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateIndex) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateIndex) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateIndex) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateIndex) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateIndex) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateIndex) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateIndex) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateIndex) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateLanguage Completed + +func (c FtCreateLanguage) LanguageField(langAttribute string) FtCreateLanguageField { + c.cs.s = append(c.cs.s, "LANGUAGE_FIELD", langAttribute) + return (FtCreateLanguageField)(c) +} + +func (c FtCreateLanguage) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreateLanguage) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreateLanguage) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateLanguage) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateLanguage) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateLanguage) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateLanguage) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateLanguage) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateLanguage) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateLanguage) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateLanguage) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateLanguage) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return 
(FtCreateSchema)(c) +} + +type FtCreateLanguageField Completed + +func (c FtCreateLanguageField) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreateLanguageField) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreateLanguageField) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateLanguageField) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateLanguageField) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateLanguageField) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateLanguageField) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateLanguageField) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateLanguageField) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateLanguageField) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateLanguageField) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateLanguageField) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateMaxtextfields Completed + +func (c FtCreateMaxtextfields) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateMaxtextfields) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateMaxtextfields) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateMaxtextfields) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateMaxtextfields) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateMaxtextfields) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateMaxtextfields) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateMaxtextfields) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateNofields Completed + +func (c FtCreateNofields) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateNofields) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", 
strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateNofields) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateNofields) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateNofreqs Completed + +func (c FtCreateNofreqs) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateNofreqs) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateNofreqs) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateNohl Completed + +func (c FtCreateNohl) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateNohl) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateNohl) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateNohl) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateNohl) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateNooffsets Completed + +func (c FtCreateNooffsets) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateNooffsets) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateNooffsets) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateNooffsets) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateNooffsets) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateNooffsets) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateOnHash Completed + +func (c FtCreateOnHash) Prefix(count int64) FtCreatePrefixCount { + c.cs.s = append(c.cs.s, "PREFIX", strconv.FormatInt(count, 10)) + return (FtCreatePrefixCount)(c) +} + +func (c FtCreateOnHash) Filter(filter string) FtCreateFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtCreateFilter)(c) +} + +func (c FtCreateOnHash) Language(defaultLang string) FtCreateLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", defaultLang) + return (FtCreateLanguage)(c) +} + +func (c FtCreateOnHash) LanguageField(langAttribute string) FtCreateLanguageField { + c.cs.s = append(c.cs.s, "LANGUAGE_FIELD", langAttribute) + return (FtCreateLanguageField)(c) +} + +func (c FtCreateOnHash) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreateOnHash) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + 
+func (c FtCreateOnHash) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateOnHash) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateOnHash) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateOnHash) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateOnHash) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateOnHash) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateOnHash) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateOnHash) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateOnHash) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateOnHash) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateOnJson Completed + +func (c FtCreateOnJson) Prefix(count int64) FtCreatePrefixCount { + c.cs.s = append(c.cs.s, "PREFIX", strconv.FormatInt(count, 10)) + return (FtCreatePrefixCount)(c) +} + +func (c FtCreateOnJson) Filter(filter string) FtCreateFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtCreateFilter)(c) +} + +func (c FtCreateOnJson) Language(defaultLang string) FtCreateLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", defaultLang) + return (FtCreateLanguage)(c) +} + +func (c FtCreateOnJson) LanguageField(langAttribute string) FtCreateLanguageField { + c.cs.s = append(c.cs.s, "LANGUAGE_FIELD", langAttribute) + return (FtCreateLanguageField)(c) +} + +func (c FtCreateOnJson) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreateOnJson) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreateOnJson) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateOnJson) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateOnJson) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateOnJson) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateOnJson) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateOnJson) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateOnJson) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return 
(FtCreateNofreqs)(c) +} + +func (c FtCreateOnJson) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateOnJson) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateOnJson) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreatePayloadField Completed + +func (c FtCreatePayloadField) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreatePayloadField) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreatePayloadField) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreatePayloadField) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreatePayloadField) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreatePayloadField) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreatePayloadField) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreatePayloadField) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreatePayloadField) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreatePrefixCount Completed + +func (c FtCreatePrefixCount) Prefix(prefix ...string) FtCreatePrefixPrefix { + c.cs.s = append(c.cs.s, prefix...) + return (FtCreatePrefixPrefix)(c) +} + +type FtCreatePrefixPrefix Completed + +func (c FtCreatePrefixPrefix) Prefix(prefix ...string) FtCreatePrefixPrefix { + c.cs.s = append(c.cs.s, prefix...) 
+ return c +} + +func (c FtCreatePrefixPrefix) Filter(filter string) FtCreateFilter { + c.cs.s = append(c.cs.s, "FILTER", filter) + return (FtCreateFilter)(c) +} + +func (c FtCreatePrefixPrefix) Language(defaultLang string) FtCreateLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", defaultLang) + return (FtCreateLanguage)(c) +} + +func (c FtCreatePrefixPrefix) LanguageField(langAttribute string) FtCreateLanguageField { + c.cs.s = append(c.cs.s, "LANGUAGE_FIELD", langAttribute) + return (FtCreateLanguageField)(c) +} + +func (c FtCreatePrefixPrefix) Score(defaultScore float64) FtCreateScore { + c.cs.s = append(c.cs.s, "SCORE", strconv.FormatFloat(defaultScore, 'f', -1, 64)) + return (FtCreateScore)(c) +} + +func (c FtCreatePrefixPrefix) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreatePrefixPrefix) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreatePrefixPrefix) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreatePrefixPrefix) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreatePrefixPrefix) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreatePrefixPrefix) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreatePrefixPrefix) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreatePrefixPrefix) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreatePrefixPrefix) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreatePrefixPrefix) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreatePrefixPrefix) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateSchema Completed + +func (c FtCreateSchema) FieldName(fieldName string) FtCreateFieldFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtCreateFieldFieldName)(c) +} + +type FtCreateScore Completed + +func (c FtCreateScore) ScoreField(scoreAttribute string) FtCreateScoreField { + c.cs.s = append(c.cs.s, "SCORE_FIELD", scoreAttribute) + return (FtCreateScoreField)(c) +} + +func (c FtCreateScore) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateScore) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateScore) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateScore) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateScore) Nohl() FtCreateNohl { + 
c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateScore) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateScore) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateScore) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateScore) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateScore) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateScoreField Completed + +func (c FtCreateScoreField) PayloadField(payloadAttribute string) FtCreatePayloadField { + c.cs.s = append(c.cs.s, "PAYLOAD_FIELD", payloadAttribute) + return (FtCreatePayloadField)(c) +} + +func (c FtCreateScoreField) Maxtextfields() FtCreateMaxtextfields { + c.cs.s = append(c.cs.s, "MAXTEXTFIELDS") + return (FtCreateMaxtextfields)(c) +} + +func (c FtCreateScoreField) Temporary(seconds float64) FtCreateTemporary { + c.cs.s = append(c.cs.s, "TEMPORARY", strconv.FormatFloat(seconds, 'f', -1, 64)) + return (FtCreateTemporary)(c) +} + +func (c FtCreateScoreField) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateScoreField) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateScoreField) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateScoreField) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateScoreField) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateScoreField) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateScoreField) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateSkipinitialscan Completed + +func (c FtCreateSkipinitialscan) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateStopwordsStopword Completed + +func (c FtCreateStopwordsStopword) Stopword(stopword ...string) FtCreateStopwordsStopword { + c.cs.s = append(c.cs.s, stopword...) + return c +} + +func (c FtCreateStopwordsStopword) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateStopwordsStopword) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateStopwordsStopwords Completed + +func (c FtCreateStopwordsStopwords) Stopword(stopword ...string) FtCreateStopwordsStopword { + c.cs.s = append(c.cs.s, stopword...) 
+ return (FtCreateStopwordsStopword)(c) +} + +func (c FtCreateStopwordsStopwords) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateStopwordsStopwords) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCreateTemporary Completed + +func (c FtCreateTemporary) Nooffsets() FtCreateNooffsets { + c.cs.s = append(c.cs.s, "NOOFFSETS") + return (FtCreateNooffsets)(c) +} + +func (c FtCreateTemporary) Nohl() FtCreateNohl { + c.cs.s = append(c.cs.s, "NOHL") + return (FtCreateNohl)(c) +} + +func (c FtCreateTemporary) Nofields() FtCreateNofields { + c.cs.s = append(c.cs.s, "NOFIELDS") + return (FtCreateNofields)(c) +} + +func (c FtCreateTemporary) Nofreqs() FtCreateNofreqs { + c.cs.s = append(c.cs.s, "NOFREQS") + return (FtCreateNofreqs)(c) +} + +func (c FtCreateTemporary) Stopwords(count int64) FtCreateStopwordsStopwords { + c.cs.s = append(c.cs.s, "STOPWORDS", strconv.FormatInt(count, 10)) + return (FtCreateStopwordsStopwords)(c) +} + +func (c FtCreateTemporary) Skipinitialscan() FtCreateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtCreateSkipinitialscan)(c) +} + +func (c FtCreateTemporary) Schema() FtCreateSchema { + c.cs.s = append(c.cs.s, "SCHEMA") + return (FtCreateSchema)(c) +} + +type FtCursorDel Completed + +func (b Builder) FtCursorDel() (c FtCursorDel) { + c = FtCursorDel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.CURSOR", "DEL") + return c +} + +func (c FtCursorDel) Index(index string) FtCursorDelIndex { + c.cs.s = append(c.cs.s, index) + return (FtCursorDelIndex)(c) +} + +type FtCursorDelCursorId Completed + +func (c FtCursorDelCursorId) Build() Completed { + return Completed(c) +} + +type FtCursorDelIndex Completed + +func (c FtCursorDelIndex) CursorId(cursorId int64) FtCursorDelCursorId { + c.cs.s = append(c.cs.s, strconv.FormatInt(cursorId, 10)) + return (FtCursorDelCursorId)(c) +} + +type FtCursorRead Completed + +func (b Builder) FtCursorRead() (c FtCursorRead) { + c = FtCursorRead{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.CURSOR", "READ") + return c +} + +func (c FtCursorRead) Index(index string) FtCursorReadIndex { + c.cs.s = append(c.cs.s, index) + return (FtCursorReadIndex)(c) +} + +type FtCursorReadCount Completed + +func (c FtCursorReadCount) Build() Completed { + return Completed(c) +} + +type FtCursorReadCursorId Completed + +func (c FtCursorReadCursorId) Count(readSize int64) FtCursorReadCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(readSize, 10)) + return (FtCursorReadCount)(c) +} + +func (c FtCursorReadCursorId) Build() Completed { + return Completed(c) +} + +type FtCursorReadIndex Completed + +func (c FtCursorReadIndex) CursorId(cursorId int64) FtCursorReadCursorId { + c.cs.s = append(c.cs.s, strconv.FormatInt(cursorId, 10)) + return (FtCursorReadCursorId)(c) +} + +type FtDictadd Completed + +func (b Builder) FtDictadd() (c FtDictadd) { + c = FtDictadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.DICTADD") + return c +} + +func (c FtDictadd) Dict(dict string) FtDictaddDict { + c.cs.s = append(c.cs.s, dict) + return (FtDictaddDict)(c) +} + +type FtDictaddDict Completed + +func (c FtDictaddDict) Term(term ...string) FtDictaddTerm { + c.cs.s = append(c.cs.s, term...) + return (FtDictaddTerm)(c) +} + +type FtDictaddTerm Completed + +func (c FtDictaddTerm) Term(term ...string) FtDictaddTerm { + c.cs.s = append(c.cs.s, term...) 
+	return c
+}
+
+func (c FtDictaddTerm) Build() Completed {
+	return Completed(c)
+}
+
+type FtDictdel Completed
+
+func (b Builder) FtDictdel() (c FtDictdel) {
+	c = FtDictdel{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.DICTDEL")
+	return c
+}
+
+func (c FtDictdel) Dict(dict string) FtDictdelDict {
+	c.cs.s = append(c.cs.s, dict)
+	return (FtDictdelDict)(c)
+}
+
+type FtDictdelDict Completed
+
+func (c FtDictdelDict) Term(term ...string) FtDictdelTerm {
+	c.cs.s = append(c.cs.s, term...)
+	return (FtDictdelTerm)(c)
+}
+
+type FtDictdelTerm Completed
+
+func (c FtDictdelTerm) Term(term ...string) FtDictdelTerm {
+	c.cs.s = append(c.cs.s, term...)
+	return c
+}
+
+func (c FtDictdelTerm) Build() Completed {
+	return Completed(c)
+}
+
+type FtDictdump Completed
+
+func (b Builder) FtDictdump() (c FtDictdump) {
+	c = FtDictdump{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.DICTDUMP")
+	return c
+}
+
+func (c FtDictdump) Dict(dict string) FtDictdumpDict {
+	c.cs.s = append(c.cs.s, dict)
+	return (FtDictdumpDict)(c)
+}
+
+type FtDictdumpDict Completed
+
+func (c FtDictdumpDict) Build() Completed {
+	return Completed(c)
+}
+
+type FtDropindex Completed
+
+func (b Builder) FtDropindex() (c FtDropindex) {
+	c = FtDropindex{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.DROPINDEX")
+	return c
+}
+
+func (c FtDropindex) Index(index string) FtDropindexIndex {
+	c.cs.s = append(c.cs.s, index)
+	return (FtDropindexIndex)(c)
+}
+
+type FtDropindexDeleteDocsDd Completed
+
+func (c FtDropindexDeleteDocsDd) Build() Completed {
+	return Completed(c)
+}
+
+type FtDropindexIndex Completed
+
+func (c FtDropindexIndex) Dd() FtDropindexDeleteDocsDd {
+	c.cs.s = append(c.cs.s, "DD")
+	return (FtDropindexDeleteDocsDd)(c)
+}
+
+func (c FtDropindexIndex) Build() Completed {
+	return Completed(c)
+}
+
+type FtExplain Completed
+
+func (b Builder) FtExplain() (c FtExplain) {
+	c = FtExplain{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.EXPLAIN")
+	return c
+}
+
+func (c FtExplain) Index(index string) FtExplainIndex {
+	c.cs.s = append(c.cs.s, index)
+	return (FtExplainIndex)(c)
+}
+
+type FtExplainDialect Completed
+
+func (c FtExplainDialect) Build() Completed {
+	return Completed(c)
+}
+
+type FtExplainIndex Completed
+
+func (c FtExplainIndex) Query(query string) FtExplainQuery {
+	c.cs.s = append(c.cs.s, query)
+	return (FtExplainQuery)(c)
+}
+
+type FtExplainQuery Completed
+
+func (c FtExplainQuery) Dialect(dialect int64) FtExplainDialect {
+	c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10))
+	return (FtExplainDialect)(c)
+}
+
+func (c FtExplainQuery) Build() Completed {
+	return Completed(c)
+}
+
+type FtExplaincli Completed
+
+func (b Builder) FtExplaincli() (c FtExplaincli) {
+	c = FtExplaincli{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.EXPLAINCLI")
+	return c
+}
+
+func (c FtExplaincli) Index(index string) FtExplaincliIndex {
+	c.cs.s = append(c.cs.s, index)
+	return (FtExplaincliIndex)(c)
+}
+
+type FtExplaincliDialect Completed
+
+func (c FtExplaincliDialect) Build() Completed {
+	return Completed(c)
+}
+
+type FtExplaincliIndex Completed
+
+func (c FtExplaincliIndex) Query(query string) FtExplaincliQuery {
+	c.cs.s = append(c.cs.s, query)
+	return (FtExplaincliQuery)(c)
+}
+
+type FtExplaincliQuery Completed
+
+func (c FtExplaincliQuery) Dialect(dialect int64) FtExplaincliDialect {
+	c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10))
+	return (FtExplaincliDialect)(c)
+}
+
+func (c FtExplaincliQuery) Build() Completed {
+	return Completed(c)
+}
+
+type FtInfo Completed
+
+func (b Builder) FtInfo() (c FtInfo) {
+	c = FtInfo{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.INFO")
+	return c
+}
+
+func (c FtInfo) Index(index string) FtInfoIndex {
+	c.cs.s = append(c.cs.s, index)
+	return (FtInfoIndex)(c)
+}
+
+type FtInfoIndex Completed
+
+func (c FtInfoIndex) Build() Completed {
+	return Completed(c)
+}
+
+type FtList Completed
+
+func (b Builder) FtList() (c FtList) {
+	c = FtList{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT._LIST")
+	return c
+}
+
+func (c FtList) Build() Completed {
+	return Completed(c)
+}
+
+type FtProfile Completed
+
+func (b Builder) FtProfile() (c FtProfile) {
+	c = FtProfile{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.PROFILE")
+	return c
+}
+
+func (c FtProfile) Index(index string) FtProfileIndex {
+	c.cs.s = append(c.cs.s, index)
+	return (FtProfileIndex)(c)
+}
+
+type FtProfileIndex Completed
+
+func (c FtProfileIndex) Search() FtProfileQuerytypeSearch {
+	c.cs.s = append(c.cs.s, "SEARCH")
+	return (FtProfileQuerytypeSearch)(c)
+}
+
+func (c FtProfileIndex) Aggregate() FtProfileQuerytypeAggregate {
+	c.cs.s = append(c.cs.s, "AGGREGATE")
+	return (FtProfileQuerytypeAggregate)(c)
+}
+
+type FtProfileLimited Completed
+
+func (c FtProfileLimited) Query(query string) FtProfileQuery {
+	c.cs.s = append(c.cs.s, "QUERY", query)
+	return (FtProfileQuery)(c)
+}
+
+type FtProfileQuery Completed
+
+func (c FtProfileQuery) Build() Completed {
+	return Completed(c)
+}
+
+type FtProfileQuerytypeAggregate Completed
+
+func (c FtProfileQuerytypeAggregate) Limited() FtProfileLimited {
+	c.cs.s = append(c.cs.s, "LIMITED")
+	return (FtProfileLimited)(c)
+}
+
+func (c FtProfileQuerytypeAggregate) Query(query string) FtProfileQuery {
+	c.cs.s = append(c.cs.s, "QUERY", query)
+	return (FtProfileQuery)(c)
+}
+
+type FtProfileQuerytypeSearch Completed
+
+func (c FtProfileQuerytypeSearch) Limited() FtProfileLimited {
+	c.cs.s = append(c.cs.s, "LIMITED")
+	return (FtProfileLimited)(c)
+}
+
+func (c FtProfileQuerytypeSearch) Query(query string) FtProfileQuery {
+	c.cs.s = append(c.cs.s, "QUERY", query)
+	return (FtProfileQuery)(c)
+}
+
+type FtSearch Completed
+
+func (b Builder) FtSearch() (c FtSearch) {
+	c = FtSearch{cs: get(), ks: b.ks}
+	c.cs.s = append(c.cs.s, "FT.SEARCH")
+	return c
+}
+
+func (c FtSearch) Index(index string) FtSearchIndex {
+	c.cs.s = append(c.cs.s, index)
+	return (FtSearchIndex)(c)
+}
+
+type FtSearchDialect Completed
+
+func (c FtSearchDialect) Build() Completed {
+	return Completed(c)
+}
+
+type FtSearchExpander Completed
+
+func (c FtSearchExpander) Scorer(scorer string) FtSearchScorer {
+	c.cs.s = append(c.cs.s, "SCORER", scorer)
+	return (FtSearchScorer)(c)
+}
+
+func (c FtSearchExpander) Explainscore() FtSearchExplainscore {
+	c.cs.s = append(c.cs.s, "EXPLAINSCORE")
+	return (FtSearchExplainscore)(c)
+}
+
+func (c FtSearchExpander) Payload(payload string) FtSearchPayload {
+	c.cs.s = append(c.cs.s, "PAYLOAD", payload)
+	return (FtSearchPayload)(c)
+}
+
+func (c FtSearchExpander) Sortby(sortby string) FtSearchSortbySortby {
+	c.cs.s = append(c.cs.s, "SORTBY", sortby)
+	return (FtSearchSortbySortby)(c)
+}
+
+func (c FtSearchExpander) Limit() FtSearchLimitLimit {
+	c.cs.s = append(c.cs.s, "LIMIT")
+	return (FtSearchLimitLimit)(c)
+}
+
+func (c FtSearchExpander) Params() FtSearchParamsParams {
+	c.cs.s = append(c.cs.s, "PARAMS")
+	return (FtSearchParamsParams)(c)
+}
+
+func (c FtSearchExpander) Dialect(dialect int64) FtSearchDialect {
+	c.cs.s = append(c.cs.s, "DIALECT",
strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchExpander) Build() Completed { + return Completed(c) +} + +type FtSearchExplainscore Completed + +func (c FtSearchExplainscore) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchExplainscore) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchExplainscore) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchExplainscore) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchExplainscore) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchExplainscore) Build() Completed { + return Completed(c) +} + +type FtSearchFilterFilter Completed + +func (c FtSearchFilterFilter) Min(min float64) FtSearchFilterMin { + c.cs.s = append(c.cs.s, strconv.FormatFloat(min, 'f', -1, 64)) + return (FtSearchFilterMin)(c) +} + +type FtSearchFilterMax Completed + +func (c FtSearchFilterMax) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchFilterMax) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchFilterMax) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchFilterMax) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchFilterMax) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchFilterMax) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchFilterMax) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchFilterMax) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchFilterMax) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchFilterMax) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchFilterMax) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchFilterMax) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchFilterMax) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchFilterMax) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchFilterMax) Payload(payload string) FtSearchPayload { + c.cs.s = 
append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchFilterMax) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchFilterMax) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchFilterMax) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchFilterMax) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchFilterMax) Build() Completed { + return Completed(c) +} + +type FtSearchFilterMin Completed + +func (c FtSearchFilterMin) Max(max float64) FtSearchFilterMax { + c.cs.s = append(c.cs.s, strconv.FormatFloat(max, 'f', -1, 64)) + return (FtSearchFilterMax)(c) +} + +type FtSearchGeoFilterGeofilter Completed + +func (c FtSearchGeoFilterGeofilter) Lon(lon float64) FtSearchGeoFilterLon { + c.cs.s = append(c.cs.s, strconv.FormatFloat(lon, 'f', -1, 64)) + return (FtSearchGeoFilterLon)(c) +} + +type FtSearchGeoFilterLat Completed + +func (c FtSearchGeoFilterLat) Radius(radius float64) FtSearchGeoFilterRadius { + c.cs.s = append(c.cs.s, strconv.FormatFloat(radius, 'f', -1, 64)) + return (FtSearchGeoFilterRadius)(c) +} + +type FtSearchGeoFilterLon Completed + +func (c FtSearchGeoFilterLon) Lat(lat float64) FtSearchGeoFilterLat { + c.cs.s = append(c.cs.s, strconv.FormatFloat(lat, 'f', -1, 64)) + return (FtSearchGeoFilterLat)(c) +} + +type FtSearchGeoFilterRadius Completed + +func (c FtSearchGeoFilterRadius) M() FtSearchGeoFilterRadiusTypeM { + c.cs.s = append(c.cs.s, "m") + return (FtSearchGeoFilterRadiusTypeM)(c) +} + +func (c FtSearchGeoFilterRadius) Km() FtSearchGeoFilterRadiusTypeKm { + c.cs.s = append(c.cs.s, "km") + return (FtSearchGeoFilterRadiusTypeKm)(c) +} + +func (c FtSearchGeoFilterRadius) Mi() FtSearchGeoFilterRadiusTypeMi { + c.cs.s = append(c.cs.s, "mi") + return (FtSearchGeoFilterRadiusTypeMi)(c) +} + +func (c FtSearchGeoFilterRadius) Ft() FtSearchGeoFilterRadiusTypeFt { + c.cs.s = append(c.cs.s, "ft") + return (FtSearchGeoFilterRadiusTypeFt)(c) +} + +type FtSearchGeoFilterRadiusTypeFt Completed + +func (c FtSearchGeoFilterRadiusTypeFt) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) 
Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchGeoFilterRadiusTypeFt) Build() Completed { + return Completed(c) +} + +type FtSearchGeoFilterRadiusTypeKm Completed + +func (c FtSearchGeoFilterRadiusTypeKm) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", 
language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchGeoFilterRadiusTypeKm) Build() Completed { + return Completed(c) +} + +type FtSearchGeoFilterRadiusTypeM Completed + +func (c FtSearchGeoFilterRadiusTypeM) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Explainscore() 
FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchGeoFilterRadiusTypeM) Build() Completed { + return Completed(c) +} + +type FtSearchGeoFilterRadiusTypeMi Completed + +func (c FtSearchGeoFilterRadiusTypeMi) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return 
(FtSearchSortbySortby)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchGeoFilterRadiusTypeMi) Build() Completed { + return Completed(c) +} + +type FtSearchHighlightFieldsField Completed + +func (c FtSearchHighlightFieldsField) Field(field ...string) FtSearchHighlightFieldsField { + c.cs.s = append(c.cs.s, field...) + return c +} + +func (c FtSearchHighlightFieldsField) Tags() FtSearchHighlightTagsTags { + c.cs.s = append(c.cs.s, "TAGS") + return (FtSearchHighlightTagsTags)(c) +} + +func (c FtSearchHighlightFieldsField) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchHighlightFieldsField) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchHighlightFieldsField) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchHighlightFieldsField) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchHighlightFieldsField) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchHighlightFieldsField) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchHighlightFieldsField) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchHighlightFieldsField) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchHighlightFieldsField) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchHighlightFieldsField) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchHighlightFieldsField) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchHighlightFieldsField) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchHighlightFieldsField) Build() Completed { + return Completed(c) +} + +type FtSearchHighlightFieldsFields Completed + +func (c FtSearchHighlightFieldsFields) Field(field ...string) FtSearchHighlightFieldsField { + c.cs.s = append(c.cs.s, field...) 
+ return (FtSearchHighlightFieldsField)(c) +} + +type FtSearchHighlightHighlight Completed + +func (c FtSearchHighlightHighlight) Fields(count string) FtSearchHighlightFieldsFields { + c.cs.s = append(c.cs.s, "FIELDS", count) + return (FtSearchHighlightFieldsFields)(c) +} + +func (c FtSearchHighlightHighlight) Tags() FtSearchHighlightTagsTags { + c.cs.s = append(c.cs.s, "TAGS") + return (FtSearchHighlightTagsTags)(c) +} + +func (c FtSearchHighlightHighlight) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchHighlightHighlight) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchHighlightHighlight) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchHighlightHighlight) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchHighlightHighlight) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchHighlightHighlight) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchHighlightHighlight) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchHighlightHighlight) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchHighlightHighlight) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchHighlightHighlight) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchHighlightHighlight) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchHighlightHighlight) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchHighlightHighlight) Build() Completed { + return Completed(c) +} + +type FtSearchHighlightTagsOpenClose Completed + +func (c FtSearchHighlightTagsOpenClose) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Explainscore() 
FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchHighlightTagsOpenClose) Build() Completed { + return Completed(c) +} + +type FtSearchHighlightTagsTags Completed + +func (c FtSearchHighlightTagsTags) OpenClose(open string, close string) FtSearchHighlightTagsOpenClose { + c.cs.s = append(c.cs.s, open, close) + return (FtSearchHighlightTagsOpenClose)(c) +} + +type FtSearchInFieldsField Completed + +func (c FtSearchInFieldsField) Field(field ...string) FtSearchInFieldsField { + c.cs.s = append(c.cs.s, field...) + return c +} + +func (c FtSearchInFieldsField) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchInFieldsField) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchInFieldsField) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchInFieldsField) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchInFieldsField) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchInFieldsField) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchInFieldsField) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchInFieldsField) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchInFieldsField) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchInFieldsField) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchInFieldsField) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchInFieldsField) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchInFieldsField) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchInFieldsField) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return 
(FtSearchParamsParams)(c) +} + +func (c FtSearchInFieldsField) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchInFieldsField) Build() Completed { + return Completed(c) +} + +type FtSearchInFieldsInfields Completed + +func (c FtSearchInFieldsInfields) Field(field ...string) FtSearchInFieldsField { + c.cs.s = append(c.cs.s, field...) + return (FtSearchInFieldsField)(c) +} + +type FtSearchInKeysInkeys Completed + +func (c FtSearchInKeysInkeys) Key(key ...string) FtSearchInKeysKey { + c.cs.s = append(c.cs.s, key...) + return (FtSearchInKeysKey)(c) +} + +type FtSearchInKeysKey Completed + +func (c FtSearchInKeysKey) Key(key ...string) FtSearchInKeysKey { + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c FtSearchInKeysKey) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchInKeysKey) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchInKeysKey) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchInKeysKey) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchInKeysKey) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchInKeysKey) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchInKeysKey) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchInKeysKey) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchInKeysKey) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchInKeysKey) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchInKeysKey) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchInKeysKey) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchInKeysKey) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchInKeysKey) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchInKeysKey) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchInKeysKey) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchInKeysKey) Build() Completed { + return Completed(c) +} + +type FtSearchIndex Completed + +func (c FtSearchIndex) Query(query string) FtSearchQuery { + c.cs.s = append(c.cs.s, query) + return (FtSearchQuery)(c) +} + +type FtSearchLanguage Completed + +func (c 
FtSearchLanguage) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchLanguage) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchLanguage) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchLanguage) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchLanguage) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchLanguage) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchLanguage) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchLanguage) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchLanguage) Build() Completed { + return Completed(c) +} + +type FtSearchLimitLimit Completed + +func (c FtSearchLimitLimit) OffsetNum(offset int64, num int64) FtSearchLimitOffsetNum { + c.cs.s = append(c.cs.s, strconv.FormatInt(offset, 10), strconv.FormatInt(num, 10)) + return (FtSearchLimitOffsetNum)(c) +} + +type FtSearchLimitOffsetNum Completed + +func (c FtSearchLimitOffsetNum) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchLimitOffsetNum) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchLimitOffsetNum) Build() Completed { + return Completed(c) +} + +type FtSearchNocontent Completed + +func (c FtSearchNocontent) Verbatim() FtSearchVerbatim { + c.cs.s = append(c.cs.s, "VERBATIM") + return (FtSearchVerbatim)(c) +} + +func (c FtSearchNocontent) Nostopwords() FtSearchNostopwords { + c.cs.s = append(c.cs.s, "NOSTOPWORDS") + return (FtSearchNostopwords)(c) +} + +func (c FtSearchNocontent) Withscores() FtSearchWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (FtSearchWithscores)(c) +} + +func (c FtSearchNocontent) Withpayloads() FtSearchWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSearchWithpayloads)(c) +} + +func (c FtSearchNocontent) Withsortkeys() FtSearchWithsortkeys { + c.cs.s = append(c.cs.s, "WITHSORTKEYS") + return (FtSearchWithsortkeys)(c) +} + +func (c FtSearchNocontent) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchNocontent) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchNocontent) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchNocontent) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchNocontent) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchNocontent) 
Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchNocontent) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchNocontent) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchNocontent) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchNocontent) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchNocontent) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchNocontent) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchNocontent) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchNocontent) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchNocontent) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchNocontent) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchNocontent) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchNocontent) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchNocontent) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchNocontent) Build() Completed { + return Completed(c) +} + +type FtSearchNostopwords Completed + +func (c FtSearchNostopwords) Withscores() FtSearchWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (FtSearchWithscores)(c) +} + +func (c FtSearchNostopwords) Withpayloads() FtSearchWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSearchWithpayloads)(c) +} + +func (c FtSearchNostopwords) Withsortkeys() FtSearchWithsortkeys { + c.cs.s = append(c.cs.s, "WITHSORTKEYS") + return (FtSearchWithsortkeys)(c) +} + +func (c FtSearchNostopwords) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchNostopwords) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchNostopwords) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchNostopwords) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchNostopwords) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchNostopwords) Summarize() 
FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchNostopwords) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchNostopwords) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchNostopwords) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchNostopwords) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchNostopwords) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchNostopwords) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchNostopwords) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchNostopwords) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchNostopwords) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchNostopwords) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchNostopwords) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchNostopwords) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchNostopwords) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchNostopwords) Build() Completed { + return Completed(c) +} + +type FtSearchParamsNameValue Completed + +func (c FtSearchParamsNameValue) NameValue(name string, value string) FtSearchParamsNameValue { + c.cs.s = append(c.cs.s, name, value) + return c +} + +func (c FtSearchParamsNameValue) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchParamsNameValue) Build() Completed { + return Completed(c) +} + +type FtSearchParamsNargs Completed + +func (c FtSearchParamsNargs) NameValue() FtSearchParamsNameValue { + return (FtSearchParamsNameValue)(c) +} + +type FtSearchParamsParams Completed + +func (c FtSearchParamsParams) Nargs(nargs int64) FtSearchParamsNargs { + c.cs.s = append(c.cs.s, strconv.FormatInt(nargs, 10)) + return (FtSearchParamsNargs)(c) +} + +type FtSearchPayload Completed + +func (c FtSearchPayload) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchPayload) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchPayload) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchPayload) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", 
strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchPayload) Build() Completed { + return Completed(c) +} + +type FtSearchQuery Completed + +func (c FtSearchQuery) Nocontent() FtSearchNocontent { + c.cs.s = append(c.cs.s, "NOCONTENT") + return (FtSearchNocontent)(c) +} + +func (c FtSearchQuery) Verbatim() FtSearchVerbatim { + c.cs.s = append(c.cs.s, "VERBATIM") + return (FtSearchVerbatim)(c) +} + +func (c FtSearchQuery) Nostopwords() FtSearchNostopwords { + c.cs.s = append(c.cs.s, "NOSTOPWORDS") + return (FtSearchNostopwords)(c) +} + +func (c FtSearchQuery) Withscores() FtSearchWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (FtSearchWithscores)(c) +} + +func (c FtSearchQuery) Withpayloads() FtSearchWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSearchWithpayloads)(c) +} + +func (c FtSearchQuery) Withsortkeys() FtSearchWithsortkeys { + c.cs.s = append(c.cs.s, "WITHSORTKEYS") + return (FtSearchWithsortkeys)(c) +} + +func (c FtSearchQuery) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchQuery) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchQuery) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchQuery) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchQuery) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchQuery) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchQuery) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchQuery) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchQuery) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchQuery) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchQuery) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchQuery) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchQuery) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchQuery) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchQuery) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchQuery) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchQuery) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return 
(FtSearchLimitLimit)(c) +} + +func (c FtSearchQuery) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchQuery) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchQuery) Build() Completed { + return Completed(c) +} + +type FtSearchReturnIdentifiersAs Completed + +func (c FtSearchReturnIdentifiersAs) Identifier(identifier string) FtSearchReturnIdentifiersIdentifier { + c.cs.s = append(c.cs.s, identifier) + return (FtSearchReturnIdentifiersIdentifier)(c) +} + +func (c FtSearchReturnIdentifiersAs) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchReturnIdentifiersAs) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchReturnIdentifiersAs) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchReturnIdentifiersAs) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchReturnIdentifiersAs) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchReturnIdentifiersAs) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchReturnIdentifiersAs) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchReturnIdentifiersAs) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchReturnIdentifiersAs) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchReturnIdentifiersAs) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchReturnIdentifiersAs) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchReturnIdentifiersAs) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchReturnIdentifiersAs) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchReturnIdentifiersAs) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchReturnIdentifiersAs) Build() Completed { + return Completed(c) +} + +type FtSearchReturnIdentifiersIdentifier Completed + +func (c FtSearchReturnIdentifiersIdentifier) As(property string) FtSearchReturnIdentifiersAs { + c.cs.s = append(c.cs.s, "AS", property) + return (FtSearchReturnIdentifiersAs)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Identifier(identifier string) FtSearchReturnIdentifiersIdentifier { + c.cs.s = append(c.cs.s, identifier) + return c +} + +func (c FtSearchReturnIdentifiersIdentifier) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return 
(FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchReturnIdentifiersIdentifier) Build() Completed { + return Completed(c) +} + +type FtSearchReturnReturn Completed + +func (c FtSearchReturnReturn) Identifier(identifier string) FtSearchReturnIdentifiersIdentifier { + c.cs.s = append(c.cs.s, identifier) + return (FtSearchReturnIdentifiersIdentifier)(c) +} + +type FtSearchScorer Completed + +func (c FtSearchScorer) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchScorer) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchScorer) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchScorer) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchScorer) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchScorer) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchScorer) Build() Completed { + return Completed(c) +} + +type 
FtSearchSlop Completed + +func (c FtSearchSlop) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchSlop) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchSlop) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchSlop) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchSlop) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchSlop) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchSlop) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchSlop) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchSlop) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSlop) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSlop) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSlop) Build() Completed { + return Completed(c) +} + +type FtSearchSortbyOrderAsc Completed + +func (c FtSearchSortbyOrderAsc) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSortbyOrderAsc) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSortbyOrderAsc) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSortbyOrderAsc) Build() Completed { + return Completed(c) +} + +type FtSearchSortbyOrderDesc Completed + +func (c FtSearchSortbyOrderDesc) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSortbyOrderDesc) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSortbyOrderDesc) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSortbyOrderDesc) Build() Completed { + return Completed(c) +} + +type FtSearchSortbySortby Completed + +func (c FtSearchSortbySortby) Asc() FtSearchSortbyOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (FtSearchSortbyOrderAsc)(c) +} + +func (c FtSearchSortbySortby) Desc() FtSearchSortbyOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (FtSearchSortbyOrderDesc)(c) +} + +func (c FtSearchSortbySortby) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSortbySortby) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSortbySortby) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, 
"DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSortbySortby) Build() Completed { + return Completed(c) +} + +type FtSearchSummarizeFieldsField Completed + +func (c FtSearchSummarizeFieldsField) Field(field ...string) FtSearchSummarizeFieldsField { + c.cs.s = append(c.cs.s, field...) + return c +} + +func (c FtSearchSummarizeFieldsField) Frags(num int64) FtSearchSummarizeFrags { + c.cs.s = append(c.cs.s, "FRAGS", strconv.FormatInt(num, 10)) + return (FtSearchSummarizeFrags)(c) +} + +func (c FtSearchSummarizeFieldsField) Len(fragsize int64) FtSearchSummarizeLen { + c.cs.s = append(c.cs.s, "LEN", strconv.FormatInt(fragsize, 10)) + return (FtSearchSummarizeLen)(c) +} + +func (c FtSearchSummarizeFieldsField) Separator(separator string) FtSearchSummarizeSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtSearchSummarizeSeparator)(c) +} + +func (c FtSearchSummarizeFieldsField) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchSummarizeFieldsField) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchSummarizeFieldsField) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchSummarizeFieldsField) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchSummarizeFieldsField) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchSummarizeFieldsField) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchSummarizeFieldsField) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchSummarizeFieldsField) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchSummarizeFieldsField) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchSummarizeFieldsField) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchSummarizeFieldsField) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSummarizeFieldsField) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSummarizeFieldsField) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSummarizeFieldsField) Build() Completed { + return Completed(c) +} + +type FtSearchSummarizeFieldsFields Completed + +func (c FtSearchSummarizeFieldsFields) Field(field ...string) FtSearchSummarizeFieldsField { + c.cs.s = append(c.cs.s, field...) 
+ return (FtSearchSummarizeFieldsField)(c) +} + +type FtSearchSummarizeFrags Completed + +func (c FtSearchSummarizeFrags) Len(fragsize int64) FtSearchSummarizeLen { + c.cs.s = append(c.cs.s, "LEN", strconv.FormatInt(fragsize, 10)) + return (FtSearchSummarizeLen)(c) +} + +func (c FtSearchSummarizeFrags) Separator(separator string) FtSearchSummarizeSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtSearchSummarizeSeparator)(c) +} + +func (c FtSearchSummarizeFrags) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchSummarizeFrags) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchSummarizeFrags) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchSummarizeFrags) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchSummarizeFrags) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchSummarizeFrags) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchSummarizeFrags) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchSummarizeFrags) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchSummarizeFrags) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchSummarizeFrags) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchSummarizeFrags) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSummarizeFrags) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSummarizeFrags) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSummarizeFrags) Build() Completed { + return Completed(c) +} + +type FtSearchSummarizeLen Completed + +func (c FtSearchSummarizeLen) Separator(separator string) FtSearchSummarizeSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtSearchSummarizeSeparator)(c) +} + +func (c FtSearchSummarizeLen) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchSummarizeLen) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchSummarizeLen) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchSummarizeLen) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchSummarizeLen) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + 
return (FtSearchLanguage)(c) +} + +func (c FtSearchSummarizeLen) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchSummarizeLen) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchSummarizeLen) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchSummarizeLen) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchSummarizeLen) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchSummarizeLen) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSummarizeLen) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSummarizeLen) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSummarizeLen) Build() Completed { + return Completed(c) +} + +type FtSearchSummarizeSeparator Completed + +func (c FtSearchSummarizeSeparator) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchSummarizeSeparator) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchSummarizeSeparator) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchSummarizeSeparator) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchSummarizeSeparator) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchSummarizeSeparator) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchSummarizeSeparator) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchSummarizeSeparator) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchSummarizeSeparator) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchSummarizeSeparator) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchSummarizeSeparator) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSummarizeSeparator) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSummarizeSeparator) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSummarizeSeparator) Build() Completed { + return Completed(c) +} + 
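+// Illustrative use of the FT.SEARCH SUMMARIZE option chain (FtSearchSummarizeSummarize
+// and the FtSearchSummarize* types around this point). This is a sketch, not generated
+// output: it assumes the usual root chain b.FtSearch().Index(...).Query(...) defined
+// earlier in this file, and "idx" / "txt" / "hello" are hypothetical index, field and
+// query values.
+//
+//	cmd := b.FtSearch().Index("idx").Query("hello").
+//		Summarize().Fields("1").Field("txt").
+//		Frags(3).Len(20).Separator("...").
+//		Build()
+//
+// The chain above appends: FT.SEARCH idx hello SUMMARIZE FIELDS 1 txt FRAGS 3 LEN 20 SEPARATOR ...
+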
+type FtSearchSummarizeSummarize Completed + +func (c FtSearchSummarizeSummarize) Fields(count string) FtSearchSummarizeFieldsFields { + c.cs.s = append(c.cs.s, "FIELDS", count) + return (FtSearchSummarizeFieldsFields)(c) +} + +func (c FtSearchSummarizeSummarize) Frags(num int64) FtSearchSummarizeFrags { + c.cs.s = append(c.cs.s, "FRAGS", strconv.FormatInt(num, 10)) + return (FtSearchSummarizeFrags)(c) +} + +func (c FtSearchSummarizeSummarize) Len(fragsize int64) FtSearchSummarizeLen { + c.cs.s = append(c.cs.s, "LEN", strconv.FormatInt(fragsize, 10)) + return (FtSearchSummarizeLen)(c) +} + +func (c FtSearchSummarizeSummarize) Separator(separator string) FtSearchSummarizeSeparator { + c.cs.s = append(c.cs.s, "SEPARATOR", separator) + return (FtSearchSummarizeSeparator)(c) +} + +func (c FtSearchSummarizeSummarize) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchSummarizeSummarize) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchSummarizeSummarize) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchSummarizeSummarize) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchSummarizeSummarize) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchSummarizeSummarize) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchSummarizeSummarize) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchSummarizeSummarize) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchSummarizeSummarize) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchSummarizeSummarize) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchSummarizeSummarize) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchSummarizeSummarize) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchSummarizeSummarize) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchSummarizeSummarize) Build() Completed { + return Completed(c) +} + +type FtSearchTagsInorder Completed + +func (c FtSearchTagsInorder) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchTagsInorder) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchTagsInorder) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchTagsInorder) Explainscore() FtSearchExplainscore { + c.cs.s = 
append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchTagsInorder) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchTagsInorder) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchTagsInorder) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchTagsInorder) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchTagsInorder) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchTagsInorder) Build() Completed { + return Completed(c) +} + +type FtSearchTimeout Completed + +func (c FtSearchTimeout) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchTimeout) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchTimeout) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchTimeout) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchTimeout) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchTimeout) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchTimeout) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchTimeout) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchTimeout) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchTimeout) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchTimeout) Build() Completed { + return Completed(c) +} + +type FtSearchVerbatim Completed + +func (c FtSearchVerbatim) Nostopwords() FtSearchNostopwords { + c.cs.s = append(c.cs.s, "NOSTOPWORDS") + return (FtSearchNostopwords)(c) +} + +func (c FtSearchVerbatim) Withscores() FtSearchWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (FtSearchWithscores)(c) +} + +func (c FtSearchVerbatim) Withpayloads() FtSearchWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSearchWithpayloads)(c) +} + +func (c FtSearchVerbatim) Withsortkeys() FtSearchWithsortkeys { + c.cs.s = append(c.cs.s, "WITHSORTKEYS") + return (FtSearchWithsortkeys)(c) +} + +func (c FtSearchVerbatim) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchVerbatim) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchVerbatim) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + 
return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchVerbatim) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchVerbatim) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchVerbatim) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchVerbatim) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchVerbatim) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchVerbatim) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchVerbatim) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchVerbatim) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchVerbatim) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchVerbatim) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchVerbatim) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchVerbatim) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchVerbatim) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchVerbatim) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchVerbatim) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchVerbatim) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchVerbatim) Build() Completed { + return Completed(c) +} + +type FtSearchWithpayloads Completed + +func (c FtSearchWithpayloads) Withsortkeys() FtSearchWithsortkeys { + c.cs.s = append(c.cs.s, "WITHSORTKEYS") + return (FtSearchWithsortkeys)(c) +} + +func (c FtSearchWithpayloads) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchWithpayloads) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchWithpayloads) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchWithpayloads) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchWithpayloads) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return 
(FtSearchReturnReturn)(c) +} + +func (c FtSearchWithpayloads) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchWithpayloads) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchWithpayloads) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchWithpayloads) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchWithpayloads) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchWithpayloads) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchWithpayloads) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchWithpayloads) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchWithpayloads) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchWithpayloads) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchWithpayloads) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchWithpayloads) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchWithpayloads) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchWithpayloads) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchWithpayloads) Build() Completed { + return Completed(c) +} + +type FtSearchWithscores Completed + +func (c FtSearchWithscores) Withpayloads() FtSearchWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSearchWithpayloads)(c) +} + +func (c FtSearchWithscores) Withsortkeys() FtSearchWithsortkeys { + c.cs.s = append(c.cs.s, "WITHSORTKEYS") + return (FtSearchWithsortkeys)(c) +} + +func (c FtSearchWithscores) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchWithscores) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchWithscores) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchWithscores) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchWithscores) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchWithscores) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + 
return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchWithscores) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchWithscores) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchWithscores) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchWithscores) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchWithscores) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchWithscores) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchWithscores) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchWithscores) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchWithscores) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchWithscores) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchWithscores) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchWithscores) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchWithscores) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchWithscores) Build() Completed { + return Completed(c) +} + +type FtSearchWithsortkeys Completed + +func (c FtSearchWithsortkeys) Filter(numericField string) FtSearchFilterFilter { + c.cs.s = append(c.cs.s, "FILTER", numericField) + return (FtSearchFilterFilter)(c) +} + +func (c FtSearchWithsortkeys) Geofilter(geoField string) FtSearchGeoFilterGeofilter { + c.cs.s = append(c.cs.s, "GEOFILTER", geoField) + return (FtSearchGeoFilterGeofilter)(c) +} + +func (c FtSearchWithsortkeys) Inkeys(count string) FtSearchInKeysInkeys { + c.cs.s = append(c.cs.s, "INKEYS", count) + return (FtSearchInKeysInkeys)(c) +} + +func (c FtSearchWithsortkeys) Infields(count string) FtSearchInFieldsInfields { + c.cs.s = append(c.cs.s, "INFIELDS", count) + return (FtSearchInFieldsInfields)(c) +} + +func (c FtSearchWithsortkeys) Return(count string) FtSearchReturnReturn { + c.cs.s = append(c.cs.s, "RETURN", count) + return (FtSearchReturnReturn)(c) +} + +func (c FtSearchWithsortkeys) Summarize() FtSearchSummarizeSummarize { + c.cs.s = append(c.cs.s, "SUMMARIZE") + return (FtSearchSummarizeSummarize)(c) +} + +func (c FtSearchWithsortkeys) Highlight() FtSearchHighlightHighlight { + c.cs.s = append(c.cs.s, "HIGHLIGHT") + return (FtSearchHighlightHighlight)(c) +} + +func (c FtSearchWithsortkeys) Slop(slop int64) FtSearchSlop { + c.cs.s = append(c.cs.s, "SLOP", strconv.FormatInt(slop, 10)) + return (FtSearchSlop)(c) +} + +func (c FtSearchWithsortkeys) Timeout(timeout int64) FtSearchTimeout { + c.cs.s = append(c.cs.s, 
"TIMEOUT", strconv.FormatInt(timeout, 10)) + return (FtSearchTimeout)(c) +} + +func (c FtSearchWithsortkeys) Inorder() FtSearchTagsInorder { + c.cs.s = append(c.cs.s, "INORDER") + return (FtSearchTagsInorder)(c) +} + +func (c FtSearchWithsortkeys) Language(language string) FtSearchLanguage { + c.cs.s = append(c.cs.s, "LANGUAGE", language) + return (FtSearchLanguage)(c) +} + +func (c FtSearchWithsortkeys) Expander(expander string) FtSearchExpander { + c.cs.s = append(c.cs.s, "EXPANDER", expander) + return (FtSearchExpander)(c) +} + +func (c FtSearchWithsortkeys) Scorer(scorer string) FtSearchScorer { + c.cs.s = append(c.cs.s, "SCORER", scorer) + return (FtSearchScorer)(c) +} + +func (c FtSearchWithsortkeys) Explainscore() FtSearchExplainscore { + c.cs.s = append(c.cs.s, "EXPLAINSCORE") + return (FtSearchExplainscore)(c) +} + +func (c FtSearchWithsortkeys) Payload(payload string) FtSearchPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSearchPayload)(c) +} + +func (c FtSearchWithsortkeys) Sortby(sortby string) FtSearchSortbySortby { + c.cs.s = append(c.cs.s, "SORTBY", sortby) + return (FtSearchSortbySortby)(c) +} + +func (c FtSearchWithsortkeys) Limit() FtSearchLimitLimit { + c.cs.s = append(c.cs.s, "LIMIT") + return (FtSearchLimitLimit)(c) +} + +func (c FtSearchWithsortkeys) Params() FtSearchParamsParams { + c.cs.s = append(c.cs.s, "PARAMS") + return (FtSearchParamsParams)(c) +} + +func (c FtSearchWithsortkeys) Dialect(dialect int64) FtSearchDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSearchDialect)(c) +} + +func (c FtSearchWithsortkeys) Build() Completed { + return Completed(c) +} + +type FtSpellcheck Completed + +func (b Builder) FtSpellcheck() (c FtSpellcheck) { + c = FtSpellcheck{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SPELLCHECK") + return c +} + +func (c FtSpellcheck) Index(index string) FtSpellcheckIndex { + c.cs.s = append(c.cs.s, index) + return (FtSpellcheckIndex)(c) +} + +type FtSpellcheckDialect Completed + +func (c FtSpellcheckDialect) Build() Completed { + return Completed(c) +} + +type FtSpellcheckDistance Completed + +func (c FtSpellcheckDistance) TermsInclude() FtSpellcheckTermsTermsInclude { + c.cs.s = append(c.cs.s, "TERMS", "INCLUDE") + return (FtSpellcheckTermsTermsInclude)(c) +} + +func (c FtSpellcheckDistance) TermsExclude() FtSpellcheckTermsTermsExclude { + c.cs.s = append(c.cs.s, "TERMS", "EXCLUDE") + return (FtSpellcheckTermsTermsExclude)(c) +} + +func (c FtSpellcheckDistance) Dialect(dialect int64) FtSpellcheckDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSpellcheckDialect)(c) +} + +func (c FtSpellcheckDistance) Build() Completed { + return Completed(c) +} + +type FtSpellcheckIndex Completed + +func (c FtSpellcheckIndex) Query(query string) FtSpellcheckQuery { + c.cs.s = append(c.cs.s, query) + return (FtSpellcheckQuery)(c) +} + +type FtSpellcheckQuery Completed + +func (c FtSpellcheckQuery) Distance(distance int64) FtSpellcheckDistance { + c.cs.s = append(c.cs.s, "DISTANCE", strconv.FormatInt(distance, 10)) + return (FtSpellcheckDistance)(c) +} + +func (c FtSpellcheckQuery) TermsInclude() FtSpellcheckTermsTermsInclude { + c.cs.s = append(c.cs.s, "TERMS", "INCLUDE") + return (FtSpellcheckTermsTermsInclude)(c) +} + +func (c FtSpellcheckQuery) TermsExclude() FtSpellcheckTermsTermsExclude { + c.cs.s = append(c.cs.s, "TERMS", "EXCLUDE") + return (FtSpellcheckTermsTermsExclude)(c) +} + +func (c FtSpellcheckQuery) Dialect(dialect int64) 
FtSpellcheckDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSpellcheckDialect)(c) +} + +func (c FtSpellcheckQuery) Build() Completed { + return Completed(c) +} + +type FtSpellcheckTermsDictionary Completed + +func (c FtSpellcheckTermsDictionary) Terms(terms ...string) FtSpellcheckTermsTerms { + c.cs.s = append(c.cs.s, terms...) + return (FtSpellcheckTermsTerms)(c) +} + +func (c FtSpellcheckTermsDictionary) Dialect(dialect int64) FtSpellcheckDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSpellcheckDialect)(c) +} + +func (c FtSpellcheckTermsDictionary) Build() Completed { + return Completed(c) +} + +type FtSpellcheckTermsTerms Completed + +func (c FtSpellcheckTermsTerms) Terms(terms ...string) FtSpellcheckTermsTerms { + c.cs.s = append(c.cs.s, terms...) + return c +} + +func (c FtSpellcheckTermsTerms) Dialect(dialect int64) FtSpellcheckDialect { + c.cs.s = append(c.cs.s, "DIALECT", strconv.FormatInt(dialect, 10)) + return (FtSpellcheckDialect)(c) +} + +func (c FtSpellcheckTermsTerms) Build() Completed { + return Completed(c) +} + +type FtSpellcheckTermsTermsExclude Completed + +func (c FtSpellcheckTermsTermsExclude) Dictionary(dictionary string) FtSpellcheckTermsDictionary { + c.cs.s = append(c.cs.s, dictionary) + return (FtSpellcheckTermsDictionary)(c) +} + +type FtSpellcheckTermsTermsInclude Completed + +func (c FtSpellcheckTermsTermsInclude) Dictionary(dictionary string) FtSpellcheckTermsDictionary { + c.cs.s = append(c.cs.s, dictionary) + return (FtSpellcheckTermsDictionary)(c) +} + +type FtSugadd Completed + +func (b Builder) FtSugadd() (c FtSugadd) { + c = FtSugadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SUGADD") + return c +} + +func (c FtSugadd) Key(key string) FtSugaddKey { + c.cs.s = append(c.cs.s, key) + return (FtSugaddKey)(c) +} + +type FtSugaddIncrementScoreIncr Completed + +func (c FtSugaddIncrementScoreIncr) Payload(payload string) FtSugaddPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSugaddPayload)(c) +} + +func (c FtSugaddIncrementScoreIncr) Build() Completed { + return Completed(c) +} + +type FtSugaddKey Completed + +func (c FtSugaddKey) String(string string) FtSugaddString { + c.cs.s = append(c.cs.s, string) + return (FtSugaddString)(c) +} + +type FtSugaddPayload Completed + +func (c FtSugaddPayload) Build() Completed { + return Completed(c) +} + +type FtSugaddScore Completed + +func (c FtSugaddScore) Incr() FtSugaddIncrementScoreIncr { + c.cs.s = append(c.cs.s, "INCR") + return (FtSugaddIncrementScoreIncr)(c) +} + +func (c FtSugaddScore) Payload(payload string) FtSugaddPayload { + c.cs.s = append(c.cs.s, "PAYLOAD", payload) + return (FtSugaddPayload)(c) +} + +func (c FtSugaddScore) Build() Completed { + return Completed(c) +} + +type FtSugaddString Completed + +func (c FtSugaddString) Score(score float64) FtSugaddScore { + c.cs.s = append(c.cs.s, strconv.FormatFloat(score, 'f', -1, 64)) + return (FtSugaddScore)(c) +} + +type FtSugdel Completed + +func (b Builder) FtSugdel() (c FtSugdel) { + c = FtSugdel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SUGDEL") + return c +} + +func (c FtSugdel) Key(key string) FtSugdelKey { + c.cs.s = append(c.cs.s, key) + return (FtSugdelKey)(c) +} + +type FtSugdelKey Completed + +func (c FtSugdelKey) String(string string) FtSugdelString { + c.cs.s = append(c.cs.s, string) + return (FtSugdelString)(c) +} + +type FtSugdelString Completed + +func (c FtSugdelString) Build() Completed { + return Completed(c) +} + +type 
FtSugget Completed + +func (b Builder) FtSugget() (c FtSugget) { + c = FtSugget{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SUGGET") + return c +} + +func (c FtSugget) Key(key string) FtSuggetKey { + c.cs.s = append(c.cs.s, key) + return (FtSuggetKey)(c) +} + +type FtSuggetFuzzy Completed + +func (c FtSuggetFuzzy) Withscores() FtSuggetWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (FtSuggetWithscores)(c) +} + +func (c FtSuggetFuzzy) Withpayloads() FtSuggetWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSuggetWithpayloads)(c) +} + +func (c FtSuggetFuzzy) Max(max int64) FtSuggetMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(max, 10)) + return (FtSuggetMax)(c) +} + +func (c FtSuggetFuzzy) Build() Completed { + return Completed(c) +} + +type FtSuggetKey Completed + +func (c FtSuggetKey) Prefix(prefix string) FtSuggetPrefix { + c.cs.s = append(c.cs.s, prefix) + return (FtSuggetPrefix)(c) +} + +type FtSuggetMax Completed + +func (c FtSuggetMax) Build() Completed { + return Completed(c) +} + +type FtSuggetPrefix Completed + +func (c FtSuggetPrefix) Fuzzy() FtSuggetFuzzy { + c.cs.s = append(c.cs.s, "FUZZY") + return (FtSuggetFuzzy)(c) +} + +func (c FtSuggetPrefix) Withscores() FtSuggetWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (FtSuggetWithscores)(c) +} + +func (c FtSuggetPrefix) Withpayloads() FtSuggetWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSuggetWithpayloads)(c) +} + +func (c FtSuggetPrefix) Max(max int64) FtSuggetMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(max, 10)) + return (FtSuggetMax)(c) +} + +func (c FtSuggetPrefix) Build() Completed { + return Completed(c) +} + +type FtSuggetWithpayloads Completed + +func (c FtSuggetWithpayloads) Max(max int64) FtSuggetMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(max, 10)) + return (FtSuggetMax)(c) +} + +func (c FtSuggetWithpayloads) Build() Completed { + return Completed(c) +} + +type FtSuggetWithscores Completed + +func (c FtSuggetWithscores) Withpayloads() FtSuggetWithpayloads { + c.cs.s = append(c.cs.s, "WITHPAYLOADS") + return (FtSuggetWithpayloads)(c) +} + +func (c FtSuggetWithscores) Max(max int64) FtSuggetMax { + c.cs.s = append(c.cs.s, "MAX", strconv.FormatInt(max, 10)) + return (FtSuggetMax)(c) +} + +func (c FtSuggetWithscores) Build() Completed { + return Completed(c) +} + +type FtSuglen Completed + +func (b Builder) FtSuglen() (c FtSuglen) { + c = FtSuglen{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SUGLEN") + return c +} + +func (c FtSuglen) Key(key string) FtSuglenKey { + c.cs.s = append(c.cs.s, key) + return (FtSuglenKey)(c) +} + +type FtSuglenKey Completed + +func (c FtSuglenKey) Build() Completed { + return Completed(c) +} + +type FtSyndump Completed + +func (b Builder) FtSyndump() (c FtSyndump) { + c = FtSyndump{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SYNDUMP") + return c +} + +func (c FtSyndump) Index(index string) FtSyndumpIndex { + c.cs.s = append(c.cs.s, index) + return (FtSyndumpIndex)(c) +} + +type FtSyndumpIndex Completed + +func (c FtSyndumpIndex) Build() Completed { + return Completed(c) +} + +type FtSynupdate Completed + +func (b Builder) FtSynupdate() (c FtSynupdate) { + c = FtSynupdate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.SYNUPDATE") + return c +} + +func (c FtSynupdate) Index(index string) FtSynupdateIndex { + c.cs.s = append(c.cs.s, index) + return (FtSynupdateIndex)(c) +} + +type FtSynupdateIndex Completed + +func (c FtSynupdateIndex) SynonymGroupId(synonymGroupId 
string) FtSynupdateSynonymGroupId { + c.cs.s = append(c.cs.s, synonymGroupId) + return (FtSynupdateSynonymGroupId)(c) +} + +type FtSynupdateSkipinitialscan Completed + +func (c FtSynupdateSkipinitialscan) Term(term ...string) FtSynupdateTerm { + c.cs.s = append(c.cs.s, term...) + return (FtSynupdateTerm)(c) +} + +type FtSynupdateSynonymGroupId Completed + +func (c FtSynupdateSynonymGroupId) Skipinitialscan() FtSynupdateSkipinitialscan { + c.cs.s = append(c.cs.s, "SKIPINITIALSCAN") + return (FtSynupdateSkipinitialscan)(c) +} + +func (c FtSynupdateSynonymGroupId) Term(term ...string) FtSynupdateTerm { + c.cs.s = append(c.cs.s, term...) + return (FtSynupdateTerm)(c) +} + +type FtSynupdateTerm Completed + +func (c FtSynupdateTerm) Term(term ...string) FtSynupdateTerm { + c.cs.s = append(c.cs.s, term...) + return c +} + +func (c FtSynupdateTerm) Build() Completed { + return Completed(c) +} + +type FtTagvals Completed + +func (b Builder) FtTagvals() (c FtTagvals) { + c = FtTagvals{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FT.TAGVALS") + return c +} + +func (c FtTagvals) Index(index string) FtTagvalsIndex { + c.cs.s = append(c.cs.s, index) + return (FtTagvalsIndex)(c) +} + +type FtTagvalsFieldName Completed + +func (c FtTagvalsFieldName) Build() Completed { + return Completed(c) +} + +type FtTagvalsIndex Completed + +func (c FtTagvalsIndex) FieldName(fieldName string) FtTagvalsFieldName { + c.cs.s = append(c.cs.s, fieldName) + return (FtTagvalsFieldName)(c) +} + +type FunctionDelete Completed + +func (b Builder) FunctionDelete() (c FunctionDelete) { + c = FunctionDelete{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "DELETE") + return c +} + +func (c FunctionDelete) LibraryName(libraryName string) FunctionDeleteLibraryName { + c.cs.s = append(c.cs.s, libraryName) + return (FunctionDeleteLibraryName)(c) +} + +type FunctionDeleteLibraryName Completed + +func (c FunctionDeleteLibraryName) Build() Completed { + return Completed(c) +} + +type FunctionDump Completed + +func (b Builder) FunctionDump() (c FunctionDump) { + c = FunctionDump{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "DUMP") + return c +} + +func (c FunctionDump) Build() Completed { + return Completed(c) +} + +type FunctionFlush Completed + +func (b Builder) FunctionFlush() (c FunctionFlush) { + c = FunctionFlush{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "FLUSH") + return c +} + +func (c FunctionFlush) Async() FunctionFlushAsync { + c.cs.s = append(c.cs.s, "ASYNC") + return (FunctionFlushAsync)(c) +} + +func (c FunctionFlush) Sync() FunctionFlushAsyncSync { + c.cs.s = append(c.cs.s, "SYNC") + return (FunctionFlushAsyncSync)(c) +} + +func (c FunctionFlush) Build() Completed { + return Completed(c) +} + +type FunctionFlushAsync Completed + +func (c FunctionFlushAsync) Build() Completed { + return Completed(c) +} + +type FunctionFlushAsyncSync Completed + +func (c FunctionFlushAsyncSync) Build() Completed { + return Completed(c) +} + +type FunctionHelp Completed + +func (b Builder) FunctionHelp() (c FunctionHelp) { + c = FunctionHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "HELP") + return c +} + +func (c FunctionHelp) Build() Completed { + return Completed(c) +} + +type FunctionKill Completed + +func (b Builder) FunctionKill() (c FunctionKill) { + c = FunctionKill{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "KILL") + return c +} + +func (c FunctionKill) Build() Completed { + return Completed(c) +} + +type FunctionList Completed + +func (b Builder) 
FunctionList() (c FunctionList) { + c = FunctionList{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "LIST") + return c +} + +func (c FunctionList) Libraryname(libraryNamePattern string) FunctionListLibraryname { + c.cs.s = append(c.cs.s, "LIBRARYNAME", libraryNamePattern) + return (FunctionListLibraryname)(c) +} + +func (c FunctionList) Withcode() FunctionListWithcode { + c.cs.s = append(c.cs.s, "WITHCODE") + return (FunctionListWithcode)(c) +} + +func (c FunctionList) Build() Completed { + return Completed(c) +} + +type FunctionListLibraryname Completed + +func (c FunctionListLibraryname) Withcode() FunctionListWithcode { + c.cs.s = append(c.cs.s, "WITHCODE") + return (FunctionListWithcode)(c) +} + +func (c FunctionListLibraryname) Build() Completed { + return Completed(c) +} + +type FunctionListWithcode Completed + +func (c FunctionListWithcode) Build() Completed { + return Completed(c) +} + +type FunctionLoad Completed + +func (b Builder) FunctionLoad() (c FunctionLoad) { + c = FunctionLoad{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "LOAD") + return c +} + +func (c FunctionLoad) Replace() FunctionLoadReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (FunctionLoadReplace)(c) +} + +func (c FunctionLoad) FunctionCode(functionCode string) FunctionLoadFunctionCode { + c.cs.s = append(c.cs.s, functionCode) + return (FunctionLoadFunctionCode)(c) +} + +type FunctionLoadFunctionCode Completed + +func (c FunctionLoadFunctionCode) Build() Completed { + return Completed(c) +} + +type FunctionLoadReplace Completed + +func (c FunctionLoadReplace) FunctionCode(functionCode string) FunctionLoadFunctionCode { + c.cs.s = append(c.cs.s, functionCode) + return (FunctionLoadFunctionCode)(c) +} + +type FunctionRestore Completed + +func (b Builder) FunctionRestore() (c FunctionRestore) { + c = FunctionRestore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "RESTORE") + return c +} + +func (c FunctionRestore) SerializedValue(serializedValue string) FunctionRestoreSerializedValue { + c.cs.s = append(c.cs.s, serializedValue) + return (FunctionRestoreSerializedValue)(c) +} + +type FunctionRestorePolicyAppend Completed + +func (c FunctionRestorePolicyAppend) Build() Completed { + return Completed(c) +} + +type FunctionRestorePolicyFlush Completed + +func (c FunctionRestorePolicyFlush) Build() Completed { + return Completed(c) +} + +type FunctionRestorePolicyReplace Completed + +func (c FunctionRestorePolicyReplace) Build() Completed { + return Completed(c) +} + +type FunctionRestoreSerializedValue Completed + +func (c FunctionRestoreSerializedValue) Flush() FunctionRestorePolicyFlush { + c.cs.s = append(c.cs.s, "FLUSH") + return (FunctionRestorePolicyFlush)(c) +} + +func (c FunctionRestoreSerializedValue) Append() FunctionRestorePolicyAppend { + c.cs.s = append(c.cs.s, "APPEND") + return (FunctionRestorePolicyAppend)(c) +} + +func (c FunctionRestoreSerializedValue) Replace() FunctionRestorePolicyReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (FunctionRestorePolicyReplace)(c) +} + +func (c FunctionRestoreSerializedValue) Build() Completed { + return Completed(c) +} + +type FunctionStats Completed + +func (b Builder) FunctionStats() (c FunctionStats) { + c = FunctionStats{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "FUNCTION", "STATS") + return c +} + +func (c FunctionStats) Build() Completed { + return Completed(c) +} + +type Geoadd Completed + +func (b Builder) Geoadd() (c Geoadd) { + c = Geoadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GEOADD") + 
return c +} + +func (c Geoadd) Key(key string) GeoaddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeoaddKey)(c) +} + +type GeoaddChangeCh Completed + +func (c GeoaddChangeCh) LongitudeLatitudeMember() GeoaddLongitudeLatitudeMember { + return (GeoaddLongitudeLatitudeMember)(c) +} + +type GeoaddConditionNx Completed + +func (c GeoaddConditionNx) Ch() GeoaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (GeoaddChangeCh)(c) +} + +func (c GeoaddConditionNx) LongitudeLatitudeMember() GeoaddLongitudeLatitudeMember { + return (GeoaddLongitudeLatitudeMember)(c) +} + +type GeoaddConditionXx Completed + +func (c GeoaddConditionXx) Ch() GeoaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (GeoaddChangeCh)(c) +} + +func (c GeoaddConditionXx) LongitudeLatitudeMember() GeoaddLongitudeLatitudeMember { + return (GeoaddLongitudeLatitudeMember)(c) +} + +type GeoaddKey Completed + +func (c GeoaddKey) Nx() GeoaddConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (GeoaddConditionNx)(c) +} + +func (c GeoaddKey) Xx() GeoaddConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (GeoaddConditionXx)(c) +} + +func (c GeoaddKey) Ch() GeoaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (GeoaddChangeCh)(c) +} + +func (c GeoaddKey) LongitudeLatitudeMember() GeoaddLongitudeLatitudeMember { + return (GeoaddLongitudeLatitudeMember)(c) +} + +type GeoaddLongitudeLatitudeMember Completed + +func (c GeoaddLongitudeLatitudeMember) LongitudeLatitudeMember(longitude float64, latitude float64, member string) GeoaddLongitudeLatitudeMember { + c.cs.s = append(c.cs.s, strconv.FormatFloat(longitude, 'f', -1, 64), strconv.FormatFloat(latitude, 'f', -1, 64), member) + return c +} + +func (c GeoaddLongitudeLatitudeMember) Build() Completed { + return Completed(c) +} + +type Geodist Completed + +func (b Builder) Geodist() (c Geodist) { + c = Geodist{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GEODIST") + return c +} + +func (c Geodist) Key(key string) GeodistKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeodistKey)(c) +} + +type GeodistKey Completed + +func (c GeodistKey) Member1(member1 string) GeodistMember1 { + c.cs.s = append(c.cs.s, member1) + return (GeodistMember1)(c) +} + +type GeodistMember1 Completed + +func (c GeodistMember1) Member2(member2 string) GeodistMember2 { + c.cs.s = append(c.cs.s, member2) + return (GeodistMember2)(c) +} + +type GeodistMember2 Completed + +func (c GeodistMember2) M() GeodistUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeodistUnitM)(c) +} + +func (c GeodistMember2) Km() GeodistUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeodistUnitKm)(c) +} + +func (c GeodistMember2) Ft() GeodistUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeodistUnitFt)(c) +} + +func (c GeodistMember2) Mi() GeodistUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeodistUnitMi)(c) +} + +func (c GeodistMember2) Build() Completed { + return Completed(c) +} + +func (c GeodistMember2) Cache() Cacheable { + return Cacheable(c) +} + +type GeodistUnitFt Completed + +func (c GeodistUnitFt) Build() Completed { + return Completed(c) +} + +func (c GeodistUnitFt) Cache() Cacheable { + return Cacheable(c) +} + +type GeodistUnitKm Completed + +func (c GeodistUnitKm) Build() Completed { + return Completed(c) +} + +func (c GeodistUnitKm) Cache() Cacheable { + return Cacheable(c) +} + 
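+// Illustrative use of the GEODIST builder above (a sketch; the key and member names
+// are hypothetical):
+//
+//	cmd := b.Geodist().Key("Sicily").Member1("Palermo").Member2("Catania").Km().Build()
+//
+// which appends: GEODIST Sicily Palermo Catania km. The unit types also expose Cache(),
+// returning a Cacheable terminator for the read-only variant instead of Build().
+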
+type GeodistUnitM Completed + +func (c GeodistUnitM) Build() Completed { + return Completed(c) +} + +func (c GeodistUnitM) Cache() Cacheable { + return Cacheable(c) +} + +type GeodistUnitMi Completed + +func (c GeodistUnitMi) Build() Completed { + return Completed(c) +} + +func (c GeodistUnitMi) Cache() Cacheable { + return Cacheable(c) +} + +type Geohash Completed + +func (b Builder) Geohash() (c Geohash) { + c = Geohash{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GEOHASH") + return c +} + +func (c Geohash) Key(key string) GeohashKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeohashKey)(c) +} + +type GeohashKey Completed + +func (c GeohashKey) Member(member ...string) GeohashMember { + c.cs.s = append(c.cs.s, member...) + return (GeohashMember)(c) +} + +type GeohashMember Completed + +func (c GeohashMember) Member(member ...string) GeohashMember { + c.cs.s = append(c.cs.s, member...) + return c +} + +func (c GeohashMember) Build() Completed { + return Completed(c) +} + +func (c GeohashMember) Cache() Cacheable { + return Cacheable(c) +} + +type Geopos Completed + +func (b Builder) Geopos() (c Geopos) { + c = Geopos{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GEOPOS") + return c +} + +func (c Geopos) Key(key string) GeoposKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeoposKey)(c) +} + +type GeoposKey Completed + +func (c GeoposKey) Member(member ...string) GeoposMember { + c.cs.s = append(c.cs.s, member...) + return (GeoposMember)(c) +} + +type GeoposMember Completed + +func (c GeoposMember) Member(member ...string) GeoposMember { + c.cs.s = append(c.cs.s, member...) 
+ return c +} + +func (c GeoposMember) Build() Completed { + return Completed(c) +} + +func (c GeoposMember) Cache() Cacheable { + return Cacheable(c) +} + +type Georadius Completed + +func (b Builder) Georadius() (c Georadius) { + c = Georadius{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GEORADIUS") + return c +} + +func (c Georadius) Key(key string) GeoradiusKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeoradiusKey)(c) +} + +type GeoradiusCountAny Completed + +func (c GeoradiusCountAny) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusCountAny) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusCountAny) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusCountAny) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusCountAny) Build() Completed { + return Completed(c) +} + +type GeoradiusCountCount Completed + +func (c GeoradiusCountCount) Any() GeoradiusCountAny { + c.cs.s = append(c.cs.s, "ANY") + return (GeoradiusCountAny)(c) +} + +func (c GeoradiusCountCount) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusCountCount) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusCountCount) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusCountCount) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusCountCount) Build() Completed { + return Completed(c) +} + +type GeoradiusKey Completed + +func (c GeoradiusKey) Longitude(longitude float64) GeoradiusLongitude { + c.cs.s = append(c.cs.s, strconv.FormatFloat(longitude, 'f', -1, 64)) + return (GeoradiusLongitude)(c) +} + +type GeoradiusLatitude Completed + +func (c GeoradiusLatitude) Radius(radius float64) GeoradiusRadius { + c.cs.s = append(c.cs.s, strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeoradiusRadius)(c) +} + +type GeoradiusLongitude Completed + +func (c GeoradiusLongitude) Latitude(latitude float64) GeoradiusLatitude { + c.cs.s = append(c.cs.s, strconv.FormatFloat(latitude, 'f', -1, 64)) + return (GeoradiusLatitude)(c) +} + +type GeoradiusOrderAsc Completed + +func (c GeoradiusOrderAsc) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusOrderAsc) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return 
(GeoradiusStoredist)(c) +} + +func (c GeoradiusOrderAsc) Build() Completed { + return Completed(c) +} + +type GeoradiusOrderDesc Completed + +func (c GeoradiusOrderDesc) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusOrderDesc) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusOrderDesc) Build() Completed { + return Completed(c) +} + +type GeoradiusRadius Completed + +func (c GeoradiusRadius) M() GeoradiusUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeoradiusUnitM)(c) +} + +func (c GeoradiusRadius) Km() GeoradiusUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeoradiusUnitKm)(c) +} + +func (c GeoradiusRadius) Ft() GeoradiusUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeoradiusUnitFt)(c) +} + +func (c GeoradiusRadius) Mi() GeoradiusUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeoradiusUnitMi)(c) +} + +type GeoradiusRo Completed + +func (b Builder) GeoradiusRo() (c GeoradiusRo) { + c = GeoradiusRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GEORADIUS_RO") + return c +} + +func (c GeoradiusRo) Key(key string) GeoradiusRoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeoradiusRoKey)(c) +} + +type GeoradiusRoCountAny Completed + +func (c GeoradiusRoCountAny) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoCountAny) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoCountAny) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoCountAny) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoCountCount Completed + +func (c GeoradiusRoCountCount) Any() GeoradiusRoCountAny { + c.cs.s = append(c.cs.s, "ANY") + return (GeoradiusRoCountAny)(c) +} + +func (c GeoradiusRoCountCount) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoCountCount) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoCountCount) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoCountCount) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoKey Completed + +func (c GeoradiusRoKey) Longitude(longitude float64) GeoradiusRoLongitude { + c.cs.s = append(c.cs.s, strconv.FormatFloat(longitude, 'f', -1, 64)) + return (GeoradiusRoLongitude)(c) +} + +type GeoradiusRoLatitude Completed + +func (c GeoradiusRoLatitude) Radius(radius float64) GeoradiusRoRadius { + c.cs.s = append(c.cs.s, strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeoradiusRoRadius)(c) +} + +type GeoradiusRoLongitude Completed + +func (c GeoradiusRoLongitude) Latitude(latitude float64) GeoradiusRoLatitude { + c.cs.s = append(c.cs.s, strconv.FormatFloat(latitude, 'f', -1, 64)) + return (GeoradiusRoLatitude)(c) +} + +type GeoradiusRoOrderAsc Completed + +func (c GeoradiusRoOrderAsc) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoOrderAsc) Cache() Cacheable { + return Cacheable(c) +} + +type 
GeoradiusRoOrderDesc Completed + +func (c GeoradiusRoOrderDesc) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoOrderDesc) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoRadius Completed + +func (c GeoradiusRoRadius) M() GeoradiusRoUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeoradiusRoUnitM)(c) +} + +func (c GeoradiusRoRadius) Km() GeoradiusRoUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeoradiusRoUnitKm)(c) +} + +func (c GeoradiusRoRadius) Ft() GeoradiusRoUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeoradiusRoUnitFt)(c) +} + +func (c GeoradiusRoRadius) Mi() GeoradiusRoUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeoradiusRoUnitMi)(c) +} + +type GeoradiusRoUnitFt Completed + +func (c GeoradiusRoUnitFt) Withcoord() GeoradiusRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusRoWithcoord)(c) +} + +func (c GeoradiusRoUnitFt) Withdist() GeoradiusRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusRoWithdist)(c) +} + +func (c GeoradiusRoUnitFt) Withhash() GeoradiusRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusRoWithhash)(c) +} + +func (c GeoradiusRoUnitFt) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoUnitFt) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoUnitFt) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoUnitFt) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoUnitFt) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoUnitKm Completed + +func (c GeoradiusRoUnitKm) Withcoord() GeoradiusRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusRoWithcoord)(c) +} + +func (c GeoradiusRoUnitKm) Withdist() GeoradiusRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusRoWithdist)(c) +} + +func (c GeoradiusRoUnitKm) Withhash() GeoradiusRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusRoWithhash)(c) +} + +func (c GeoradiusRoUnitKm) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoUnitKm) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoUnitKm) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoUnitKm) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoUnitKm) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoUnitM Completed + +func (c GeoradiusRoUnitM) Withcoord() GeoradiusRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusRoWithcoord)(c) +} + +func (c GeoradiusRoUnitM) Withdist() GeoradiusRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusRoWithdist)(c) +} + +func (c GeoradiusRoUnitM) Withhash() GeoradiusRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusRoWithhash)(c) +} + +func (c GeoradiusRoUnitM) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoUnitM) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c 
GeoradiusRoUnitM) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoUnitM) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoUnitM) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoUnitMi Completed + +func (c GeoradiusRoUnitMi) Withcoord() GeoradiusRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusRoWithcoord)(c) +} + +func (c GeoradiusRoUnitMi) Withdist() GeoradiusRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusRoWithdist)(c) +} + +func (c GeoradiusRoUnitMi) Withhash() GeoradiusRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusRoWithhash)(c) +} + +func (c GeoradiusRoUnitMi) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoUnitMi) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoUnitMi) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoUnitMi) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoUnitMi) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoWithcoord Completed + +func (c GeoradiusRoWithcoord) Withdist() GeoradiusRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusRoWithdist)(c) +} + +func (c GeoradiusRoWithcoord) Withhash() GeoradiusRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusRoWithhash)(c) +} + +func (c GeoradiusRoWithcoord) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoWithcoord) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoWithcoord) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoWithcoord) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoWithcoord) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoWithdist Completed + +func (c GeoradiusRoWithdist) Withhash() GeoradiusRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusRoWithhash)(c) +} + +func (c GeoradiusRoWithdist) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoWithdist) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoWithdist) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoWithdist) Build() Completed { + return Completed(c) +} + +func (c GeoradiusRoWithdist) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusRoWithhash Completed + +func (c GeoradiusRoWithhash) Count(count int64) GeoradiusRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusRoCountCount)(c) +} + +func (c GeoradiusRoWithhash) Asc() GeoradiusRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusRoOrderAsc)(c) +} + +func (c GeoradiusRoWithhash) Desc() GeoradiusRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusRoOrderDesc)(c) +} + +func (c GeoradiusRoWithhash) Build() Completed { + return 
Completed(c) +} + +func (c GeoradiusRoWithhash) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusStore Completed + +func (c GeoradiusStore) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusStore) Build() Completed { + return Completed(c) +} + +type GeoradiusStoredist Completed + +func (c GeoradiusStoredist) Build() Completed { + return Completed(c) +} + +type GeoradiusUnitFt Completed + +func (c GeoradiusUnitFt) Withcoord() GeoradiusWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusWithcoord)(c) +} + +func (c GeoradiusUnitFt) Withdist() GeoradiusWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusWithdist)(c) +} + +func (c GeoradiusUnitFt) Withhash() GeoradiusWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusWithhash)(c) +} + +func (c GeoradiusUnitFt) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusUnitFt) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusUnitFt) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusUnitFt) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusUnitFt) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusUnitFt) Build() Completed { + return Completed(c) +} + +type GeoradiusUnitKm Completed + +func (c GeoradiusUnitKm) Withcoord() GeoradiusWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusWithcoord)(c) +} + +func (c GeoradiusUnitKm) Withdist() GeoradiusWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusWithdist)(c) +} + +func (c GeoradiusUnitKm) Withhash() GeoradiusWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusWithhash)(c) +} + +func (c GeoradiusUnitKm) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusUnitKm) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusUnitKm) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusUnitKm) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusUnitKm) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusUnitKm) Build() Completed { + return Completed(c) +} + +type GeoradiusUnitM Completed + +func (c GeoradiusUnitM) Withcoord() GeoradiusWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return 
(GeoradiusWithcoord)(c) +} + +func (c GeoradiusUnitM) Withdist() GeoradiusWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusWithdist)(c) +} + +func (c GeoradiusUnitM) Withhash() GeoradiusWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusWithhash)(c) +} + +func (c GeoradiusUnitM) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusUnitM) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusUnitM) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusUnitM) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusUnitM) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusUnitM) Build() Completed { + return Completed(c) +} + +type GeoradiusUnitMi Completed + +func (c GeoradiusUnitMi) Withcoord() GeoradiusWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusWithcoord)(c) +} + +func (c GeoradiusUnitMi) Withdist() GeoradiusWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusWithdist)(c) +} + +func (c GeoradiusUnitMi) Withhash() GeoradiusWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusWithhash)(c) +} + +func (c GeoradiusUnitMi) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusUnitMi) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusUnitMi) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusUnitMi) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusUnitMi) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusUnitMi) Build() Completed { + return Completed(c) +} + +type GeoradiusWithcoord Completed + +func (c GeoradiusWithcoord) Withdist() GeoradiusWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusWithdist)(c) +} + +func (c GeoradiusWithcoord) Withhash() GeoradiusWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusWithhash)(c) +} + +func (c GeoradiusWithcoord) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusWithcoord) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusWithcoord) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusWithcoord) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } 
else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusWithcoord) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusWithcoord) Build() Completed { + return Completed(c) +} + +type GeoradiusWithdist Completed + +func (c GeoradiusWithdist) Withhash() GeoradiusWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusWithhash)(c) +} + +func (c GeoradiusWithdist) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusWithdist) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusWithdist) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusWithdist) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusWithdist) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusWithdist) Build() Completed { + return Completed(c) +} + +type GeoradiusWithhash Completed + +func (c GeoradiusWithhash) Count(count int64) GeoradiusCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusCountCount)(c) +} + +func (c GeoradiusWithhash) Asc() GeoradiusOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusOrderAsc)(c) +} + +func (c GeoradiusWithhash) Desc() GeoradiusOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusOrderDesc)(c) +} + +func (c GeoradiusWithhash) Store(key string) GeoradiusStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusStore)(c) +} + +func (c GeoradiusWithhash) Storedist(key string) GeoradiusStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusStoredist)(c) +} + +func (c GeoradiusWithhash) Build() Completed { + return Completed(c) +} + +type Georadiusbymember Completed + +func (b Builder) Georadiusbymember() (c Georadiusbymember) { + c = Georadiusbymember{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GEORADIUSBYMEMBER") + return c +} + +func (c Georadiusbymember) Key(key string) GeoradiusbymemberKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeoradiusbymemberKey)(c) +} + +type GeoradiusbymemberCountAny Completed + +func (c GeoradiusbymemberCountAny) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberCountAny) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberCountAny) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + 
c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberCountAny) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberCountAny) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberCountCount Completed + +func (c GeoradiusbymemberCountCount) Any() GeoradiusbymemberCountAny { + c.cs.s = append(c.cs.s, "ANY") + return (GeoradiusbymemberCountAny)(c) +} + +func (c GeoradiusbymemberCountCount) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberCountCount) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberCountCount) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberCountCount) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberCountCount) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberKey Completed + +func (c GeoradiusbymemberKey) Member(member string) GeoradiusbymemberMember { + c.cs.s = append(c.cs.s, member) + return (GeoradiusbymemberMember)(c) +} + +type GeoradiusbymemberMember Completed + +func (c GeoradiusbymemberMember) Radius(radius float64) GeoradiusbymemberRadius { + c.cs.s = append(c.cs.s, strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeoradiusbymemberRadius)(c) +} + +type GeoradiusbymemberOrderAsc Completed + +func (c GeoradiusbymemberOrderAsc) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberOrderAsc) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberOrderAsc) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberOrderDesc Completed + +func (c GeoradiusbymemberOrderDesc) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberOrderDesc) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberOrderDesc) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberRadius Completed + +func (c GeoradiusbymemberRadius) M() GeoradiusbymemberUnitM { + c.cs.s = 
append(c.cs.s, "m") + return (GeoradiusbymemberUnitM)(c) +} + +func (c GeoradiusbymemberRadius) Km() GeoradiusbymemberUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeoradiusbymemberUnitKm)(c) +} + +func (c GeoradiusbymemberRadius) Ft() GeoradiusbymemberUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeoradiusbymemberUnitFt)(c) +} + +func (c GeoradiusbymemberRadius) Mi() GeoradiusbymemberUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeoradiusbymemberUnitMi)(c) +} + +type GeoradiusbymemberRo Completed + +func (b Builder) GeoradiusbymemberRo() (c GeoradiusbymemberRo) { + c = GeoradiusbymemberRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GEORADIUSBYMEMBER_RO") + return c +} + +func (c GeoradiusbymemberRo) Key(key string) GeoradiusbymemberRoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeoradiusbymemberRoKey)(c) +} + +type GeoradiusbymemberRoCountAny Completed + +func (c GeoradiusbymemberRoCountAny) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoCountAny) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoCountAny) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoCountAny) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoCountCount Completed + +func (c GeoradiusbymemberRoCountCount) Any() GeoradiusbymemberRoCountAny { + c.cs.s = append(c.cs.s, "ANY") + return (GeoradiusbymemberRoCountAny)(c) +} + +func (c GeoradiusbymemberRoCountCount) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoCountCount) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoCountCount) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoCountCount) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoKey Completed + +func (c GeoradiusbymemberRoKey) Member(member string) GeoradiusbymemberRoMember { + c.cs.s = append(c.cs.s, member) + return (GeoradiusbymemberRoMember)(c) +} + +type GeoradiusbymemberRoMember Completed + +func (c GeoradiusbymemberRoMember) Radius(radius float64) GeoradiusbymemberRoRadius { + c.cs.s = append(c.cs.s, strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeoradiusbymemberRoRadius)(c) +} + +type GeoradiusbymemberRoOrderAsc Completed + +func (c GeoradiusbymemberRoOrderAsc) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoOrderAsc) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoOrderDesc Completed + +func (c GeoradiusbymemberRoOrderDesc) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoOrderDesc) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoRadius Completed + +func (c GeoradiusbymemberRoRadius) M() GeoradiusbymemberRoUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeoradiusbymemberRoUnitM)(c) +} + +func (c GeoradiusbymemberRoRadius) Km() GeoradiusbymemberRoUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeoradiusbymemberRoUnitKm)(c) +} + +func (c GeoradiusbymemberRoRadius) Ft() GeoradiusbymemberRoUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeoradiusbymemberRoUnitFt)(c) +} + +func (c 
GeoradiusbymemberRoRadius) Mi() GeoradiusbymemberRoUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeoradiusbymemberRoUnitMi)(c) +} + +type GeoradiusbymemberRoUnitFt Completed + +func (c GeoradiusbymemberRoUnitFt) Withcoord() GeoradiusbymemberRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberRoWithcoord)(c) +} + +func (c GeoradiusbymemberRoUnitFt) Withdist() GeoradiusbymemberRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberRoWithdist)(c) +} + +func (c GeoradiusbymemberRoUnitFt) Withhash() GeoradiusbymemberRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberRoWithhash)(c) +} + +func (c GeoradiusbymemberRoUnitFt) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoUnitFt) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoUnitFt) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoUnitFt) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoUnitFt) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoUnitKm Completed + +func (c GeoradiusbymemberRoUnitKm) Withcoord() GeoradiusbymemberRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberRoWithcoord)(c) +} + +func (c GeoradiusbymemberRoUnitKm) Withdist() GeoradiusbymemberRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberRoWithdist)(c) +} + +func (c GeoradiusbymemberRoUnitKm) Withhash() GeoradiusbymemberRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberRoWithhash)(c) +} + +func (c GeoradiusbymemberRoUnitKm) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoUnitKm) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoUnitKm) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoUnitKm) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoUnitKm) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoUnitM Completed + +func (c GeoradiusbymemberRoUnitM) Withcoord() GeoradiusbymemberRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberRoWithcoord)(c) +} + +func (c GeoradiusbymemberRoUnitM) Withdist() GeoradiusbymemberRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberRoWithdist)(c) +} + +func (c GeoradiusbymemberRoUnitM) Withhash() GeoradiusbymemberRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberRoWithhash)(c) +} + +func (c GeoradiusbymemberRoUnitM) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoUnitM) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoUnitM) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return 
(GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoUnitM) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoUnitM) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoUnitMi Completed + +func (c GeoradiusbymemberRoUnitMi) Withcoord() GeoradiusbymemberRoWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberRoWithcoord)(c) +} + +func (c GeoradiusbymemberRoUnitMi) Withdist() GeoradiusbymemberRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberRoWithdist)(c) +} + +func (c GeoradiusbymemberRoUnitMi) Withhash() GeoradiusbymemberRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberRoWithhash)(c) +} + +func (c GeoradiusbymemberRoUnitMi) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoUnitMi) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoUnitMi) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoUnitMi) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoUnitMi) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoWithcoord Completed + +func (c GeoradiusbymemberRoWithcoord) Withdist() GeoradiusbymemberRoWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberRoWithdist)(c) +} + +func (c GeoradiusbymemberRoWithcoord) Withhash() GeoradiusbymemberRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberRoWithhash)(c) +} + +func (c GeoradiusbymemberRoWithcoord) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoWithcoord) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoWithcoord) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoWithcoord) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoWithcoord) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoWithdist Completed + +func (c GeoradiusbymemberRoWithdist) Withhash() GeoradiusbymemberRoWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberRoWithhash)(c) +} + +func (c GeoradiusbymemberRoWithdist) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoWithdist) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoWithdist) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoWithdist) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoWithdist) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberRoWithhash Completed + +func (c GeoradiusbymemberRoWithhash) Count(count int64) GeoradiusbymemberRoCountCount { + c.cs.s = append(c.cs.s, "COUNT", 
strconv.FormatInt(count, 10)) + return (GeoradiusbymemberRoCountCount)(c) +} + +func (c GeoradiusbymemberRoWithhash) Asc() GeoradiusbymemberRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberRoOrderAsc)(c) +} + +func (c GeoradiusbymemberRoWithhash) Desc() GeoradiusbymemberRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberRoOrderDesc)(c) +} + +func (c GeoradiusbymemberRoWithhash) Build() Completed { + return Completed(c) +} + +func (c GeoradiusbymemberRoWithhash) Cache() Cacheable { + return Cacheable(c) +} + +type GeoradiusbymemberStore Completed + +func (c GeoradiusbymemberStore) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberStore) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberStoredist Completed + +func (c GeoradiusbymemberStoredist) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberUnitFt Completed + +func (c GeoradiusbymemberUnitFt) Withcoord() GeoradiusbymemberWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberWithcoord)(c) +} + +func (c GeoradiusbymemberUnitFt) Withdist() GeoradiusbymemberWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberWithdist)(c) +} + +func (c GeoradiusbymemberUnitFt) Withhash() GeoradiusbymemberWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberWithhash)(c) +} + +func (c GeoradiusbymemberUnitFt) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberUnitFt) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberUnitFt) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberUnitFt) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberUnitFt) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberUnitFt) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberUnitKm Completed + +func (c GeoradiusbymemberUnitKm) Withcoord() GeoradiusbymemberWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberWithcoord)(c) +} + +func (c GeoradiusbymemberUnitKm) Withdist() GeoradiusbymemberWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberWithdist)(c) +} + +func (c GeoradiusbymemberUnitKm) Withhash() GeoradiusbymemberWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberWithhash)(c) +} + +func (c GeoradiusbymemberUnitKm) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberUnitKm) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return 
(GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberUnitKm) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberUnitKm) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberUnitKm) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberUnitKm) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberUnitM Completed + +func (c GeoradiusbymemberUnitM) Withcoord() GeoradiusbymemberWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberWithcoord)(c) +} + +func (c GeoradiusbymemberUnitM) Withdist() GeoradiusbymemberWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberWithdist)(c) +} + +func (c GeoradiusbymemberUnitM) Withhash() GeoradiusbymemberWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberWithhash)(c) +} + +func (c GeoradiusbymemberUnitM) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberUnitM) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberUnitM) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberUnitM) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberUnitM) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberUnitM) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberUnitMi Completed + +func (c GeoradiusbymemberUnitMi) Withcoord() GeoradiusbymemberWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeoradiusbymemberWithcoord)(c) +} + +func (c GeoradiusbymemberUnitMi) Withdist() GeoradiusbymemberWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberWithdist)(c) +} + +func (c GeoradiusbymemberUnitMi) Withhash() GeoradiusbymemberWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberWithhash)(c) +} + +func (c GeoradiusbymemberUnitMi) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberUnitMi) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberUnitMi) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberUnitMi) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot 
| slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberUnitMi) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberUnitMi) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberWithcoord Completed + +func (c GeoradiusbymemberWithcoord) Withdist() GeoradiusbymemberWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeoradiusbymemberWithdist)(c) +} + +func (c GeoradiusbymemberWithcoord) Withhash() GeoradiusbymemberWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberWithhash)(c) +} + +func (c GeoradiusbymemberWithcoord) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberWithcoord) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberWithcoord) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberWithcoord) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberWithcoord) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberWithcoord) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberWithdist Completed + +func (c GeoradiusbymemberWithdist) Withhash() GeoradiusbymemberWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeoradiusbymemberWithhash)(c) +} + +func (c GeoradiusbymemberWithdist) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberWithdist) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberWithdist) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberWithdist) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberWithdist) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberWithdist) Build() Completed { + return Completed(c) +} + +type GeoradiusbymemberWithhash Completed + +func (c GeoradiusbymemberWithhash) Count(count int64) GeoradiusbymemberCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return 
(GeoradiusbymemberCountCount)(c) +} + +func (c GeoradiusbymemberWithhash) Asc() GeoradiusbymemberOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeoradiusbymemberOrderAsc)(c) +} + +func (c GeoradiusbymemberWithhash) Desc() GeoradiusbymemberOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeoradiusbymemberOrderDesc)(c) +} + +func (c GeoradiusbymemberWithhash) Store(key string) GeoradiusbymemberStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STORE", key) + return (GeoradiusbymemberStore)(c) +} + +func (c GeoradiusbymemberWithhash) Storedist(key string) GeoradiusbymemberStoredist { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, "STOREDIST", key) + return (GeoradiusbymemberStoredist)(c) +} + +func (c GeoradiusbymemberWithhash) Build() Completed { + return Completed(c) +} + +type Geosearch Completed + +func (b Builder) Geosearch() (c Geosearch) { + c = Geosearch{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GEOSEARCH") + return c +} + +func (c Geosearch) Key(key string) GeosearchKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GeosearchKey)(c) +} + +type GeosearchCircleBoxBybox Completed + +func (c GeosearchCircleBoxBybox) Height(height float64) GeosearchCircleBoxHeight { + c.cs.s = append(c.cs.s, strconv.FormatFloat(height, 'f', -1, 64)) + return (GeosearchCircleBoxHeight)(c) +} + +type GeosearchCircleBoxHeight Completed + +func (c GeosearchCircleBoxHeight) M() GeosearchCircleBoxUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeosearchCircleBoxUnitM)(c) +} + +func (c GeosearchCircleBoxHeight) Km() GeosearchCircleBoxUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeosearchCircleBoxUnitKm)(c) +} + +func (c GeosearchCircleBoxHeight) Ft() GeosearchCircleBoxUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeosearchCircleBoxUnitFt)(c) +} + +func (c GeosearchCircleBoxHeight) Mi() GeosearchCircleBoxUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeosearchCircleBoxUnitMi)(c) +} + +type GeosearchCircleBoxUnitFt Completed + +func (c GeosearchCircleBoxUnitFt) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleBoxUnitFt) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleBoxUnitFt) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleBoxUnitFt) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleBoxUnitFt) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleBoxUnitFt) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleBoxUnitFt) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleBoxUnitFt) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleBoxUnitKm Completed + +func (c GeosearchCircleBoxUnitKm) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleBoxUnitKm) Desc() GeosearchOrderDesc { + c.cs.s = 
append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleBoxUnitKm) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleBoxUnitKm) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleBoxUnitKm) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleBoxUnitKm) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleBoxUnitKm) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleBoxUnitKm) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleBoxUnitM Completed + +func (c GeosearchCircleBoxUnitM) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleBoxUnitM) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleBoxUnitM) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleBoxUnitM) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleBoxUnitM) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleBoxUnitM) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleBoxUnitM) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleBoxUnitM) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleBoxUnitMi Completed + +func (c GeosearchCircleBoxUnitMi) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleBoxUnitMi) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleBoxUnitMi) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleBoxUnitMi) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleBoxUnitMi) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleBoxUnitMi) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleBoxUnitMi) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleBoxUnitMi) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleCircleByradius Completed + +func (c GeosearchCircleCircleByradius) M() GeosearchCircleCircleUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeosearchCircleCircleUnitM)(c) +} + +func (c GeosearchCircleCircleByradius) Km() GeosearchCircleCircleUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeosearchCircleCircleUnitKm)(c) +} + +func (c GeosearchCircleCircleByradius) Ft() GeosearchCircleCircleUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeosearchCircleCircleUnitFt)(c) +} + +func (c GeosearchCircleCircleByradius) Mi() 
GeosearchCircleCircleUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeosearchCircleCircleUnitMi)(c) +} + +type GeosearchCircleCircleUnitFt Completed + +func (c GeosearchCircleCircleUnitFt) Bybox(width float64) GeosearchCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchCircleBoxBybox)(c) +} + +func (c GeosearchCircleCircleUnitFt) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleCircleUnitFt) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleCircleUnitFt) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleCircleUnitFt) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleCircleUnitFt) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleCircleUnitFt) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleCircleUnitFt) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleCircleUnitFt) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleCircleUnitKm Completed + +func (c GeosearchCircleCircleUnitKm) Bybox(width float64) GeosearchCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchCircleBoxBybox)(c) +} + +func (c GeosearchCircleCircleUnitKm) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleCircleUnitKm) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleCircleUnitKm) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleCircleUnitKm) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleCircleUnitKm) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleCircleUnitKm) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleCircleUnitKm) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleCircleUnitKm) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleCircleUnitM Completed + +func (c GeosearchCircleCircleUnitM) Bybox(width float64) GeosearchCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchCircleBoxBybox)(c) +} + +func (c GeosearchCircleCircleUnitM) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleCircleUnitM) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleCircleUnitM) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleCircleUnitM) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return 
(GeosearchWithcoord)(c) +} + +func (c GeosearchCircleCircleUnitM) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleCircleUnitM) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleCircleUnitM) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleCircleUnitM) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCircleCircleUnitMi Completed + +func (c GeosearchCircleCircleUnitMi) Bybox(width float64) GeosearchCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchCircleBoxBybox)(c) +} + +func (c GeosearchCircleCircleUnitMi) Asc() GeosearchOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchOrderAsc)(c) +} + +func (c GeosearchCircleCircleUnitMi) Desc() GeosearchOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchOrderDesc)(c) +} + +func (c GeosearchCircleCircleUnitMi) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchCircleCircleUnitMi) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCircleCircleUnitMi) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCircleCircleUnitMi) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCircleCircleUnitMi) Build() Completed { + return Completed(c) +} + +func (c GeosearchCircleCircleUnitMi) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCountAny Completed + +func (c GeosearchCountAny) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCountAny) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCountAny) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCountAny) Build() Completed { + return Completed(c) +} + +func (c GeosearchCountAny) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchCountCount Completed + +func (c GeosearchCountCount) Any() GeosearchCountAny { + c.cs.s = append(c.cs.s, "ANY") + return (GeosearchCountAny)(c) +} + +func (c GeosearchCountCount) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchCountCount) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchCountCount) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchCountCount) Build() Completed { + return Completed(c) +} + +func (c GeosearchCountCount) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchFrommemberFromlonlat Completed + +func (c GeosearchFrommemberFromlonlat) Byradius(radius float64) GeosearchCircleCircleByradius { + c.cs.s = append(c.cs.s, "BYRADIUS", strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeosearchCircleCircleByradius)(c) +} + +func (c GeosearchFrommemberFromlonlat) Bybox(width float64) GeosearchCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + 
return (GeosearchCircleBoxBybox)(c) +} + +type GeosearchFrommemberFrommember Completed + +func (c GeosearchFrommemberFrommember) Fromlonlat(longitude float64, latitude float64) GeosearchFrommemberFromlonlat { + c.cs.s = append(c.cs.s, "FROMLONLAT", strconv.FormatFloat(longitude, 'f', -1, 64), strconv.FormatFloat(latitude, 'f', -1, 64)) + return (GeosearchFrommemberFromlonlat)(c) +} + +func (c GeosearchFrommemberFrommember) Byradius(radius float64) GeosearchCircleCircleByradius { + c.cs.s = append(c.cs.s, "BYRADIUS", strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeosearchCircleCircleByradius)(c) +} + +func (c GeosearchFrommemberFrommember) Bybox(width float64) GeosearchCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchCircleBoxBybox)(c) +} + +type GeosearchKey Completed + +func (c GeosearchKey) Frommember(member string) GeosearchFrommemberFrommember { + c.cs.s = append(c.cs.s, "FROMMEMBER", member) + return (GeosearchFrommemberFrommember)(c) +} + +func (c GeosearchKey) Fromlonlat(longitude float64, latitude float64) GeosearchFrommemberFromlonlat { + c.cs.s = append(c.cs.s, "FROMLONLAT", strconv.FormatFloat(longitude, 'f', -1, 64), strconv.FormatFloat(latitude, 'f', -1, 64)) + return (GeosearchFrommemberFromlonlat)(c) +} + +type GeosearchOrderAsc Completed + +func (c GeosearchOrderAsc) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchOrderAsc) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchOrderAsc) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchOrderAsc) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchOrderAsc) Build() Completed { + return Completed(c) +} + +func (c GeosearchOrderAsc) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchOrderDesc Completed + +func (c GeosearchOrderDesc) Count(count int64) GeosearchCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchCountCount)(c) +} + +func (c GeosearchOrderDesc) Withcoord() GeosearchWithcoord { + c.cs.s = append(c.cs.s, "WITHCOORD") + return (GeosearchWithcoord)(c) +} + +func (c GeosearchOrderDesc) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchOrderDesc) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchOrderDesc) Build() Completed { + return Completed(c) +} + +func (c GeosearchOrderDesc) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchWithcoord Completed + +func (c GeosearchWithcoord) Withdist() GeosearchWithdist { + c.cs.s = append(c.cs.s, "WITHDIST") + return (GeosearchWithdist)(c) +} + +func (c GeosearchWithcoord) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchWithcoord) Build() Completed { + return Completed(c) +} + +func (c GeosearchWithcoord) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchWithdist Completed + +func (c GeosearchWithdist) Withhash() GeosearchWithhash { + c.cs.s = append(c.cs.s, "WITHHASH") + return (GeosearchWithhash)(c) +} + +func (c GeosearchWithdist) Build() Completed { + return Completed(c) +} + 
+func (c GeosearchWithdist) Cache() Cacheable { + return Cacheable(c) +} + +type GeosearchWithhash Completed + +func (c GeosearchWithhash) Build() Completed { + return Completed(c) +} + +func (c GeosearchWithhash) Cache() Cacheable { + return Cacheable(c) +} + +type Geosearchstore Completed + +func (b Builder) Geosearchstore() (c Geosearchstore) { + c = Geosearchstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GEOSEARCHSTORE") + return c +} + +func (c Geosearchstore) Destination(destination string) GeosearchstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (GeosearchstoreDestination)(c) +} + +type GeosearchstoreCircleBoxBybox Completed + +func (c GeosearchstoreCircleBoxBybox) Height(height float64) GeosearchstoreCircleBoxHeight { + c.cs.s = append(c.cs.s, strconv.FormatFloat(height, 'f', -1, 64)) + return (GeosearchstoreCircleBoxHeight)(c) +} + +type GeosearchstoreCircleBoxHeight Completed + +func (c GeosearchstoreCircleBoxHeight) M() GeosearchstoreCircleBoxUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeosearchstoreCircleBoxUnitM)(c) +} + +func (c GeosearchstoreCircleBoxHeight) Km() GeosearchstoreCircleBoxUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeosearchstoreCircleBoxUnitKm)(c) +} + +func (c GeosearchstoreCircleBoxHeight) Ft() GeosearchstoreCircleBoxUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeosearchstoreCircleBoxUnitFt)(c) +} + +func (c GeosearchstoreCircleBoxHeight) Mi() GeosearchstoreCircleBoxUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeosearchstoreCircleBoxUnitMi)(c) +} + +type GeosearchstoreCircleBoxUnitFt Completed + +func (c GeosearchstoreCircleBoxUnitFt) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleBoxUnitFt) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleBoxUnitFt) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleBoxUnitFt) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleBoxUnitFt) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleBoxUnitKm Completed + +func (c GeosearchstoreCircleBoxUnitKm) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleBoxUnitKm) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleBoxUnitKm) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleBoxUnitKm) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleBoxUnitKm) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleBoxUnitM Completed + +func (c GeosearchstoreCircleBoxUnitM) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleBoxUnitM) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return 
(GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleBoxUnitM) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleBoxUnitM) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleBoxUnitM) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleBoxUnitMi Completed + +func (c GeosearchstoreCircleBoxUnitMi) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleBoxUnitMi) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleBoxUnitMi) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleBoxUnitMi) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleBoxUnitMi) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleCircleByradius Completed + +func (c GeosearchstoreCircleCircleByradius) M() GeosearchstoreCircleCircleUnitM { + c.cs.s = append(c.cs.s, "m") + return (GeosearchstoreCircleCircleUnitM)(c) +} + +func (c GeosearchstoreCircleCircleByradius) Km() GeosearchstoreCircleCircleUnitKm { + c.cs.s = append(c.cs.s, "km") + return (GeosearchstoreCircleCircleUnitKm)(c) +} + +func (c GeosearchstoreCircleCircleByradius) Ft() GeosearchstoreCircleCircleUnitFt { + c.cs.s = append(c.cs.s, "ft") + return (GeosearchstoreCircleCircleUnitFt)(c) +} + +func (c GeosearchstoreCircleCircleByradius) Mi() GeosearchstoreCircleCircleUnitMi { + c.cs.s = append(c.cs.s, "mi") + return (GeosearchstoreCircleCircleUnitMi)(c) +} + +type GeosearchstoreCircleCircleUnitFt Completed + +func (c GeosearchstoreCircleCircleUnitFt) Bybox(width float64) GeosearchstoreCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchstoreCircleBoxBybox)(c) +} + +func (c GeosearchstoreCircleCircleUnitFt) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleCircleUnitFt) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleCircleUnitFt) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleCircleUnitFt) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleCircleUnitFt) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleCircleUnitKm Completed + +func (c GeosearchstoreCircleCircleUnitKm) Bybox(width float64) GeosearchstoreCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchstoreCircleBoxBybox)(c) +} + +func (c GeosearchstoreCircleCircleUnitKm) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleCircleUnitKm) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return 
(GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleCircleUnitKm) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleCircleUnitKm) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleCircleUnitKm) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleCircleUnitM Completed + +func (c GeosearchstoreCircleCircleUnitM) Bybox(width float64) GeosearchstoreCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchstoreCircleBoxBybox)(c) +} + +func (c GeosearchstoreCircleCircleUnitM) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleCircleUnitM) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleCircleUnitM) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleCircleUnitM) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleCircleUnitM) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCircleCircleUnitMi Completed + +func (c GeosearchstoreCircleCircleUnitMi) Bybox(width float64) GeosearchstoreCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchstoreCircleBoxBybox)(c) +} + +func (c GeosearchstoreCircleCircleUnitMi) Asc() GeosearchstoreOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (GeosearchstoreOrderAsc)(c) +} + +func (c GeosearchstoreCircleCircleUnitMi) Desc() GeosearchstoreOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (GeosearchstoreOrderDesc)(c) +} + +func (c GeosearchstoreCircleCircleUnitMi) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreCircleCircleUnitMi) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCircleCircleUnitMi) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCountAny Completed + +func (c GeosearchstoreCountAny) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCountAny) Build() Completed { + return Completed(c) +} + +type GeosearchstoreCountCount Completed + +func (c GeosearchstoreCountCount) Any() GeosearchstoreCountAny { + c.cs.s = append(c.cs.s, "ANY") + return (GeosearchstoreCountAny)(c) +} + +func (c GeosearchstoreCountCount) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreCountCount) Build() Completed { + return Completed(c) +} + +type GeosearchstoreDestination Completed + +func (c GeosearchstoreDestination) Source(source string) GeosearchstoreSource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (GeosearchstoreSource)(c) +} + +type 
GeosearchstoreFrommemberFromlonlat Completed + +func (c GeosearchstoreFrommemberFromlonlat) Byradius(radius float64) GeosearchstoreCircleCircleByradius { + c.cs.s = append(c.cs.s, "BYRADIUS", strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeosearchstoreCircleCircleByradius)(c) +} + +func (c GeosearchstoreFrommemberFromlonlat) Bybox(width float64) GeosearchstoreCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchstoreCircleBoxBybox)(c) +} + +type GeosearchstoreFrommemberFrommember Completed + +func (c GeosearchstoreFrommemberFrommember) Fromlonlat(longitude float64, latitude float64) GeosearchstoreFrommemberFromlonlat { + c.cs.s = append(c.cs.s, "FROMLONLAT", strconv.FormatFloat(longitude, 'f', -1, 64), strconv.FormatFloat(latitude, 'f', -1, 64)) + return (GeosearchstoreFrommemberFromlonlat)(c) +} + +func (c GeosearchstoreFrommemberFrommember) Byradius(radius float64) GeosearchstoreCircleCircleByradius { + c.cs.s = append(c.cs.s, "BYRADIUS", strconv.FormatFloat(radius, 'f', -1, 64)) + return (GeosearchstoreCircleCircleByradius)(c) +} + +func (c GeosearchstoreFrommemberFrommember) Bybox(width float64) GeosearchstoreCircleBoxBybox { + c.cs.s = append(c.cs.s, "BYBOX", strconv.FormatFloat(width, 'f', -1, 64)) + return (GeosearchstoreCircleBoxBybox)(c) +} + +type GeosearchstoreOrderAsc Completed + +func (c GeosearchstoreOrderAsc) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreOrderAsc) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreOrderAsc) Build() Completed { + return Completed(c) +} + +type GeosearchstoreOrderDesc Completed + +func (c GeosearchstoreOrderDesc) Count(count int64) GeosearchstoreCountCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (GeosearchstoreCountCount)(c) +} + +func (c GeosearchstoreOrderDesc) Storedist() GeosearchstoreStoredist { + c.cs.s = append(c.cs.s, "STOREDIST") + return (GeosearchstoreStoredist)(c) +} + +func (c GeosearchstoreOrderDesc) Build() Completed { + return Completed(c) +} + +type GeosearchstoreSource Completed + +func (c GeosearchstoreSource) Frommember(member string) GeosearchstoreFrommemberFrommember { + c.cs.s = append(c.cs.s, "FROMMEMBER", member) + return (GeosearchstoreFrommemberFrommember)(c) +} + +func (c GeosearchstoreSource) Fromlonlat(longitude float64, latitude float64) GeosearchstoreFrommemberFromlonlat { + c.cs.s = append(c.cs.s, "FROMLONLAT", strconv.FormatFloat(longitude, 'f', -1, 64), strconv.FormatFloat(latitude, 'f', -1, 64)) + return (GeosearchstoreFrommemberFromlonlat)(c) +} + +type GeosearchstoreStoredist Completed + +func (c GeosearchstoreStoredist) Build() Completed { + return Completed(c) +} + +type Get Completed + +func (b Builder) Get() (c Get) { + c = Get{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GET") + return c +} + +func (c Get) Key(key string) GetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GetKey)(c) +} + +type GetKey Completed + +func (c GetKey) Build() Completed { + return Completed(c) +} + +func (c GetKey) Cache() Cacheable { + return Cacheable(c) +} + +type Getbit Completed + +func (b Builder) Getbit() (c Getbit) { + c = Getbit{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = 
append(c.cs.s, "GETBIT") + return c +} + +func (c Getbit) Key(key string) GetbitKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GetbitKey)(c) +} + +type GetbitKey Completed + +func (c GetbitKey) Offset(offset int64) GetbitOffset { + c.cs.s = append(c.cs.s, strconv.FormatInt(offset, 10)) + return (GetbitOffset)(c) +} + +type GetbitOffset Completed + +func (c GetbitOffset) Build() Completed { + return Completed(c) +} + +func (c GetbitOffset) Cache() Cacheable { + return Cacheable(c) +} + +type Getdel Completed + +func (b Builder) Getdel() (c Getdel) { + c = Getdel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GETDEL") + return c +} + +func (c Getdel) Key(key string) GetdelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GetdelKey)(c) +} + +type GetdelKey Completed + +func (c GetdelKey) Build() Completed { + return Completed(c) +} + +type Getex Completed + +func (b Builder) Getex() (c Getex) { + c = Getex{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GETEX") + return c +} + +func (c Getex) Key(key string) GetexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GetexKey)(c) +} + +type GetexExpirationExSeconds Completed + +func (c GetexExpirationExSeconds) Build() Completed { + return Completed(c) +} + +type GetexExpirationExatTimestamp Completed + +func (c GetexExpirationExatTimestamp) Build() Completed { + return Completed(c) +} + +type GetexExpirationPersist Completed + +func (c GetexExpirationPersist) Build() Completed { + return Completed(c) +} + +type GetexExpirationPxMilliseconds Completed + +func (c GetexExpirationPxMilliseconds) Build() Completed { + return Completed(c) +} + +type GetexExpirationPxatMillisecondsTimestamp Completed + +func (c GetexExpirationPxatMillisecondsTimestamp) Build() Completed { + return Completed(c) +} + +type GetexKey Completed + +func (c GetexKey) ExSeconds(seconds int64) GetexExpirationExSeconds { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(seconds, 10)) + return (GetexExpirationExSeconds)(c) +} + +func (c GetexKey) PxMilliseconds(milliseconds int64) GetexExpirationPxMilliseconds { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(milliseconds, 10)) + return (GetexExpirationPxMilliseconds)(c) +} + +func (c GetexKey) ExatTimestamp(timestamp int64) GetexExpirationExatTimestamp { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp, 10)) + return (GetexExpirationExatTimestamp)(c) +} + +func (c GetexKey) PxatMillisecondsTimestamp(millisecondsTimestamp int64) GetexExpirationPxatMillisecondsTimestamp { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(millisecondsTimestamp, 10)) + return (GetexExpirationPxatMillisecondsTimestamp)(c) +} + +func (c GetexKey) Persist() GetexExpirationPersist { + c.cs.s = append(c.cs.s, "PERSIST") + return (GetexExpirationPersist)(c) +} + +func (c GetexKey) Build() Completed { + return Completed(c) +} + +type Getrange Completed + +func (b Builder) Getrange() (c Getrange) { + c = Getrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GETRANGE") + return c +} + +func (c Getrange) Key(key string) GetrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GetrangeKey)(c) +} + +type GetrangeEnd Completed + 
+func (c GetrangeEnd) Build() Completed { + return Completed(c) +} + +func (c GetrangeEnd) Cache() Cacheable { + return Cacheable(c) +} + +type GetrangeKey Completed + +func (c GetrangeKey) Start(start int64) GetrangeStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (GetrangeStart)(c) +} + +type GetrangeStart Completed + +func (c GetrangeStart) End(end int64) GetrangeEnd { + c.cs.s = append(c.cs.s, strconv.FormatInt(end, 10)) + return (GetrangeEnd)(c) +} + +type Getset Completed + +func (b Builder) Getset() (c Getset) { + c = Getset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GETSET") + return c +} + +func (c Getset) Key(key string) GetsetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (GetsetKey)(c) +} + +type GetsetKey Completed + +func (c GetsetKey) Value(value string) GetsetValue { + c.cs.s = append(c.cs.s, value) + return (GetsetValue)(c) +} + +type GetsetValue Completed + +func (c GetsetValue) Build() Completed { + return Completed(c) +} + +type GraphConfigGet Completed + +func (b Builder) GraphConfigGet() (c GraphConfigGet) { + c = GraphConfigGet{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GRAPH.CONFIG", "GET") + return c +} + +func (c GraphConfigGet) Name(name string) GraphConfigGetName { + c.cs.s = append(c.cs.s, name) + return (GraphConfigGetName)(c) +} + +type GraphConfigGetName Completed + +func (c GraphConfigGetName) Build() Completed { + return Completed(c) +} + +type GraphConfigSet Completed + +func (b Builder) GraphConfigSet() (c GraphConfigSet) { + c = GraphConfigSet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GRAPH.CONFIG", "SET") + return c +} + +func (c GraphConfigSet) Name(name string) GraphConfigSetName { + c.cs.s = append(c.cs.s, name) + return (GraphConfigSetName)(c) +} + +type GraphConfigSetName Completed + +func (c GraphConfigSetName) Value(value string) GraphConfigSetValue { + c.cs.s = append(c.cs.s, value) + return (GraphConfigSetValue)(c) +} + +type GraphConfigSetValue Completed + +func (c GraphConfigSetValue) Build() Completed { + return Completed(c) +} + +type GraphDelete Completed + +func (b Builder) GraphDelete() (c GraphDelete) { + c = GraphDelete{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GRAPH.DELETE") + return c +} + +func (c GraphDelete) Graph(graph string) GraphDeleteGraph { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(graph) + } else { + c.ks = check(c.ks, slot(graph)) + } + c.cs.s = append(c.cs.s, graph) + return (GraphDeleteGraph)(c) +} + +type GraphDeleteGraph Completed + +func (c GraphDeleteGraph) Build() Completed { + return Completed(c) +} + +type GraphExplain Completed + +func (b Builder) GraphExplain() (c GraphExplain) { + c = GraphExplain{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GRAPH.EXPLAIN") + return c +} + +func (c GraphExplain) Graph(graph string) GraphExplainGraph { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(graph) + } else { + c.ks = check(c.ks, slot(graph)) + } + c.cs.s = append(c.cs.s, graph) + return (GraphExplainGraph)(c) +} + +type GraphExplainGraph Completed + +func (c GraphExplainGraph) Query(query string) GraphExplainQuery { + c.cs.s = append(c.cs.s, query) + return (GraphExplainQuery)(c) +} + +type GraphExplainQuery Completed + +func (c GraphExplainQuery) Build() Completed { + return Completed(c) +} + +type GraphList Completed + +func (b Builder) GraphList() (c GraphList) { + c = GraphList{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = 
append(c.cs.s, "GRAPH.LIST") + return c +} + +func (c GraphList) Build() Completed { + return Completed(c) +} + +type GraphProfile Completed + +func (b Builder) GraphProfile() (c GraphProfile) { + c = GraphProfile{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GRAPH.PROFILE") + return c +} + +func (c GraphProfile) Graph(graph string) GraphProfileGraph { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(graph) + } else { + c.ks = check(c.ks, slot(graph)) + } + c.cs.s = append(c.cs.s, graph) + return (GraphProfileGraph)(c) +} + +type GraphProfileGraph Completed + +func (c GraphProfileGraph) Query(query string) GraphProfileQuery { + c.cs.s = append(c.cs.s, query) + return (GraphProfileQuery)(c) +} + +type GraphProfileQuery Completed + +func (c GraphProfileQuery) Timeout(timeout int64) GraphProfileTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (GraphProfileTimeout)(c) +} + +func (c GraphProfileQuery) Build() Completed { + return Completed(c) +} + +type GraphProfileTimeout Completed + +func (c GraphProfileTimeout) Build() Completed { + return Completed(c) +} + +type GraphQuery Completed + +func (b Builder) GraphQuery() (c GraphQuery) { + c = GraphQuery{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "GRAPH.QUERY") + return c +} + +func (c GraphQuery) Graph(graph string) GraphQueryGraph { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(graph) + } else { + c.ks = check(c.ks, slot(graph)) + } + c.cs.s = append(c.cs.s, graph) + return (GraphQueryGraph)(c) +} + +type GraphQueryGraph Completed + +func (c GraphQueryGraph) Query(query string) GraphQueryQuery { + c.cs.s = append(c.cs.s, query) + return (GraphQueryQuery)(c) +} + +type GraphQueryQuery Completed + +func (c GraphQueryQuery) Timeout(timeout int64) GraphQueryTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (GraphQueryTimeout)(c) +} + +func (c GraphQueryQuery) Build() Completed { + return Completed(c) +} + +type GraphQueryTimeout Completed + +func (c GraphQueryTimeout) Build() Completed { + return Completed(c) +} + +type GraphRoQuery Completed + +func (b Builder) GraphRoQuery() (c GraphRoQuery) { + c = GraphRoQuery{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GRAPH.RO_QUERY") + return c +} + +func (c GraphRoQuery) Graph(graph string) GraphRoQueryGraph { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(graph) + } else { + c.ks = check(c.ks, slot(graph)) + } + c.cs.s = append(c.cs.s, graph) + return (GraphRoQueryGraph)(c) +} + +type GraphRoQueryGraph Completed + +func (c GraphRoQueryGraph) Query(query string) GraphRoQueryQuery { + c.cs.s = append(c.cs.s, query) + return (GraphRoQueryQuery)(c) +} + +type GraphRoQueryQuery Completed + +func (c GraphRoQueryQuery) Timeout(timeout int64) GraphRoQueryTimeout { + c.cs.s = append(c.cs.s, "TIMEOUT", strconv.FormatInt(timeout, 10)) + return (GraphRoQueryTimeout)(c) +} + +func (c GraphRoQueryQuery) Build() Completed { + return Completed(c) +} + +func (c GraphRoQueryQuery) Cache() Cacheable { + return Cacheable(c) +} + +type GraphRoQueryTimeout Completed + +func (c GraphRoQueryTimeout) Build() Completed { + return Completed(c) +} + +func (c GraphRoQueryTimeout) Cache() Cacheable { + return Cacheable(c) +} + +type GraphSlowlog Completed + +func (b Builder) GraphSlowlog() (c GraphSlowlog) { + c = GraphSlowlog{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "GRAPH.SLOWLOG") + return c +} + +func (c GraphSlowlog) Graph(graph string) GraphSlowlogGraph { + if c.ks&NoSlot == NoSlot { + c.ks = 
NoSlot | slot(graph) + } else { + c.ks = check(c.ks, slot(graph)) + } + c.cs.s = append(c.cs.s, graph) + return (GraphSlowlogGraph)(c) +} + +type GraphSlowlogGraph Completed + +func (c GraphSlowlogGraph) Build() Completed { + return Completed(c) +} + +type Hdel Completed + +func (b Builder) Hdel() (c Hdel) { + c = Hdel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HDEL") + return c +} + +func (c Hdel) Key(key string) HdelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HdelKey)(c) +} + +type HdelField Completed + +func (c HdelField) Field(field ...string) HdelField { + c.cs.s = append(c.cs.s, field...) + return c +} + +func (c HdelField) Build() Completed { + return Completed(c) +} + +type HdelKey Completed + +func (c HdelKey) Field(field ...string) HdelField { + c.cs.s = append(c.cs.s, field...) + return (HdelField)(c) +} + +type Hello Completed + +func (b Builder) Hello() (c Hello) { + c = Hello{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HELLO") + return c +} + +func (c Hello) Protover(protover int64) HelloArgumentsProtover { + c.cs.s = append(c.cs.s, strconv.FormatInt(protover, 10)) + return (HelloArgumentsProtover)(c) +} + +func (c Hello) Build() Completed { + return Completed(c) +} + +type HelloArgumentsAuth Completed + +func (c HelloArgumentsAuth) Setname(clientname string) HelloArgumentsSetname { + c.cs.s = append(c.cs.s, "SETNAME", clientname) + return (HelloArgumentsSetname)(c) +} + +func (c HelloArgumentsAuth) Build() Completed { + return Completed(c) +} + +type HelloArgumentsProtover Completed + +func (c HelloArgumentsProtover) Auth(username string, password string) HelloArgumentsAuth { + c.cs.s = append(c.cs.s, "AUTH", username, password) + return (HelloArgumentsAuth)(c) +} + +func (c HelloArgumentsProtover) Setname(clientname string) HelloArgumentsSetname { + c.cs.s = append(c.cs.s, "SETNAME", clientname) + return (HelloArgumentsSetname)(c) +} + +func (c HelloArgumentsProtover) Build() Completed { + return Completed(c) +} + +type HelloArgumentsSetname Completed + +func (c HelloArgumentsSetname) Build() Completed { + return Completed(c) +} + +type Hexists Completed + +func (b Builder) Hexists() (c Hexists) { + c = Hexists{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HEXISTS") + return c +} + +func (c Hexists) Key(key string) HexistsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HexistsKey)(c) +} + +type HexistsField Completed + +func (c HexistsField) Build() Completed { + return Completed(c) +} + +func (c HexistsField) Cache() Cacheable { + return Cacheable(c) +} + +type HexistsKey Completed + +func (c HexistsKey) Field(field string) HexistsField { + c.cs.s = append(c.cs.s, field) + return (HexistsField)(c) +} + +type Hget Completed + +func (b Builder) Hget() (c Hget) { + c = Hget{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HGET") + return c +} + +func (c Hget) Key(key string) HgetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HgetKey)(c) +} + +type HgetField Completed + +func (c HgetField) Build() Completed { + return Completed(c) +} + +func (c HgetField) Cache() Cacheable { + return Cacheable(c) +} + +type HgetKey Completed + +func (c HgetKey) Field(field string) HgetField { + c.cs.s = append(c.cs.s, field) + return (HgetField)(c) 
+} + +type Hgetall Completed + +func (b Builder) Hgetall() (c Hgetall) { + c = Hgetall{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HGETALL") + return c +} + +func (c Hgetall) Key(key string) HgetallKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HgetallKey)(c) +} + +type HgetallKey Completed + +func (c HgetallKey) Build() Completed { + return Completed(c) +} + +func (c HgetallKey) Cache() Cacheable { + return Cacheable(c) +} + +type Hincrby Completed + +func (b Builder) Hincrby() (c Hincrby) { + c = Hincrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HINCRBY") + return c +} + +func (c Hincrby) Key(key string) HincrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HincrbyKey)(c) +} + +type HincrbyField Completed + +func (c HincrbyField) Increment(increment int64) HincrbyIncrement { + c.cs.s = append(c.cs.s, strconv.FormatInt(increment, 10)) + return (HincrbyIncrement)(c) +} + +type HincrbyIncrement Completed + +func (c HincrbyIncrement) Build() Completed { + return Completed(c) +} + +type HincrbyKey Completed + +func (c HincrbyKey) Field(field string) HincrbyField { + c.cs.s = append(c.cs.s, field) + return (HincrbyField)(c) +} + +type Hincrbyfloat Completed + +func (b Builder) Hincrbyfloat() (c Hincrbyfloat) { + c = Hincrbyfloat{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HINCRBYFLOAT") + return c +} + +func (c Hincrbyfloat) Key(key string) HincrbyfloatKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HincrbyfloatKey)(c) +} + +type HincrbyfloatField Completed + +func (c HincrbyfloatField) Increment(increment float64) HincrbyfloatIncrement { + c.cs.s = append(c.cs.s, strconv.FormatFloat(increment, 'f', -1, 64)) + return (HincrbyfloatIncrement)(c) +} + +type HincrbyfloatIncrement Completed + +func (c HincrbyfloatIncrement) Build() Completed { + return Completed(c) +} + +type HincrbyfloatKey Completed + +func (c HincrbyfloatKey) Field(field string) HincrbyfloatField { + c.cs.s = append(c.cs.s, field) + return (HincrbyfloatField)(c) +} + +type Hkeys Completed + +func (b Builder) Hkeys() (c Hkeys) { + c = Hkeys{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HKEYS") + return c +} + +func (c Hkeys) Key(key string) HkeysKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HkeysKey)(c) +} + +type HkeysKey Completed + +func (c HkeysKey) Build() Completed { + return Completed(c) +} + +func (c HkeysKey) Cache() Cacheable { + return Cacheable(c) +} + +type Hlen Completed + +func (b Builder) Hlen() (c Hlen) { + c = Hlen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HLEN") + return c +} + +func (c Hlen) Key(key string) HlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HlenKey)(c) +} + +type HlenKey Completed + +func (c HlenKey) Build() Completed { + return Completed(c) +} + +func (c HlenKey) Cache() Cacheable { + return Cacheable(c) +} + +type Hmget Completed + +func (b Builder) Hmget() (c Hmget) { + c = Hmget{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HMGET") + return c +} + +func (c Hmget) Key(key string) HmgetKey { + if 
c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HmgetKey)(c) +} + +type HmgetField Completed + +func (c HmgetField) Field(field ...string) HmgetField { + c.cs.s = append(c.cs.s, field...) + return c +} + +func (c HmgetField) Build() Completed { + return Completed(c) +} + +func (c HmgetField) Cache() Cacheable { + return Cacheable(c) +} + +type HmgetKey Completed + +func (c HmgetKey) Field(field ...string) HmgetField { + c.cs.s = append(c.cs.s, field...) + return (HmgetField)(c) +} + +type Hmset Completed + +func (b Builder) Hmset() (c Hmset) { + c = Hmset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HMSET") + return c +} + +func (c Hmset) Key(key string) HmsetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HmsetKey)(c) +} + +type HmsetFieldValue Completed + +func (c HmsetFieldValue) FieldValue(field string, value string) HmsetFieldValue { + c.cs.s = append(c.cs.s, field, value) + return c +} + +func (c HmsetFieldValue) Build() Completed { + return Completed(c) +} + +type HmsetKey Completed + +func (c HmsetKey) FieldValue() HmsetFieldValue { + return (HmsetFieldValue)(c) +} + +type Hrandfield Completed + +func (b Builder) Hrandfield() (c Hrandfield) { + c = Hrandfield{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HRANDFIELD") + return c +} + +func (c Hrandfield) Key(key string) HrandfieldKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HrandfieldKey)(c) +} + +type HrandfieldKey Completed + +func (c HrandfieldKey) Count(count int64) HrandfieldOptionsCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (HrandfieldOptionsCount)(c) +} + +func (c HrandfieldKey) Build() Completed { + return Completed(c) +} + +type HrandfieldOptionsCount Completed + +func (c HrandfieldOptionsCount) Withvalues() HrandfieldOptionsWithvalues { + c.cs.s = append(c.cs.s, "WITHVALUES") + return (HrandfieldOptionsWithvalues)(c) +} + +func (c HrandfieldOptionsCount) Build() Completed { + return Completed(c) +} + +type HrandfieldOptionsWithvalues Completed + +func (c HrandfieldOptionsWithvalues) Build() Completed { + return Completed(c) +} + +type Hscan Completed + +func (b Builder) Hscan() (c Hscan) { + c = Hscan{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HSCAN") + return c +} + +func (c Hscan) Key(key string) HscanKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HscanKey)(c) +} + +type HscanCount Completed + +func (c HscanCount) Build() Completed { + return Completed(c) +} + +type HscanCursor Completed + +func (c HscanCursor) Match(pattern string) HscanMatch { + c.cs.s = append(c.cs.s, "MATCH", pattern) + return (HscanMatch)(c) +} + +func (c HscanCursor) Count(count int64) HscanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (HscanCount)(c) +} + +func (c HscanCursor) Build() Completed { + return Completed(c) +} + +type HscanKey Completed + +func (c HscanKey) Cursor(cursor int64) HscanCursor { + c.cs.s = append(c.cs.s, strconv.FormatInt(cursor, 10)) + return (HscanCursor)(c) +} + +type HscanMatch Completed + +func (c HscanMatch) Count(count int64) HscanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return 
(HscanCount)(c) +} + +func (c HscanMatch) Build() Completed { + return Completed(c) +} + +type Hset Completed + +func (b Builder) Hset() (c Hset) { + c = Hset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HSET") + return c +} + +func (c Hset) Key(key string) HsetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HsetKey)(c) +} + +type HsetFieldValue Completed + +func (c HsetFieldValue) FieldValue(field string, value string) HsetFieldValue { + c.cs.s = append(c.cs.s, field, value) + return c +} + +func (c HsetFieldValue) Build() Completed { + return Completed(c) +} + +type HsetKey Completed + +func (c HsetKey) FieldValue() HsetFieldValue { + return (HsetFieldValue)(c) +} + +type Hsetnx Completed + +func (b Builder) Hsetnx() (c Hsetnx) { + c = Hsetnx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "HSETNX") + return c +} + +func (c Hsetnx) Key(key string) HsetnxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HsetnxKey)(c) +} + +type HsetnxField Completed + +func (c HsetnxField) Value(value string) HsetnxValue { + c.cs.s = append(c.cs.s, value) + return (HsetnxValue)(c) +} + +type HsetnxKey Completed + +func (c HsetnxKey) Field(field string) HsetnxField { + c.cs.s = append(c.cs.s, field) + return (HsetnxField)(c) +} + +type HsetnxValue Completed + +func (c HsetnxValue) Build() Completed { + return Completed(c) +} + +type Hstrlen Completed + +func (b Builder) Hstrlen() (c Hstrlen) { + c = Hstrlen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HSTRLEN") + return c +} + +func (c Hstrlen) Key(key string) HstrlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HstrlenKey)(c) +} + +type HstrlenField Completed + +func (c HstrlenField) Build() Completed { + return Completed(c) +} + +func (c HstrlenField) Cache() Cacheable { + return Cacheable(c) +} + +type HstrlenKey Completed + +func (c HstrlenKey) Field(field string) HstrlenField { + c.cs.s = append(c.cs.s, field) + return (HstrlenField)(c) +} + +type Hvals Completed + +func (b Builder) Hvals() (c Hvals) { + c = Hvals{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "HVALS") + return c +} + +func (c Hvals) Key(key string) HvalsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (HvalsKey)(c) +} + +type HvalsKey Completed + +func (c HvalsKey) Build() Completed { + return Completed(c) +} + +func (c HvalsKey) Cache() Cacheable { + return Cacheable(c) +} + +type Incr Completed + +func (b Builder) Incr() (c Incr) { + c = Incr{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "INCR") + return c +} + +func (c Incr) Key(key string) IncrKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (IncrKey)(c) +} + +type IncrKey Completed + +func (c IncrKey) Build() Completed { + return Completed(c) +} + +type Incrby Completed + +func (b Builder) Incrby() (c Incrby) { + c = Incrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "INCRBY") + return c +} + +func (c Incrby) Key(key string) IncrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return 
(IncrbyKey)(c) +} + +type IncrbyIncrement Completed + +func (c IncrbyIncrement) Build() Completed { + return Completed(c) +} + +type IncrbyKey Completed + +func (c IncrbyKey) Increment(increment int64) IncrbyIncrement { + c.cs.s = append(c.cs.s, strconv.FormatInt(increment, 10)) + return (IncrbyIncrement)(c) +} + +type Incrbyfloat Completed + +func (b Builder) Incrbyfloat() (c Incrbyfloat) { + c = Incrbyfloat{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "INCRBYFLOAT") + return c +} + +func (c Incrbyfloat) Key(key string) IncrbyfloatKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (IncrbyfloatKey)(c) +} + +type IncrbyfloatIncrement Completed + +func (c IncrbyfloatIncrement) Build() Completed { + return Completed(c) +} + +type IncrbyfloatKey Completed + +func (c IncrbyfloatKey) Increment(increment float64) IncrbyfloatIncrement { + c.cs.s = append(c.cs.s, strconv.FormatFloat(increment, 'f', -1, 64)) + return (IncrbyfloatIncrement)(c) +} + +type Info Completed + +func (b Builder) Info() (c Info) { + c = Info{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "INFO") + return c +} + +func (c Info) Section(section ...string) InfoSection { + c.cs.s = append(c.cs.s, section...) + return (InfoSection)(c) +} + +func (c Info) Build() Completed { + return Completed(c) +} + +type InfoSection Completed + +func (c InfoSection) Section(section ...string) InfoSection { + c.cs.s = append(c.cs.s, section...) + return c +} + +func (c InfoSection) Build() Completed { + return Completed(c) +} + +type JsonArrappend Completed + +func (b Builder) JsonArrappend() (c JsonArrappend) { + c = JsonArrappend{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.ARRAPPEND") + return c +} + +func (c JsonArrappend) Key(key string) JsonArrappendKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonArrappendKey)(c) +} + +type JsonArrappendKey Completed + +func (c JsonArrappendKey) Path(path string) JsonArrappendPath { + c.cs.s = append(c.cs.s, path) + return (JsonArrappendPath)(c) +} + +func (c JsonArrappendKey) Value(value ...string) JsonArrappendValue { + c.cs.s = append(c.cs.s, value...) + return (JsonArrappendValue)(c) +} + +type JsonArrappendPath Completed + +func (c JsonArrappendPath) Value(value ...string) JsonArrappendValue { + c.cs.s = append(c.cs.s, value...) + return (JsonArrappendValue)(c) +} + +type JsonArrappendValue Completed + +func (c JsonArrappendValue) Value(value ...string) JsonArrappendValue { + c.cs.s = append(c.cs.s, value...) 
+ return c +} + +func (c JsonArrappendValue) Build() Completed { + return Completed(c) +} + +type JsonArrindex Completed + +func (b Builder) JsonArrindex() (c JsonArrindex) { + c = JsonArrindex{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.ARRINDEX") + return c +} + +func (c JsonArrindex) Key(key string) JsonArrindexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonArrindexKey)(c) +} + +type JsonArrindexKey Completed + +func (c JsonArrindexKey) Path(path string) JsonArrindexPath { + c.cs.s = append(c.cs.s, path) + return (JsonArrindexPath)(c) +} + +type JsonArrindexPath Completed + +func (c JsonArrindexPath) Value(value string) JsonArrindexValue { + c.cs.s = append(c.cs.s, value) + return (JsonArrindexValue)(c) +} + +type JsonArrindexStartStart Completed + +func (c JsonArrindexStartStart) Stop(stop int64) JsonArrindexStartStop { + c.cs.s = append(c.cs.s, strconv.FormatInt(stop, 10)) + return (JsonArrindexStartStop)(c) +} + +func (c JsonArrindexStartStart) Build() Completed { + return Completed(c) +} + +func (c JsonArrindexStartStart) Cache() Cacheable { + return Cacheable(c) +} + +type JsonArrindexStartStop Completed + +func (c JsonArrindexStartStop) Build() Completed { + return Completed(c) +} + +func (c JsonArrindexStartStop) Cache() Cacheable { + return Cacheable(c) +} + +type JsonArrindexValue Completed + +func (c JsonArrindexValue) Start(start int64) JsonArrindexStartStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (JsonArrindexStartStart)(c) +} + +func (c JsonArrindexValue) Build() Completed { + return Completed(c) +} + +func (c JsonArrindexValue) Cache() Cacheable { + return Cacheable(c) +} + +type JsonArrinsert Completed + +func (b Builder) JsonArrinsert() (c JsonArrinsert) { + c = JsonArrinsert{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.ARRINSERT") + return c +} + +func (c JsonArrinsert) Key(key string) JsonArrinsertKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonArrinsertKey)(c) +} + +type JsonArrinsertIndex Completed + +func (c JsonArrinsertIndex) Value(value ...string) JsonArrinsertValue { + c.cs.s = append(c.cs.s, value...) + return (JsonArrinsertValue)(c) +} + +type JsonArrinsertKey Completed + +func (c JsonArrinsertKey) Path(path string) JsonArrinsertPath { + c.cs.s = append(c.cs.s, path) + return (JsonArrinsertPath)(c) +} + +type JsonArrinsertPath Completed + +func (c JsonArrinsertPath) Index(index int64) JsonArrinsertIndex { + c.cs.s = append(c.cs.s, strconv.FormatInt(index, 10)) + return (JsonArrinsertIndex)(c) +} + +type JsonArrinsertValue Completed + +func (c JsonArrinsertValue) Value(value ...string) JsonArrinsertValue { + c.cs.s = append(c.cs.s, value...) 
+ return c +} + +func (c JsonArrinsertValue) Build() Completed { + return Completed(c) +} + +type JsonArrlen Completed + +func (b Builder) JsonArrlen() (c JsonArrlen) { + c = JsonArrlen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.ARRLEN") + return c +} + +func (c JsonArrlen) Key(key string) JsonArrlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonArrlenKey)(c) +} + +type JsonArrlenKey Completed + +func (c JsonArrlenKey) Path(path string) JsonArrlenPath { + c.cs.s = append(c.cs.s, path) + return (JsonArrlenPath)(c) +} + +func (c JsonArrlenKey) Build() Completed { + return Completed(c) +} + +func (c JsonArrlenKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonArrlenPath Completed + +func (c JsonArrlenPath) Build() Completed { + return Completed(c) +} + +func (c JsonArrlenPath) Cache() Cacheable { + return Cacheable(c) +} + +type JsonArrpop Completed + +func (b Builder) JsonArrpop() (c JsonArrpop) { + c = JsonArrpop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.ARRPOP") + return c +} + +func (c JsonArrpop) Key(key string) JsonArrpopKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonArrpopKey)(c) +} + +type JsonArrpopKey Completed + +func (c JsonArrpopKey) Path(path string) JsonArrpopPathPath { + c.cs.s = append(c.cs.s, path) + return (JsonArrpopPathPath)(c) +} + +func (c JsonArrpopKey) Build() Completed { + return Completed(c) +} + +type JsonArrpopPathIndex Completed + +func (c JsonArrpopPathIndex) Build() Completed { + return Completed(c) +} + +type JsonArrpopPathPath Completed + +func (c JsonArrpopPathPath) Index(index int64) JsonArrpopPathIndex { + c.cs.s = append(c.cs.s, strconv.FormatInt(index, 10)) + return (JsonArrpopPathIndex)(c) +} + +func (c JsonArrpopPathPath) Build() Completed { + return Completed(c) +} + +type JsonArrtrim Completed + +func (b Builder) JsonArrtrim() (c JsonArrtrim) { + c = JsonArrtrim{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.ARRTRIM") + return c +} + +func (c JsonArrtrim) Key(key string) JsonArrtrimKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonArrtrimKey)(c) +} + +type JsonArrtrimKey Completed + +func (c JsonArrtrimKey) Path(path string) JsonArrtrimPath { + c.cs.s = append(c.cs.s, path) + return (JsonArrtrimPath)(c) +} + +type JsonArrtrimPath Completed + +func (c JsonArrtrimPath) Start(start int64) JsonArrtrimStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (JsonArrtrimStart)(c) +} + +type JsonArrtrimStart Completed + +func (c JsonArrtrimStart) Stop(stop int64) JsonArrtrimStop { + c.cs.s = append(c.cs.s, strconv.FormatInt(stop, 10)) + return (JsonArrtrimStop)(c) +} + +type JsonArrtrimStop Completed + +func (c JsonArrtrimStop) Build() Completed { + return Completed(c) +} + +type JsonClear Completed + +func (b Builder) JsonClear() (c JsonClear) { + c = JsonClear{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.CLEAR") + return c +} + +func (c JsonClear) Key(key string) JsonClearKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonClearKey)(c) +} + +type JsonClearKey Completed + +func (c JsonClearKey) Path(path string) JsonClearPath { + c.cs.s = append(c.cs.s, path) + 
return (JsonClearPath)(c) +} + +func (c JsonClearKey) Build() Completed { + return Completed(c) +} + +type JsonClearPath Completed + +func (c JsonClearPath) Build() Completed { + return Completed(c) +} + +type JsonDebugHelp Completed + +func (b Builder) JsonDebugHelp() (c JsonDebugHelp) { + c = JsonDebugHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.DEBUG", "HELP") + return c +} + +func (c JsonDebugHelp) Build() Completed { + return Completed(c) +} + +type JsonDebugMemory Completed + +func (b Builder) JsonDebugMemory() (c JsonDebugMemory) { + c = JsonDebugMemory{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.DEBUG", "MEMORY") + return c +} + +func (c JsonDebugMemory) Key(key string) JsonDebugMemoryKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonDebugMemoryKey)(c) +} + +type JsonDebugMemoryKey Completed + +func (c JsonDebugMemoryKey) Path(path string) JsonDebugMemoryPath { + c.cs.s = append(c.cs.s, path) + return (JsonDebugMemoryPath)(c) +} + +func (c JsonDebugMemoryKey) Build() Completed { + return Completed(c) +} + +type JsonDebugMemoryPath Completed + +func (c JsonDebugMemoryPath) Build() Completed { + return Completed(c) +} + +type JsonDel Completed + +func (b Builder) JsonDel() (c JsonDel) { + c = JsonDel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.DEL") + return c +} + +func (c JsonDel) Key(key string) JsonDelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonDelKey)(c) +} + +type JsonDelKey Completed + +func (c JsonDelKey) Path(path string) JsonDelPath { + c.cs.s = append(c.cs.s, path) + return (JsonDelPath)(c) +} + +func (c JsonDelKey) Build() Completed { + return Completed(c) +} + +type JsonDelPath Completed + +func (c JsonDelPath) Build() Completed { + return Completed(c) +} + +type JsonForget Completed + +func (b Builder) JsonForget() (c JsonForget) { + c = JsonForget{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.FORGET") + return c +} + +func (c JsonForget) Key(key string) JsonForgetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonForgetKey)(c) +} + +type JsonForgetKey Completed + +func (c JsonForgetKey) Path(path string) JsonForgetPath { + c.cs.s = append(c.cs.s, path) + return (JsonForgetPath)(c) +} + +func (c JsonForgetKey) Build() Completed { + return Completed(c) +} + +type JsonForgetPath Completed + +func (c JsonForgetPath) Build() Completed { + return Completed(c) +} + +type JsonGet Completed + +func (b Builder) JsonGet() (c JsonGet) { + c = JsonGet{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.GET") + return c +} + +func (c JsonGet) Key(key string) JsonGetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonGetKey)(c) +} + +type JsonGetIndent Completed + +func (c JsonGetIndent) Newline(newline string) JsonGetNewline { + c.cs.s = append(c.cs.s, "NEWLINE", newline) + return (JsonGetNewline)(c) +} + +func (c JsonGetIndent) Space(space string) JsonGetSpace { + c.cs.s = append(c.cs.s, "SPACE", space) + return (JsonGetSpace)(c) +} + +func (c JsonGetIndent) Paths(paths ...string) JsonGetPaths { + c.cs.s = append(c.cs.s, paths...) 
+ return (JsonGetPaths)(c) +} + +func (c JsonGetIndent) Build() Completed { + return Completed(c) +} + +func (c JsonGetIndent) Cache() Cacheable { + return Cacheable(c) +} + +type JsonGetKey Completed + +func (c JsonGetKey) Indent(indent string) JsonGetIndent { + c.cs.s = append(c.cs.s, "INDENT", indent) + return (JsonGetIndent)(c) +} + +func (c JsonGetKey) Newline(newline string) JsonGetNewline { + c.cs.s = append(c.cs.s, "NEWLINE", newline) + return (JsonGetNewline)(c) +} + +func (c JsonGetKey) Space(space string) JsonGetSpace { + c.cs.s = append(c.cs.s, "SPACE", space) + return (JsonGetSpace)(c) +} + +func (c JsonGetKey) Paths(paths ...string) JsonGetPaths { + c.cs.s = append(c.cs.s, paths...) + return (JsonGetPaths)(c) +} + +func (c JsonGetKey) Build() Completed { + return Completed(c) +} + +func (c JsonGetKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonGetNewline Completed + +func (c JsonGetNewline) Space(space string) JsonGetSpace { + c.cs.s = append(c.cs.s, "SPACE", space) + return (JsonGetSpace)(c) +} + +func (c JsonGetNewline) Paths(paths ...string) JsonGetPaths { + c.cs.s = append(c.cs.s, paths...) + return (JsonGetPaths)(c) +} + +func (c JsonGetNewline) Build() Completed { + return Completed(c) +} + +func (c JsonGetNewline) Cache() Cacheable { + return Cacheable(c) +} + +type JsonGetPaths Completed + +func (c JsonGetPaths) Paths(paths ...string) JsonGetPaths { + c.cs.s = append(c.cs.s, paths...) + return c +} + +func (c JsonGetPaths) Build() Completed { + return Completed(c) +} + +func (c JsonGetPaths) Cache() Cacheable { + return Cacheable(c) +} + +type JsonGetSpace Completed + +func (c JsonGetSpace) Paths(paths ...string) JsonGetPaths { + c.cs.s = append(c.cs.s, paths...) + return (JsonGetPaths)(c) +} + +func (c JsonGetSpace) Build() Completed { + return Completed(c) +} + +func (c JsonGetSpace) Cache() Cacheable { + return Cacheable(c) +} + +type JsonMget Completed + +func (b Builder) JsonMget() (c JsonMget) { + c = JsonMget{cs: get(), ks: b.ks, cf: mtGetTag} + c.cs.s = append(c.cs.s, "JSON.MGET") + return c +} + +func (c JsonMget) Key(key ...string) JsonMgetKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (JsonMgetKey)(c) +} + +type JsonMgetKey Completed + +func (c JsonMgetKey) Key(key ...string) JsonMgetKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return c +} + +func (c JsonMgetKey) Path(path string) JsonMgetPath { + c.cs.s = append(c.cs.s, path) + return (JsonMgetPath)(c) +} + +type JsonMgetPath Completed + +func (c JsonMgetPath) Build() Completed { + return Completed(c) +} + +func (c JsonMgetPath) Cache() Cacheable { + return Cacheable(c) +} + +type JsonNumincrby Completed + +func (b Builder) JsonNumincrby() (c JsonNumincrby) { + c = JsonNumincrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.NUMINCRBY") + return c +} + +func (c JsonNumincrby) Key(key string) JsonNumincrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonNumincrbyKey)(c) +} + +type JsonNumincrbyKey Completed + +func (c JsonNumincrbyKey) Path(path string) JsonNumincrbyPath { + c.cs.s = append(c.cs.s, path) + return (JsonNumincrbyPath)(c) +} + +type JsonNumincrbyPath Completed + +func (c JsonNumincrbyPath) Value(value float64) JsonNumincrbyValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return (JsonNumincrbyValue)(c) +} + +type JsonNumincrbyValue Completed + +func (c JsonNumincrbyValue) Build() Completed { + return Completed(c) +} + +type JsonNummultby Completed + +func (b Builder) JsonNummultby() (c JsonNummultby) { + c = JsonNummultby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.NUMMULTBY") + return c +} + +func (c JsonNummultby) Key(key string) JsonNummultbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonNummultbyKey)(c) +} + +type JsonNummultbyKey Completed + +func (c JsonNummultbyKey) Path(path string) JsonNummultbyPath { + c.cs.s = append(c.cs.s, path) + return (JsonNummultbyPath)(c) +} + +type JsonNummultbyPath Completed + +func (c JsonNummultbyPath) Value(value float64) JsonNummultbyValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return (JsonNummultbyValue)(c) +} + +type JsonNummultbyValue Completed + +func (c JsonNummultbyValue) Build() Completed { + return Completed(c) +} + +type JsonObjkeys Completed + +func (b Builder) JsonObjkeys() (c JsonObjkeys) { + c = JsonObjkeys{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.OBJKEYS") + return c +} + +func (c JsonObjkeys) Key(key string) JsonObjkeysKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonObjkeysKey)(c) +} + +type JsonObjkeysKey Completed + +func (c JsonObjkeysKey) Path(path string) JsonObjkeysPath { + c.cs.s = append(c.cs.s, path) + return (JsonObjkeysPath)(c) +} + +func (c JsonObjkeysKey) Build() Completed { + return Completed(c) +} + +func (c JsonObjkeysKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonObjkeysPath Completed + +func (c JsonObjkeysPath) Build() Completed { + return Completed(c) +} + +func (c JsonObjkeysPath) Cache() Cacheable { + return Cacheable(c) +} + +type JsonObjlen Completed + +func (b Builder) JsonObjlen() (c JsonObjlen) { + c = JsonObjlen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.OBJLEN") + return c +} + +func (c JsonObjlen) Key(key string) JsonObjlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonObjlenKey)(c) +} + +type JsonObjlenKey Completed + +func (c JsonObjlenKey) Path(path string) JsonObjlenPath { + c.cs.s = append(c.cs.s, 
path) + return (JsonObjlenPath)(c) +} + +func (c JsonObjlenKey) Build() Completed { + return Completed(c) +} + +func (c JsonObjlenKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonObjlenPath Completed + +func (c JsonObjlenPath) Build() Completed { + return Completed(c) +} + +func (c JsonObjlenPath) Cache() Cacheable { + return Cacheable(c) +} + +type JsonResp Completed + +func (b Builder) JsonResp() (c JsonResp) { + c = JsonResp{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.RESP") + return c +} + +func (c JsonResp) Key(key string) JsonRespKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonRespKey)(c) +} + +type JsonRespKey Completed + +func (c JsonRespKey) Path(path string) JsonRespPath { + c.cs.s = append(c.cs.s, path) + return (JsonRespPath)(c) +} + +func (c JsonRespKey) Build() Completed { + return Completed(c) +} + +func (c JsonRespKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonRespPath Completed + +func (c JsonRespPath) Build() Completed { + return Completed(c) +} + +func (c JsonRespPath) Cache() Cacheable { + return Cacheable(c) +} + +type JsonSet Completed + +func (b Builder) JsonSet() (c JsonSet) { + c = JsonSet{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.SET") + return c +} + +func (c JsonSet) Key(key string) JsonSetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonSetKey)(c) +} + +type JsonSetConditionNx Completed + +func (c JsonSetConditionNx) Build() Completed { + return Completed(c) +} + +type JsonSetConditionXx Completed + +func (c JsonSetConditionXx) Build() Completed { + return Completed(c) +} + +type JsonSetKey Completed + +func (c JsonSetKey) Path(path string) JsonSetPath { + c.cs.s = append(c.cs.s, path) + return (JsonSetPath)(c) +} + +type JsonSetPath Completed + +func (c JsonSetPath) Value(value string) JsonSetValue { + c.cs.s = append(c.cs.s, value) + return (JsonSetValue)(c) +} + +type JsonSetValue Completed + +func (c JsonSetValue) Nx() JsonSetConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (JsonSetConditionNx)(c) +} + +func (c JsonSetValue) Xx() JsonSetConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (JsonSetConditionXx)(c) +} + +func (c JsonSetValue) Build() Completed { + return Completed(c) +} + +type JsonStrappend Completed + +func (b Builder) JsonStrappend() (c JsonStrappend) { + c = JsonStrappend{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.STRAPPEND") + return c +} + +func (c JsonStrappend) Key(key string) JsonStrappendKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonStrappendKey)(c) +} + +type JsonStrappendKey Completed + +func (c JsonStrappendKey) Path(path string) JsonStrappendPath { + c.cs.s = append(c.cs.s, path) + return (JsonStrappendPath)(c) +} + +func (c JsonStrappendKey) Value(value string) JsonStrappendValue { + c.cs.s = append(c.cs.s, value) + return (JsonStrappendValue)(c) +} + +type JsonStrappendPath Completed + +func (c JsonStrappendPath) Value(value string) JsonStrappendValue { + c.cs.s = append(c.cs.s, value) + return (JsonStrappendValue)(c) +} + +type JsonStrappendValue Completed + +func (c JsonStrappendValue) Build() Completed { + return Completed(c) +} + +type JsonStrlen Completed + +func (b Builder) JsonStrlen() (c JsonStrlen) { + c = JsonStrlen{cs: 
get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.STRLEN") + return c +} + +func (c JsonStrlen) Key(key string) JsonStrlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonStrlenKey)(c) +} + +type JsonStrlenKey Completed + +func (c JsonStrlenKey) Path(path string) JsonStrlenPath { + c.cs.s = append(c.cs.s, path) + return (JsonStrlenPath)(c) +} + +func (c JsonStrlenKey) Build() Completed { + return Completed(c) +} + +func (c JsonStrlenKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonStrlenPath Completed + +func (c JsonStrlenPath) Build() Completed { + return Completed(c) +} + +func (c JsonStrlenPath) Cache() Cacheable { + return Cacheable(c) +} + +type JsonToggle Completed + +func (b Builder) JsonToggle() (c JsonToggle) { + c = JsonToggle{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "JSON.TOGGLE") + return c +} + +func (c JsonToggle) Key(key string) JsonToggleKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonToggleKey)(c) +} + +type JsonToggleKey Completed + +func (c JsonToggleKey) Path(path string) JsonTogglePath { + c.cs.s = append(c.cs.s, path) + return (JsonTogglePath)(c) +} + +type JsonTogglePath Completed + +func (c JsonTogglePath) Build() Completed { + return Completed(c) +} + +type JsonType Completed + +func (b Builder) JsonType() (c JsonType) { + c = JsonType{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "JSON.TYPE") + return c +} + +func (c JsonType) Key(key string) JsonTypeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (JsonTypeKey)(c) +} + +type JsonTypeKey Completed + +func (c JsonTypeKey) Path(path string) JsonTypePath { + c.cs.s = append(c.cs.s, path) + return (JsonTypePath)(c) +} + +func (c JsonTypeKey) Build() Completed { + return Completed(c) +} + +func (c JsonTypeKey) Cache() Cacheable { + return Cacheable(c) +} + +type JsonTypePath Completed + +func (c JsonTypePath) Build() Completed { + return Completed(c) +} + +func (c JsonTypePath) Cache() Cacheable { + return Cacheable(c) +} + +type Keys Completed + +func (b Builder) Keys() (c Keys) { + c = Keys{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "KEYS") + return c +} + +func (c Keys) Pattern(pattern string) KeysPattern { + c.cs.s = append(c.cs.s, pattern) + return (KeysPattern)(c) +} + +type KeysPattern Completed + +func (c KeysPattern) Build() Completed { + return Completed(c) +} + +type Lastsave Completed + +func (b Builder) Lastsave() (c Lastsave) { + c = Lastsave{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LASTSAVE") + return c +} + +func (c Lastsave) Build() Completed { + return Completed(c) +} + +type LatencyDoctor Completed + +func (b Builder) LatencyDoctor() (c LatencyDoctor) { + c = LatencyDoctor{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "DOCTOR") + return c +} + +func (c LatencyDoctor) Build() Completed { + return Completed(c) +} + +type LatencyGraph Completed + +func (b Builder) LatencyGraph() (c LatencyGraph) { + c = LatencyGraph{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "GRAPH") + return c +} + +func (c LatencyGraph) Event(event string) LatencyGraphEvent { + c.cs.s = append(c.cs.s, event) + return (LatencyGraphEvent)(c) +} + +type LatencyGraphEvent Completed + +func (c LatencyGraphEvent) Build() Completed { + 
return Completed(c) +} + +type LatencyHelp Completed + +func (b Builder) LatencyHelp() (c LatencyHelp) { + c = LatencyHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "HELP") + return c +} + +func (c LatencyHelp) Build() Completed { + return Completed(c) +} + +type LatencyHistogram Completed + +func (b Builder) LatencyHistogram() (c LatencyHistogram) { + c = LatencyHistogram{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "HISTOGRAM") + return c +} + +func (c LatencyHistogram) Command(command ...string) LatencyHistogramCommand { + c.cs.s = append(c.cs.s, command...) + return (LatencyHistogramCommand)(c) +} + +func (c LatencyHistogram) Build() Completed { + return Completed(c) +} + +type LatencyHistogramCommand Completed + +func (c LatencyHistogramCommand) Command(command ...string) LatencyHistogramCommand { + c.cs.s = append(c.cs.s, command...) + return c +} + +func (c LatencyHistogramCommand) Build() Completed { + return Completed(c) +} + +type LatencyHistory Completed + +func (b Builder) LatencyHistory() (c LatencyHistory) { + c = LatencyHistory{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "HISTORY") + return c +} + +func (c LatencyHistory) Event(event string) LatencyHistoryEvent { + c.cs.s = append(c.cs.s, event) + return (LatencyHistoryEvent)(c) +} + +type LatencyHistoryEvent Completed + +func (c LatencyHistoryEvent) Build() Completed { + return Completed(c) +} + +type LatencyLatest Completed + +func (b Builder) LatencyLatest() (c LatencyLatest) { + c = LatencyLatest{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "LATEST") + return c +} + +func (c LatencyLatest) Build() Completed { + return Completed(c) +} + +type LatencyReset Completed + +func (b Builder) LatencyReset() (c LatencyReset) { + c = LatencyReset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LATENCY", "RESET") + return c +} + +func (c LatencyReset) Event(event ...string) LatencyResetEvent { + c.cs.s = append(c.cs.s, event...) + return (LatencyResetEvent)(c) +} + +func (c LatencyReset) Build() Completed { + return Completed(c) +} + +type LatencyResetEvent Completed + +func (c LatencyResetEvent) Event(event ...string) LatencyResetEvent { + c.cs.s = append(c.cs.s, event...) 
+ return c +} + +func (c LatencyResetEvent) Build() Completed { + return Completed(c) +} + +type Lcs Completed + +func (b Builder) Lcs() (c Lcs) { + c = Lcs{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "LCS") + return c +} + +func (c Lcs) Key1(key1 string) LcsKey1 { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key1) + } else { + c.ks = check(c.ks, slot(key1)) + } + c.cs.s = append(c.cs.s, key1) + return (LcsKey1)(c) +} + +type LcsIdx Completed + +func (c LcsIdx) Minmatchlen(len int64) LcsMinmatchlen { + c.cs.s = append(c.cs.s, "MINMATCHLEN", strconv.FormatInt(len, 10)) + return (LcsMinmatchlen)(c) +} + +func (c LcsIdx) Withmatchlen() LcsWithmatchlen { + c.cs.s = append(c.cs.s, "WITHMATCHLEN") + return (LcsWithmatchlen)(c) +} + +func (c LcsIdx) Build() Completed { + return Completed(c) +} + +type LcsKey1 Completed + +func (c LcsKey1) Key2(key2 string) LcsKey2 { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key2) + } else { + c.ks = check(c.ks, slot(key2)) + } + c.cs.s = append(c.cs.s, key2) + return (LcsKey2)(c) +} + +type LcsKey2 Completed + +func (c LcsKey2) Len() LcsLen { + c.cs.s = append(c.cs.s, "LEN") + return (LcsLen)(c) +} + +func (c LcsKey2) Idx() LcsIdx { + c.cs.s = append(c.cs.s, "IDX") + return (LcsIdx)(c) +} + +func (c LcsKey2) Minmatchlen(len int64) LcsMinmatchlen { + c.cs.s = append(c.cs.s, "MINMATCHLEN", strconv.FormatInt(len, 10)) + return (LcsMinmatchlen)(c) +} + +func (c LcsKey2) Withmatchlen() LcsWithmatchlen { + c.cs.s = append(c.cs.s, "WITHMATCHLEN") + return (LcsWithmatchlen)(c) +} + +func (c LcsKey2) Build() Completed { + return Completed(c) +} + +type LcsLen Completed + +func (c LcsLen) Idx() LcsIdx { + c.cs.s = append(c.cs.s, "IDX") + return (LcsIdx)(c) +} + +func (c LcsLen) Minmatchlen(len int64) LcsMinmatchlen { + c.cs.s = append(c.cs.s, "MINMATCHLEN", strconv.FormatInt(len, 10)) + return (LcsMinmatchlen)(c) +} + +func (c LcsLen) Withmatchlen() LcsWithmatchlen { + c.cs.s = append(c.cs.s, "WITHMATCHLEN") + return (LcsWithmatchlen)(c) +} + +func (c LcsLen) Build() Completed { + return Completed(c) +} + +type LcsMinmatchlen Completed + +func (c LcsMinmatchlen) Withmatchlen() LcsWithmatchlen { + c.cs.s = append(c.cs.s, "WITHMATCHLEN") + return (LcsWithmatchlen)(c) +} + +func (c LcsMinmatchlen) Build() Completed { + return Completed(c) +} + +type LcsWithmatchlen Completed + +func (c LcsWithmatchlen) Build() Completed { + return Completed(c) +} + +type Lindex Completed + +func (b Builder) Lindex() (c Lindex) { + c = Lindex{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "LINDEX") + return c +} + +func (c Lindex) Key(key string) LindexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LindexKey)(c) +} + +type LindexIndex Completed + +func (c LindexIndex) Build() Completed { + return Completed(c) +} + +func (c LindexIndex) Cache() Cacheable { + return Cacheable(c) +} + +type LindexKey Completed + +func (c LindexKey) Index(index int64) LindexIndex { + c.cs.s = append(c.cs.s, strconv.FormatInt(index, 10)) + return (LindexIndex)(c) +} + +type Linsert Completed + +func (b Builder) Linsert() (c Linsert) { + c = Linsert{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LINSERT") + return c +} + +func (c Linsert) Key(key string) LinsertKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LinsertKey)(c) +} + +type LinsertElement Completed + 
+func (c LinsertElement) Build() Completed { + return Completed(c) +} + +type LinsertKey Completed + +func (c LinsertKey) Before() LinsertWhereBefore { + c.cs.s = append(c.cs.s, "BEFORE") + return (LinsertWhereBefore)(c) +} + +func (c LinsertKey) After() LinsertWhereAfter { + c.cs.s = append(c.cs.s, "AFTER") + return (LinsertWhereAfter)(c) +} + +type LinsertPivot Completed + +func (c LinsertPivot) Element(element string) LinsertElement { + c.cs.s = append(c.cs.s, element) + return (LinsertElement)(c) +} + +type LinsertWhereAfter Completed + +func (c LinsertWhereAfter) Pivot(pivot string) LinsertPivot { + c.cs.s = append(c.cs.s, pivot) + return (LinsertPivot)(c) +} + +type LinsertWhereBefore Completed + +func (c LinsertWhereBefore) Pivot(pivot string) LinsertPivot { + c.cs.s = append(c.cs.s, pivot) + return (LinsertPivot)(c) +} + +type Llen Completed + +func (b Builder) Llen() (c Llen) { + c = Llen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "LLEN") + return c +} + +func (c Llen) Key(key string) LlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LlenKey)(c) +} + +type LlenKey Completed + +func (c LlenKey) Build() Completed { + return Completed(c) +} + +func (c LlenKey) Cache() Cacheable { + return Cacheable(c) +} + +type Lmove Completed + +func (b Builder) Lmove() (c Lmove) { + c = Lmove{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LMOVE") + return c +} + +func (c Lmove) Source(source string) LmoveSource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (LmoveSource)(c) +} + +type LmoveDestination Completed + +func (c LmoveDestination) Left() LmoveWherefromLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (LmoveWherefromLeft)(c) +} + +func (c LmoveDestination) Right() LmoveWherefromRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (LmoveWherefromRight)(c) +} + +type LmoveSource Completed + +func (c LmoveSource) Destination(destination string) LmoveDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (LmoveDestination)(c) +} + +type LmoveWherefromLeft Completed + +func (c LmoveWherefromLeft) Left() LmoveWheretoLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (LmoveWheretoLeft)(c) +} + +func (c LmoveWherefromLeft) Right() LmoveWheretoRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (LmoveWheretoRight)(c) +} + +type LmoveWherefromRight Completed + +func (c LmoveWherefromRight) Left() LmoveWheretoLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (LmoveWheretoLeft)(c) +} + +func (c LmoveWherefromRight) Right() LmoveWheretoRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (LmoveWheretoRight)(c) +} + +type LmoveWheretoLeft Completed + +func (c LmoveWheretoLeft) Build() Completed { + return Completed(c) +} + +type LmoveWheretoRight Completed + +func (c LmoveWheretoRight) Build() Completed { + return Completed(c) +} + +type Lmpop Completed + +func (b Builder) Lmpop() (c Lmpop) { + c = Lmpop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LMPOP") + return c +} + +func (c Lmpop) Numkeys(numkeys int64) LmpopNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (LmpopNumkeys)(c) +} + +type LmpopCount Completed + +func (c LmpopCount) Build() Completed { + return Completed(c) +} + +type LmpopKey Completed + +func (c 
LmpopKey) Key(key ...string) LmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c LmpopKey) Left() LmpopWhereLeft { + c.cs.s = append(c.cs.s, "LEFT") + return (LmpopWhereLeft)(c) +} + +func (c LmpopKey) Right() LmpopWhereRight { + c.cs.s = append(c.cs.s, "RIGHT") + return (LmpopWhereRight)(c) +} + +type LmpopNumkeys Completed + +func (c LmpopNumkeys) Key(key ...string) LmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (LmpopKey)(c) +} + +type LmpopWhereLeft Completed + +func (c LmpopWhereLeft) Count(count int64) LmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (LmpopCount)(c) +} + +func (c LmpopWhereLeft) Build() Completed { + return Completed(c) +} + +type LmpopWhereRight Completed + +func (c LmpopWhereRight) Count(count int64) LmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (LmpopCount)(c) +} + +func (c LmpopWhereRight) Build() Completed { + return Completed(c) +} + +type Lolwut Completed + +func (b Builder) Lolwut() (c Lolwut) { + c = Lolwut{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "LOLWUT") + return c +} + +func (c Lolwut) Version(version int64) LolwutVersion { + c.cs.s = append(c.cs.s, "VERSION", strconv.FormatInt(version, 10)) + return (LolwutVersion)(c) +} + +func (c Lolwut) Build() Completed { + return Completed(c) +} + +type LolwutVersion Completed + +func (c LolwutVersion) Build() Completed { + return Completed(c) +} + +type Lpop Completed + +func (b Builder) Lpop() (c Lpop) { + c = Lpop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LPOP") + return c +} + +func (c Lpop) Key(key string) LpopKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LpopKey)(c) +} + +type LpopCount Completed + +func (c LpopCount) Build() Completed { + return Completed(c) +} + +type LpopKey Completed + +func (c LpopKey) Count(count int64) LpopCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (LpopCount)(c) +} + +func (c LpopKey) Build() Completed { + return Completed(c) +} + +type Lpos Completed + +func (b Builder) Lpos() (c Lpos) { + c = Lpos{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "LPOS") + return c +} + +func (c Lpos) Key(key string) LposKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LposKey)(c) +} + +type LposCount Completed + +func (c LposCount) Maxlen(len int64) LposMaxlen { + c.cs.s = append(c.cs.s, "MAXLEN", strconv.FormatInt(len, 10)) + return (LposMaxlen)(c) +} + +func (c LposCount) Build() Completed { + return Completed(c) +} + +func (c LposCount) Cache() Cacheable { + return Cacheable(c) +} + +type LposElement Completed + +func (c LposElement) Rank(rank int64) LposRank { + c.cs.s = append(c.cs.s, "RANK", strconv.FormatInt(rank, 10)) + return (LposRank)(c) +} + +func (c LposElement) Count(numMatches int64) LposCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(numMatches, 10)) + return (LposCount)(c) +} + +func (c LposElement) Maxlen(len int64) LposMaxlen { + c.cs.s = append(c.cs.s, "MAXLEN", 
strconv.FormatInt(len, 10)) + return (LposMaxlen)(c) +} + +func (c LposElement) Build() Completed { + return Completed(c) +} + +func (c LposElement) Cache() Cacheable { + return Cacheable(c) +} + +type LposKey Completed + +func (c LposKey) Element(element string) LposElement { + c.cs.s = append(c.cs.s, element) + return (LposElement)(c) +} + +type LposMaxlen Completed + +func (c LposMaxlen) Build() Completed { + return Completed(c) +} + +func (c LposMaxlen) Cache() Cacheable { + return Cacheable(c) +} + +type LposRank Completed + +func (c LposRank) Count(numMatches int64) LposCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(numMatches, 10)) + return (LposCount)(c) +} + +func (c LposRank) Maxlen(len int64) LposMaxlen { + c.cs.s = append(c.cs.s, "MAXLEN", strconv.FormatInt(len, 10)) + return (LposMaxlen)(c) +} + +func (c LposRank) Build() Completed { + return Completed(c) +} + +func (c LposRank) Cache() Cacheable { + return Cacheable(c) +} + +type Lpush Completed + +func (b Builder) Lpush() (c Lpush) { + c = Lpush{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LPUSH") + return c +} + +func (c Lpush) Key(key string) LpushKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LpushKey)(c) +} + +type LpushElement Completed + +func (c LpushElement) Element(element ...string) LpushElement { + c.cs.s = append(c.cs.s, element...) + return c +} + +func (c LpushElement) Build() Completed { + return Completed(c) +} + +type LpushKey Completed + +func (c LpushKey) Element(element ...string) LpushElement { + c.cs.s = append(c.cs.s, element...) + return (LpushElement)(c) +} + +type Lpushx Completed + +func (b Builder) Lpushx() (c Lpushx) { + c = Lpushx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LPUSHX") + return c +} + +func (c Lpushx) Key(key string) LpushxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LpushxKey)(c) +} + +type LpushxElement Completed + +func (c LpushxElement) Element(element ...string) LpushxElement { + c.cs.s = append(c.cs.s, element...) + return c +} + +func (c LpushxElement) Build() Completed { + return Completed(c) +} + +type LpushxKey Completed + +func (c LpushxKey) Element(element ...string) LpushxElement { + c.cs.s = append(c.cs.s, element...) 
+ return (LpushxElement)(c) +} + +type Lrange Completed + +func (b Builder) Lrange() (c Lrange) { + c = Lrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "LRANGE") + return c +} + +func (c Lrange) Key(key string) LrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LrangeKey)(c) +} + +type LrangeKey Completed + +func (c LrangeKey) Start(start int64) LrangeStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (LrangeStart)(c) +} + +type LrangeStart Completed + +func (c LrangeStart) Stop(stop int64) LrangeStop { + c.cs.s = append(c.cs.s, strconv.FormatInt(stop, 10)) + return (LrangeStop)(c) +} + +type LrangeStop Completed + +func (c LrangeStop) Build() Completed { + return Completed(c) +} + +func (c LrangeStop) Cache() Cacheable { + return Cacheable(c) +} + +type Lrem Completed + +func (b Builder) Lrem() (c Lrem) { + c = Lrem{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LREM") + return c +} + +func (c Lrem) Key(key string) LremKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LremKey)(c) +} + +type LremCount Completed + +func (c LremCount) Element(element string) LremElement { + c.cs.s = append(c.cs.s, element) + return (LremElement)(c) +} + +type LremElement Completed + +func (c LremElement) Build() Completed { + return Completed(c) +} + +type LremKey Completed + +func (c LremKey) Count(count int64) LremCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (LremCount)(c) +} + +type Lset Completed + +func (b Builder) Lset() (c Lset) { + c = Lset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LSET") + return c +} + +func (c Lset) Key(key string) LsetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LsetKey)(c) +} + +type LsetElement Completed + +func (c LsetElement) Build() Completed { + return Completed(c) +} + +type LsetIndex Completed + +func (c LsetIndex) Element(element string) LsetElement { + c.cs.s = append(c.cs.s, element) + return (LsetElement)(c) +} + +type LsetKey Completed + +func (c LsetKey) Index(index int64) LsetIndex { + c.cs.s = append(c.cs.s, strconv.FormatInt(index, 10)) + return (LsetIndex)(c) +} + +type Ltrim Completed + +func (b Builder) Ltrim() (c Ltrim) { + c = Ltrim{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "LTRIM") + return c +} + +func (c Ltrim) Key(key string) LtrimKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (LtrimKey)(c) +} + +type LtrimKey Completed + +func (c LtrimKey) Start(start int64) LtrimStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (LtrimStart)(c) +} + +type LtrimStart Completed + +func (c LtrimStart) Stop(stop int64) LtrimStop { + c.cs.s = append(c.cs.s, strconv.FormatInt(stop, 10)) + return (LtrimStop)(c) +} + +type LtrimStop Completed + +func (c LtrimStop) Build() Completed { + return Completed(c) +} + +type MemoryDoctor Completed + +func (b Builder) MemoryDoctor() (c MemoryDoctor) { + c = MemoryDoctor{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "MEMORY", "DOCTOR") + return c +} + +func (c MemoryDoctor) Build() Completed { + return Completed(c) +} + +type MemoryHelp Completed + +func (b Builder) MemoryHelp() (c MemoryHelp) { + c = 
MemoryHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MEMORY", "HELP") + return c +} + +func (c MemoryHelp) Build() Completed { + return Completed(c) +} + +type MemoryMallocStats Completed + +func (b Builder) MemoryMallocStats() (c MemoryMallocStats) { + c = MemoryMallocStats{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "MEMORY", "MALLOC-STATS") + return c +} + +func (c MemoryMallocStats) Build() Completed { + return Completed(c) +} + +type MemoryPurge Completed + +func (b Builder) MemoryPurge() (c MemoryPurge) { + c = MemoryPurge{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MEMORY", "PURGE") + return c +} + +func (c MemoryPurge) Build() Completed { + return Completed(c) +} + +type MemoryStats Completed + +func (b Builder) MemoryStats() (c MemoryStats) { + c = MemoryStats{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "MEMORY", "STATS") + return c +} + +func (c MemoryStats) Build() Completed { + return Completed(c) +} + +type MemoryUsage Completed + +func (b Builder) MemoryUsage() (c MemoryUsage) { + c = MemoryUsage{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "MEMORY", "USAGE") + return c +} + +func (c MemoryUsage) Key(key string) MemoryUsageKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (MemoryUsageKey)(c) +} + +type MemoryUsageKey Completed + +func (c MemoryUsageKey) Samples(count int64) MemoryUsageSamples { + c.cs.s = append(c.cs.s, "SAMPLES", strconv.FormatInt(count, 10)) + return (MemoryUsageSamples)(c) +} + +func (c MemoryUsageKey) Build() Completed { + return Completed(c) +} + +type MemoryUsageSamples Completed + +func (c MemoryUsageSamples) Build() Completed { + return Completed(c) +} + +type Mget Completed + +func (b Builder) Mget() (c Mget) { + c = Mget{cs: get(), ks: b.ks, cf: mtGetTag} + c.cs.s = append(c.cs.s, "MGET") + return c +} + +func (c Mget) Key(key ...string) MgetKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (MgetKey)(c) +} + +type MgetKey Completed + +func (c MgetKey) Key(key ...string) MgetKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c MgetKey) Build() Completed { + return Completed(c) +} + +func (c MgetKey) Cache() Cacheable { + return Cacheable(c) +} + +type Migrate Completed + +func (b Builder) Migrate() (c Migrate) { + c = Migrate{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "MIGRATE") + return c +} + +func (c Migrate) Host(host string) MigrateHost { + c.cs.s = append(c.cs.s, host) + return (MigrateHost)(c) +} + +type MigrateAuthAuth Completed + +func (c MigrateAuthAuth) Auth2(username string, password string) MigrateAuthAuth2 { + c.cs.s = append(c.cs.s, "AUTH2", username, password) + return (MigrateAuthAuth2)(c) +} + +func (c MigrateAuthAuth) Keys(key ...string) MigrateKeys { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, "KEYS") + c.cs.s = append(c.cs.s, key...) 
+ return (MigrateKeys)(c) +} + +func (c MigrateAuthAuth) Build() Completed { + return Completed(c) +} + +type MigrateAuthAuth2 Completed + +func (c MigrateAuthAuth2) Keys(key ...string) MigrateKeys { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, "KEYS") + c.cs.s = append(c.cs.s, key...) + return (MigrateKeys)(c) +} + +func (c MigrateAuthAuth2) Build() Completed { + return Completed(c) +} + +type MigrateCopy Completed + +func (c MigrateCopy) Replace() MigrateReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (MigrateReplace)(c) +} + +func (c MigrateCopy) Auth(password string) MigrateAuthAuth { + c.cs.s = append(c.cs.s, "AUTH", password) + return (MigrateAuthAuth)(c) +} + +func (c MigrateCopy) Auth2(username string, password string) MigrateAuthAuth2 { + c.cs.s = append(c.cs.s, "AUTH2", username, password) + return (MigrateAuthAuth2)(c) +} + +func (c MigrateCopy) Keys(key ...string) MigrateKeys { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, "KEYS") + c.cs.s = append(c.cs.s, key...) + return (MigrateKeys)(c) +} + +func (c MigrateCopy) Build() Completed { + return Completed(c) +} + +type MigrateDestinationDb Completed + +func (c MigrateDestinationDb) Timeout(timeout int64) MigrateTimeout { + c.cs.s = append(c.cs.s, strconv.FormatInt(timeout, 10)) + return (MigrateTimeout)(c) +} + +type MigrateHost Completed + +func (c MigrateHost) Port(port int64) MigratePort { + c.cs.s = append(c.cs.s, strconv.FormatInt(port, 10)) + return (MigratePort)(c) +} + +type MigrateKey Completed + +func (c MigrateKey) DestinationDb(destinationDb int64) MigrateDestinationDb { + c.cs.s = append(c.cs.s, strconv.FormatInt(destinationDb, 10)) + return (MigrateDestinationDb)(c) +} + +type MigrateKeys Completed + +func (c MigrateKeys) Keys(key ...string) MigrateKeys { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, "KEYS") + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c MigrateKeys) Build() Completed { + return Completed(c) +} + +type MigratePort Completed + +func (c MigratePort) Key(key string) MigrateKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (MigrateKey)(c) +} + +type MigrateReplace Completed + +func (c MigrateReplace) Auth(password string) MigrateAuthAuth { + c.cs.s = append(c.cs.s, "AUTH", password) + return (MigrateAuthAuth)(c) +} + +func (c MigrateReplace) Auth2(username string, password string) MigrateAuthAuth2 { + c.cs.s = append(c.cs.s, "AUTH2", username, password) + return (MigrateAuthAuth2)(c) +} + +func (c MigrateReplace) Keys(key ...string) MigrateKeys { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, "KEYS") + c.cs.s = append(c.cs.s, key...) 
+ return (MigrateKeys)(c) +} + +func (c MigrateReplace) Build() Completed { + return Completed(c) +} + +type MigrateTimeout Completed + +func (c MigrateTimeout) Copy() MigrateCopy { + c.cs.s = append(c.cs.s, "COPY") + return (MigrateCopy)(c) +} + +func (c MigrateTimeout) Replace() MigrateReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (MigrateReplace)(c) +} + +func (c MigrateTimeout) Auth(password string) MigrateAuthAuth { + c.cs.s = append(c.cs.s, "AUTH", password) + return (MigrateAuthAuth)(c) +} + +func (c MigrateTimeout) Auth2(username string, password string) MigrateAuthAuth2 { + c.cs.s = append(c.cs.s, "AUTH2", username, password) + return (MigrateAuthAuth2)(c) +} + +func (c MigrateTimeout) Keys(key ...string) MigrateKeys { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, "KEYS") + c.cs.s = append(c.cs.s, key...) + return (MigrateKeys)(c) +} + +func (c MigrateTimeout) Build() Completed { + return Completed(c) +} + +type ModuleList Completed + +func (b Builder) ModuleList() (c ModuleList) { + c = ModuleList{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MODULE", "LIST") + return c +} + +func (c ModuleList) Build() Completed { + return Completed(c) +} + +type ModuleLoad Completed + +func (b Builder) ModuleLoad() (c ModuleLoad) { + c = ModuleLoad{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MODULE", "LOAD") + return c +} + +func (c ModuleLoad) Path(path string) ModuleLoadPath { + c.cs.s = append(c.cs.s, path) + return (ModuleLoadPath)(c) +} + +type ModuleLoadArg Completed + +func (c ModuleLoadArg) Arg(arg ...string) ModuleLoadArg { + c.cs.s = append(c.cs.s, arg...) + return c +} + +func (c ModuleLoadArg) Build() Completed { + return Completed(c) +} + +type ModuleLoadPath Completed + +func (c ModuleLoadPath) Arg(arg ...string) ModuleLoadArg { + c.cs.s = append(c.cs.s, arg...) + return (ModuleLoadArg)(c) +} + +func (c ModuleLoadPath) Build() Completed { + return Completed(c) +} + +type ModuleLoadex Completed + +func (b Builder) ModuleLoadex() (c ModuleLoadex) { + c = ModuleLoadex{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MODULE", "LOADEX") + return c +} + +func (c ModuleLoadex) Path(path string) ModuleLoadexPath { + c.cs.s = append(c.cs.s, path) + return (ModuleLoadexPath)(c) +} + +type ModuleLoadexArgs Completed + +func (c ModuleLoadexArgs) Args(args ...string) ModuleLoadexArgs { + c.cs.s = append(c.cs.s, "ARGS") + c.cs.s = append(c.cs.s, args...) + return c +} + +func (c ModuleLoadexArgs) Build() Completed { + return Completed(c) +} + +type ModuleLoadexConfig Completed + +func (c ModuleLoadexConfig) Config(name string, value string) ModuleLoadexConfig { + c.cs.s = append(c.cs.s, "CONFIG", name, value) + return c +} + +func (c ModuleLoadexConfig) Args(args ...string) ModuleLoadexArgs { + c.cs.s = append(c.cs.s, "ARGS") + c.cs.s = append(c.cs.s, args...) + return (ModuleLoadexArgs)(c) +} + +func (c ModuleLoadexConfig) Build() Completed { + return Completed(c) +} + +type ModuleLoadexPath Completed + +func (c ModuleLoadexPath) Config() ModuleLoadexConfig { + return (ModuleLoadexConfig)(c) +} + +func (c ModuleLoadexPath) Args(args ...string) ModuleLoadexArgs { + c.cs.s = append(c.cs.s, "ARGS") + c.cs.s = append(c.cs.s, args...) 
+ return (ModuleLoadexArgs)(c) +} + +func (c ModuleLoadexPath) Build() Completed { + return Completed(c) +} + +type ModuleUnload Completed + +func (b Builder) ModuleUnload() (c ModuleUnload) { + c = ModuleUnload{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MODULE", "UNLOAD") + return c +} + +func (c ModuleUnload) Name(name string) ModuleUnloadName { + c.cs.s = append(c.cs.s, name) + return (ModuleUnloadName)(c) +} + +type ModuleUnloadName Completed + +func (c ModuleUnloadName) Build() Completed { + return Completed(c) +} + +type Monitor Completed + +func (b Builder) Monitor() (c Monitor) { + c = Monitor{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MONITOR") + return c +} + +func (c Monitor) Build() Completed { + return Completed(c) +} + +type Move Completed + +func (b Builder) Move() (c Move) { + c = Move{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MOVE") + return c +} + +func (c Move) Key(key string) MoveKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (MoveKey)(c) +} + +type MoveDb Completed + +func (c MoveDb) Build() Completed { + return Completed(c) +} + +type MoveKey Completed + +func (c MoveKey) Db(db int64) MoveDb { + c.cs.s = append(c.cs.s, strconv.FormatInt(db, 10)) + return (MoveDb)(c) +} + +type Mset Completed + +func (b Builder) Mset() (c Mset) { + c = Mset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MSET") + return c +} + +func (c Mset) KeyValue() MsetKeyValue { + return (MsetKeyValue)(c) +} + +type MsetKeyValue Completed + +func (c MsetKeyValue) KeyValue(key string, value string) MsetKeyValue { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key, value) + return c +} + +func (c MsetKeyValue) Build() Completed { + return Completed(c) +} + +type Msetnx Completed + +func (b Builder) Msetnx() (c Msetnx) { + c = Msetnx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MSETNX") + return c +} + +func (c Msetnx) KeyValue() MsetnxKeyValue { + return (MsetnxKeyValue)(c) +} + +type MsetnxKeyValue Completed + +func (c MsetnxKeyValue) KeyValue(key string, value string) MsetnxKeyValue { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key, value) + return c +} + +func (c MsetnxKeyValue) Build() Completed { + return Completed(c) +} + +type Multi Completed + +func (b Builder) Multi() (c Multi) { + c = Multi{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "MULTI") + return c +} + +func (c Multi) Build() Completed { + return Completed(c) +} + +type ObjectEncoding Completed + +func (b Builder) ObjectEncoding() (c ObjectEncoding) { + c = ObjectEncoding{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "OBJECT", "ENCODING") + return c +} + +func (c ObjectEncoding) Key(key string) ObjectEncodingKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ObjectEncodingKey)(c) +} + +type ObjectEncodingKey Completed + +func (c ObjectEncodingKey) Build() Completed { + return Completed(c) +} + +type ObjectFreq Completed + +func (b Builder) ObjectFreq() (c ObjectFreq) { + c = ObjectFreq{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "OBJECT", "FREQ") + return c +} + +func (c ObjectFreq) Key(key string) ObjectFreqKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } 
+ c.cs.s = append(c.cs.s, key) + return (ObjectFreqKey)(c) +} + +type ObjectFreqKey Completed + +func (c ObjectFreqKey) Build() Completed { + return Completed(c) +} + +type ObjectHelp Completed + +func (b Builder) ObjectHelp() (c ObjectHelp) { + c = ObjectHelp{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "OBJECT", "HELP") + return c +} + +func (c ObjectHelp) Build() Completed { + return Completed(c) +} + +type ObjectIdletime Completed + +func (b Builder) ObjectIdletime() (c ObjectIdletime) { + c = ObjectIdletime{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "OBJECT", "IDLETIME") + return c +} + +func (c ObjectIdletime) Key(key string) ObjectIdletimeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ObjectIdletimeKey)(c) +} + +type ObjectIdletimeKey Completed + +func (c ObjectIdletimeKey) Build() Completed { + return Completed(c) +} + +type ObjectRefcount Completed + +func (b Builder) ObjectRefcount() (c ObjectRefcount) { + c = ObjectRefcount{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "OBJECT", "REFCOUNT") + return c +} + +func (c ObjectRefcount) Key(key string) ObjectRefcountKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ObjectRefcountKey)(c) +} + +type ObjectRefcountKey Completed + +func (c ObjectRefcountKey) Build() Completed { + return Completed(c) +} + +type Persist Completed + +func (b Builder) Persist() (c Persist) { + c = Persist{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PERSIST") + return c +} + +func (c Persist) Key(key string) PersistKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PersistKey)(c) +} + +type PersistKey Completed + +func (c PersistKey) Build() Completed { + return Completed(c) +} + +type Pexpire Completed + +func (b Builder) Pexpire() (c Pexpire) { + c = Pexpire{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PEXPIRE") + return c +} + +func (c Pexpire) Key(key string) PexpireKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PexpireKey)(c) +} + +type PexpireConditionGt Completed + +func (c PexpireConditionGt) Build() Completed { + return Completed(c) +} + +type PexpireConditionLt Completed + +func (c PexpireConditionLt) Build() Completed { + return Completed(c) +} + +type PexpireConditionNx Completed + +func (c PexpireConditionNx) Build() Completed { + return Completed(c) +} + +type PexpireConditionXx Completed + +func (c PexpireConditionXx) Build() Completed { + return Completed(c) +} + +type PexpireKey Completed + +func (c PexpireKey) Milliseconds(milliseconds int64) PexpireMilliseconds { + c.cs.s = append(c.cs.s, strconv.FormatInt(milliseconds, 10)) + return (PexpireMilliseconds)(c) +} + +type PexpireMilliseconds Completed + +func (c PexpireMilliseconds) Nx() PexpireConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (PexpireConditionNx)(c) +} + +func (c PexpireMilliseconds) Xx() PexpireConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (PexpireConditionXx)(c) +} + +func (c PexpireMilliseconds) Gt() PexpireConditionGt { + c.cs.s = append(c.cs.s, "GT") + return (PexpireConditionGt)(c) +} + +func (c PexpireMilliseconds) Lt() PexpireConditionLt { + c.cs.s = append(c.cs.s, "LT") + return 
(PexpireConditionLt)(c) +} + +func (c PexpireMilliseconds) Build() Completed { + return Completed(c) +} + +type Pexpireat Completed + +func (b Builder) Pexpireat() (c Pexpireat) { + c = Pexpireat{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PEXPIREAT") + return c +} + +func (c Pexpireat) Key(key string) PexpireatKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PexpireatKey)(c) +} + +type PexpireatConditionGt Completed + +func (c PexpireatConditionGt) Build() Completed { + return Completed(c) +} + +type PexpireatConditionLt Completed + +func (c PexpireatConditionLt) Build() Completed { + return Completed(c) +} + +type PexpireatConditionNx Completed + +func (c PexpireatConditionNx) Build() Completed { + return Completed(c) +} + +type PexpireatConditionXx Completed + +func (c PexpireatConditionXx) Build() Completed { + return Completed(c) +} + +type PexpireatKey Completed + +func (c PexpireatKey) MillisecondsTimestamp(millisecondsTimestamp int64) PexpireatMillisecondsTimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(millisecondsTimestamp, 10)) + return (PexpireatMillisecondsTimestamp)(c) +} + +type PexpireatMillisecondsTimestamp Completed + +func (c PexpireatMillisecondsTimestamp) Nx() PexpireatConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (PexpireatConditionNx)(c) +} + +func (c PexpireatMillisecondsTimestamp) Xx() PexpireatConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (PexpireatConditionXx)(c) +} + +func (c PexpireatMillisecondsTimestamp) Gt() PexpireatConditionGt { + c.cs.s = append(c.cs.s, "GT") + return (PexpireatConditionGt)(c) +} + +func (c PexpireatMillisecondsTimestamp) Lt() PexpireatConditionLt { + c.cs.s = append(c.cs.s, "LT") + return (PexpireatConditionLt)(c) +} + +func (c PexpireatMillisecondsTimestamp) Build() Completed { + return Completed(c) +} + +type Pexpiretime Completed + +func (b Builder) Pexpiretime() (c Pexpiretime) { + c = Pexpiretime{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PEXPIRETIME") + return c +} + +func (c Pexpiretime) Key(key string) PexpiretimeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PexpiretimeKey)(c) +} + +type PexpiretimeKey Completed + +func (c PexpiretimeKey) Build() Completed { + return Completed(c) +} + +func (c PexpiretimeKey) Cache() Cacheable { + return Cacheable(c) +} + +type Pfadd Completed + +func (b Builder) Pfadd() (c Pfadd) { + c = Pfadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PFADD") + return c +} + +func (c Pfadd) Key(key string) PfaddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PfaddKey)(c) +} + +type PfaddElement Completed + +func (c PfaddElement) Element(element ...string) PfaddElement { + c.cs.s = append(c.cs.s, element...) + return c +} + +func (c PfaddElement) Build() Completed { + return Completed(c) +} + +type PfaddKey Completed + +func (c PfaddKey) Element(element ...string) PfaddElement { + c.cs.s = append(c.cs.s, element...) 
+ return (PfaddElement)(c) +} + +func (c PfaddKey) Build() Completed { + return Completed(c) +} + +type Pfcount Completed + +func (b Builder) Pfcount() (c Pfcount) { + c = Pfcount{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PFCOUNT") + return c +} + +func (c Pfcount) Key(key ...string) PfcountKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (PfcountKey)(c) +} + +type PfcountKey Completed + +func (c PfcountKey) Key(key ...string) PfcountKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c PfcountKey) Build() Completed { + return Completed(c) +} + +type Pfmerge Completed + +func (b Builder) Pfmerge() (c Pfmerge) { + c = Pfmerge{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PFMERGE") + return c +} + +func (c Pfmerge) Destkey(destkey string) PfmergeDestkey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destkey) + } else { + c.ks = check(c.ks, slot(destkey)) + } + c.cs.s = append(c.cs.s, destkey) + return (PfmergeDestkey)(c) +} + +type PfmergeDestkey Completed + +func (c PfmergeDestkey) Sourcekey(sourcekey ...string) PfmergeSourcekey { + if c.ks&NoSlot == NoSlot { + for _, k := range sourcekey { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range sourcekey { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, sourcekey...) + return (PfmergeSourcekey)(c) +} + +type PfmergeSourcekey Completed + +func (c PfmergeSourcekey) Sourcekey(sourcekey ...string) PfmergeSourcekey { + if c.ks&NoSlot == NoSlot { + for _, k := range sourcekey { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range sourcekey { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, sourcekey...) 
+ return c +} + +func (c PfmergeSourcekey) Build() Completed { + return Completed(c) +} + +type Ping Completed + +func (b Builder) Ping() (c Ping) { + c = Ping{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PING") + return c +} + +func (c Ping) Message(message string) PingMessage { + c.cs.s = append(c.cs.s, message) + return (PingMessage)(c) +} + +func (c Ping) Build() Completed { + return Completed(c) +} + +type PingMessage Completed + +func (c PingMessage) Build() Completed { + return Completed(c) +} + +type Psetex Completed + +func (b Builder) Psetex() (c Psetex) { + c = Psetex{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PSETEX") + return c +} + +func (c Psetex) Key(key string) PsetexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PsetexKey)(c) +} + +type PsetexKey Completed + +func (c PsetexKey) Milliseconds(milliseconds int64) PsetexMilliseconds { + c.cs.s = append(c.cs.s, strconv.FormatInt(milliseconds, 10)) + return (PsetexMilliseconds)(c) +} + +type PsetexMilliseconds Completed + +func (c PsetexMilliseconds) Value(value string) PsetexValue { + c.cs.s = append(c.cs.s, value) + return (PsetexValue)(c) +} + +type PsetexValue Completed + +func (c PsetexValue) Build() Completed { + return Completed(c) +} + +type Psubscribe Completed + +func (b Builder) Psubscribe() (c Psubscribe) { + c = Psubscribe{cs: get(), ks: b.ks, cf: noRetTag} + c.cs.s = append(c.cs.s, "PSUBSCRIBE") + return c +} + +func (c Psubscribe) Pattern(pattern ...string) PsubscribePattern { + c.cs.s = append(c.cs.s, pattern...) + return (PsubscribePattern)(c) +} + +type PsubscribePattern Completed + +func (c PsubscribePattern) Pattern(pattern ...string) PsubscribePattern { + c.cs.s = append(c.cs.s, pattern...) 
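+ // Psubscribe and Punsubscribe are built with cf set to noRetTag, which
+ // appears to mark commands whose results arrive as pub/sub push messages
+ // rather than as a conventional request/response reply.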
+ return c +} + +func (c PsubscribePattern) Build() Completed { + return Completed(c) +} + +type Psync Completed + +func (b Builder) Psync() (c Psync) { + c = Psync{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PSYNC") + return c +} + +func (c Psync) Replicationid(replicationid string) PsyncReplicationid { + c.cs.s = append(c.cs.s, replicationid) + return (PsyncReplicationid)(c) +} + +type PsyncOffset Completed + +func (c PsyncOffset) Build() Completed { + return Completed(c) +} + +type PsyncReplicationid Completed + +func (c PsyncReplicationid) Offset(offset int64) PsyncOffset { + c.cs.s = append(c.cs.s, strconv.FormatInt(offset, 10)) + return (PsyncOffset)(c) +} + +type Pttl Completed + +func (b Builder) Pttl() (c Pttl) { + c = Pttl{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PTTL") + return c +} + +func (c Pttl) Key(key string) PttlKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (PttlKey)(c) +} + +type PttlKey Completed + +func (c PttlKey) Build() Completed { + return Completed(c) +} + +func (c PttlKey) Cache() Cacheable { + return Cacheable(c) +} + +type Publish Completed + +func (b Builder) Publish() (c Publish) { + c = Publish{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PUBLISH") + return c +} + +func (c Publish) Channel(channel string) PublishChannel { + c.cs.s = append(c.cs.s, channel) + return (PublishChannel)(c) +} + +type PublishChannel Completed + +func (c PublishChannel) Message(message string) PublishMessage { + c.cs.s = append(c.cs.s, message) + return (PublishMessage)(c) +} + +type PublishMessage Completed + +func (c PublishMessage) Build() Completed { + return Completed(c) +} + +type PubsubChannels Completed + +func (b Builder) PubsubChannels() (c PubsubChannels) { + c = PubsubChannels{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PUBSUB", "CHANNELS") + return c +} + +func (c PubsubChannels) Pattern(pattern string) PubsubChannelsPattern { + c.cs.s = append(c.cs.s, pattern) + return (PubsubChannelsPattern)(c) +} + +func (c PubsubChannels) Build() Completed { + return Completed(c) +} + +type PubsubChannelsPattern Completed + +func (c PubsubChannelsPattern) Build() Completed { + return Completed(c) +} + +type PubsubHelp Completed + +func (b Builder) PubsubHelp() (c PubsubHelp) { + c = PubsubHelp{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PUBSUB", "HELP") + return c +} + +func (c PubsubHelp) Build() Completed { + return Completed(c) +} + +type PubsubNumpat Completed + +func (b Builder) PubsubNumpat() (c PubsubNumpat) { + c = PubsubNumpat{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PUBSUB", "NUMPAT") + return c +} + +func (c PubsubNumpat) Build() Completed { + return Completed(c) +} + +type PubsubNumsub Completed + +func (b Builder) PubsubNumsub() (c PubsubNumsub) { + c = PubsubNumsub{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "PUBSUB", "NUMSUB") + return c +} + +func (c PubsubNumsub) Channel(channel ...string) PubsubNumsubChannel { + c.cs.s = append(c.cs.s, channel...) + return (PubsubNumsubChannel)(c) +} + +func (c PubsubNumsub) Build() Completed { + return Completed(c) +} + +type PubsubNumsubChannel Completed + +func (c PubsubNumsubChannel) Channel(channel ...string) PubsubNumsubChannel { + c.cs.s = append(c.cs.s, channel...) 
+ return c +} + +func (c PubsubNumsubChannel) Build() Completed { + return Completed(c) +} + +type PubsubShardchannels Completed + +func (b Builder) PubsubShardchannels() (c PubsubShardchannels) { + c = PubsubShardchannels{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PUBSUB", "SHARDCHANNELS") + return c +} + +func (c PubsubShardchannels) Pattern(pattern string) PubsubShardchannelsPattern { + c.cs.s = append(c.cs.s, pattern) + return (PubsubShardchannelsPattern)(c) +} + +func (c PubsubShardchannels) Build() Completed { + return Completed(c) +} + +type PubsubShardchannelsPattern Completed + +func (c PubsubShardchannelsPattern) Build() Completed { + return Completed(c) +} + +type PubsubShardnumsub Completed + +func (b Builder) PubsubShardnumsub() (c PubsubShardnumsub) { + c = PubsubShardnumsub{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "PUBSUB", "SHARDNUMSUB") + return c +} + +func (c PubsubShardnumsub) Channel(channel ...string) PubsubShardnumsubChannel { + c.cs.s = append(c.cs.s, channel...) + return (PubsubShardnumsubChannel)(c) +} + +func (c PubsubShardnumsub) Build() Completed { + return Completed(c) +} + +type PubsubShardnumsubChannel Completed + +func (c PubsubShardnumsubChannel) Channel(channel ...string) PubsubShardnumsubChannel { + c.cs.s = append(c.cs.s, channel...) + return c +} + +func (c PubsubShardnumsubChannel) Build() Completed { + return Completed(c) +} + +type Punsubscribe Completed + +func (b Builder) Punsubscribe() (c Punsubscribe) { + c = Punsubscribe{cs: get(), ks: b.ks, cf: noRetTag} + c.cs.s = append(c.cs.s, "PUNSUBSCRIBE") + return c +} + +func (c Punsubscribe) Pattern(pattern ...string) PunsubscribePattern { + c.cs.s = append(c.cs.s, pattern...) + return (PunsubscribePattern)(c) +} + +func (c Punsubscribe) Build() Completed { + return Completed(c) +} + +type PunsubscribePattern Completed + +func (c PunsubscribePattern) Pattern(pattern ...string) PunsubscribePattern { + c.cs.s = append(c.cs.s, pattern...) 
+ return c +} + +func (c PunsubscribePattern) Build() Completed { + return Completed(c) +} + +type Quit Completed + +func (b Builder) Quit() (c Quit) { + c = Quit{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "QUIT") + return c +} + +func (c Quit) Build() Completed { + return Completed(c) +} + +type Randomkey Completed + +func (b Builder) Randomkey() (c Randomkey) { + c = Randomkey{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "RANDOMKEY") + return c +} + +func (c Randomkey) Build() Completed { + return Completed(c) +} + +type Readonly Completed + +func (b Builder) Readonly() (c Readonly) { + c = Readonly{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "READONLY") + return c +} + +func (c Readonly) Build() Completed { + return Completed(c) +} + +type Readwrite Completed + +func (b Builder) Readwrite() (c Readwrite) { + c = Readwrite{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "READWRITE") + return c +} + +func (c Readwrite) Build() Completed { + return Completed(c) +} + +type Rename Completed + +func (b Builder) Rename() (c Rename) { + c = Rename{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RENAME") + return c +} + +func (c Rename) Key(key string) RenameKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (RenameKey)(c) +} + +type RenameKey Completed + +func (c RenameKey) Newkey(newkey string) RenameNewkey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(newkey) + } else { + c.ks = check(c.ks, slot(newkey)) + } + c.cs.s = append(c.cs.s, newkey) + return (RenameNewkey)(c) +} + +type RenameNewkey Completed + +func (c RenameNewkey) Build() Completed { + return Completed(c) +} + +type Renamenx Completed + +func (b Builder) Renamenx() (c Renamenx) { + c = Renamenx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RENAMENX") + return c +} + +func (c Renamenx) Key(key string) RenamenxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (RenamenxKey)(c) +} + +type RenamenxKey Completed + +func (c RenamenxKey) Newkey(newkey string) RenamenxNewkey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(newkey) + } else { + c.ks = check(c.ks, slot(newkey)) + } + c.cs.s = append(c.cs.s, newkey) + return (RenamenxNewkey)(c) +} + +type RenamenxNewkey Completed + +func (c RenamenxNewkey) Build() Completed { + return Completed(c) +} + +type Replicaof Completed + +func (b Builder) Replicaof() (c Replicaof) { + c = Replicaof{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "REPLICAOF") + return c +} + +func (c Replicaof) Host(host string) ReplicaofHost { + c.cs.s = append(c.cs.s, host) + return (ReplicaofHost)(c) +} + +type ReplicaofHost Completed + +func (c ReplicaofHost) Port(port int64) ReplicaofPort { + c.cs.s = append(c.cs.s, strconv.FormatInt(port, 10)) + return (ReplicaofPort)(c) +} + +type ReplicaofPort Completed + +func (c ReplicaofPort) Build() Completed { + return Completed(c) +} + +type Reset Completed + +func (b Builder) Reset() (c Reset) { + c = Reset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RESET") + return c +} + +func (c Reset) Build() Completed { + return Completed(c) +} + +type Restore Completed + +func (b Builder) Restore() (c Restore) { + c = Restore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RESTORE") + return c +} + +func (c Restore) Key(key string) RestoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + 
} + c.cs.s = append(c.cs.s, key) + return (RestoreKey)(c) +} + +type RestoreAbsttl Completed + +func (c RestoreAbsttl) Idletime(seconds int64) RestoreIdletime { + c.cs.s = append(c.cs.s, "IDLETIME", strconv.FormatInt(seconds, 10)) + return (RestoreIdletime)(c) +} + +func (c RestoreAbsttl) Freq(frequency int64) RestoreFreq { + c.cs.s = append(c.cs.s, "FREQ", strconv.FormatInt(frequency, 10)) + return (RestoreFreq)(c) +} + +func (c RestoreAbsttl) Build() Completed { + return Completed(c) +} + +type RestoreFreq Completed + +func (c RestoreFreq) Build() Completed { + return Completed(c) +} + +type RestoreIdletime Completed + +func (c RestoreIdletime) Freq(frequency int64) RestoreFreq { + c.cs.s = append(c.cs.s, "FREQ", strconv.FormatInt(frequency, 10)) + return (RestoreFreq)(c) +} + +func (c RestoreIdletime) Build() Completed { + return Completed(c) +} + +type RestoreKey Completed + +func (c RestoreKey) Ttl(ttl int64) RestoreTtl { + c.cs.s = append(c.cs.s, strconv.FormatInt(ttl, 10)) + return (RestoreTtl)(c) +} + +type RestoreReplace Completed + +func (c RestoreReplace) Absttl() RestoreAbsttl { + c.cs.s = append(c.cs.s, "ABSTTL") + return (RestoreAbsttl)(c) +} + +func (c RestoreReplace) Idletime(seconds int64) RestoreIdletime { + c.cs.s = append(c.cs.s, "IDLETIME", strconv.FormatInt(seconds, 10)) + return (RestoreIdletime)(c) +} + +func (c RestoreReplace) Freq(frequency int64) RestoreFreq { + c.cs.s = append(c.cs.s, "FREQ", strconv.FormatInt(frequency, 10)) + return (RestoreFreq)(c) +} + +func (c RestoreReplace) Build() Completed { + return Completed(c) +} + +type RestoreSerializedValue Completed + +func (c RestoreSerializedValue) Replace() RestoreReplace { + c.cs.s = append(c.cs.s, "REPLACE") + return (RestoreReplace)(c) +} + +func (c RestoreSerializedValue) Absttl() RestoreAbsttl { + c.cs.s = append(c.cs.s, "ABSTTL") + return (RestoreAbsttl)(c) +} + +func (c RestoreSerializedValue) Idletime(seconds int64) RestoreIdletime { + c.cs.s = append(c.cs.s, "IDLETIME", strconv.FormatInt(seconds, 10)) + return (RestoreIdletime)(c) +} + +func (c RestoreSerializedValue) Freq(frequency int64) RestoreFreq { + c.cs.s = append(c.cs.s, "FREQ", strconv.FormatInt(frequency, 10)) + return (RestoreFreq)(c) +} + +func (c RestoreSerializedValue) Build() Completed { + return Completed(c) +} + +type RestoreTtl Completed + +func (c RestoreTtl) SerializedValue(serializedValue string) RestoreSerializedValue { + c.cs.s = append(c.cs.s, serializedValue) + return (RestoreSerializedValue)(c) +} + +type RgAbortexecution Completed + +func (b Builder) RgAbortexecution() (c RgAbortexecution) { + c = RgAbortexecution{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.ABORTEXECUTION") + return c +} + +func (c RgAbortexecution) Id(id string) RgAbortexecutionId { + c.cs.s = append(c.cs.s, id) + return (RgAbortexecutionId)(c) +} + +type RgAbortexecutionId Completed + +func (c RgAbortexecutionId) Build() Completed { + return Completed(c) +} + +type RgConfigget Completed + +func (b Builder) RgConfigget() (c RgConfigget) { + c = RgConfigget{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.CONFIGGET") + return c +} + +func (c RgConfigget) Key(key ...string) RgConfiggetKey { + c.cs.s = append(c.cs.s, key...) + return (RgConfiggetKey)(c) +} + +type RgConfiggetKey Completed + +func (c RgConfiggetKey) Key(key ...string) RgConfiggetKey { + c.cs.s = append(c.cs.s, key...) 
+ return c +} + +func (c RgConfiggetKey) Build() Completed { + return Completed(c) +} + +type RgConfigset Completed + +func (b Builder) RgConfigset() (c RgConfigset) { + c = RgConfigset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.CONFIGSET") + return c +} + +func (c RgConfigset) KeyValue() RgConfigsetKeyValue { + return (RgConfigsetKeyValue)(c) +} + +type RgConfigsetKeyValue Completed + +func (c RgConfigsetKeyValue) KeyValue(key string, value string) RgConfigsetKeyValue { + c.cs.s = append(c.cs.s, key, value) + return c +} + +func (c RgConfigsetKeyValue) Build() Completed { + return Completed(c) +} + +type RgDropexecution Completed + +func (b Builder) RgDropexecution() (c RgDropexecution) { + c = RgDropexecution{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.DROPEXECUTION") + return c +} + +func (c RgDropexecution) Id(id string) RgDropexecutionId { + c.cs.s = append(c.cs.s, id) + return (RgDropexecutionId)(c) +} + +type RgDropexecutionId Completed + +func (c RgDropexecutionId) Build() Completed { + return Completed(c) +} + +type RgDumpexecutions Completed + +func (b Builder) RgDumpexecutions() (c RgDumpexecutions) { + c = RgDumpexecutions{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.DUMPEXECUTIONS") + return c +} + +func (c RgDumpexecutions) Build() Completed { + return Completed(c) +} + +type RgDumpregistrations Completed + +func (b Builder) RgDumpregistrations() (c RgDumpregistrations) { + c = RgDumpregistrations{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.DUMPREGISTRATIONS") + return c +} + +func (c RgDumpregistrations) Build() Completed { + return Completed(c) +} + +type RgGetexecution Completed + +func (b Builder) RgGetexecution() (c RgGetexecution) { + c = RgGetexecution{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.GETEXECUTION") + return c +} + +func (c RgGetexecution) Id(id string) RgGetexecutionId { + c.cs.s = append(c.cs.s, id) + return (RgGetexecutionId)(c) +} + +type RgGetexecutionId Completed + +func (c RgGetexecutionId) Shard() RgGetexecutionModeShard { + c.cs.s = append(c.cs.s, "SHARD") + return (RgGetexecutionModeShard)(c) +} + +func (c RgGetexecutionId) Cluster() RgGetexecutionModeCluster { + c.cs.s = append(c.cs.s, "CLUSTER") + return (RgGetexecutionModeCluster)(c) +} + +func (c RgGetexecutionId) Build() Completed { + return Completed(c) +} + +type RgGetexecutionModeCluster Completed + +func (c RgGetexecutionModeCluster) Build() Completed { + return Completed(c) +} + +type RgGetexecutionModeShard Completed + +func (c RgGetexecutionModeShard) Build() Completed { + return Completed(c) +} + +type RgGetresults Completed + +func (b Builder) RgGetresults() (c RgGetresults) { + c = RgGetresults{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.GETRESULTS") + return c +} + +func (c RgGetresults) Id(id string) RgGetresultsId { + c.cs.s = append(c.cs.s, id) + return (RgGetresultsId)(c) +} + +type RgGetresultsId Completed + +func (c RgGetresultsId) Build() Completed { + return Completed(c) +} + +type RgGetresultsblocking Completed + +func (b Builder) RgGetresultsblocking() (c RgGetresultsblocking) { + c = RgGetresultsblocking{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.GETRESULTSBLOCKING") + return c +} + +func (c RgGetresultsblocking) Id(id string) RgGetresultsblockingId { + c.cs.s = append(c.cs.s, id) + return (RgGetresultsblockingId)(c) +} + +type RgGetresultsblockingId Completed + +func (c RgGetresultsblockingId) Build() Completed { + return Completed(c) +} + +type RgInfocluster Completed + +func (b Builder) RgInfocluster() (c RgInfocluster) 
{ + c = RgInfocluster{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.INFOCLUSTER") + return c +} + +func (c RgInfocluster) Build() Completed { + return Completed(c) +} + +type RgPydumpreqs Completed + +func (b Builder) RgPydumpreqs() (c RgPydumpreqs) { + c = RgPydumpreqs{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.PYDUMPREQS") + return c +} + +func (c RgPydumpreqs) Build() Completed { + return Completed(c) +} + +type RgPyexecute Completed + +func (b Builder) RgPyexecute() (c RgPyexecute) { + c = RgPyexecute{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.PYEXECUTE") + return c +} + +func (c RgPyexecute) Function(function string) RgPyexecuteFunction { + c.cs.s = append(c.cs.s, function) + return (RgPyexecuteFunction)(c) +} + +type RgPyexecuteDescription Completed + +func (c RgPyexecuteDescription) Upgrade() RgPyexecuteUpgrade { + c.cs.s = append(c.cs.s, "UPGRADE") + return (RgPyexecuteUpgrade)(c) +} + +func (c RgPyexecuteDescription) ReplaceWith(replaceWith string) RgPyexecuteReplaceWith { + c.cs.s = append(c.cs.s, "REPLACE_WITH", replaceWith) + return (RgPyexecuteReplaceWith)(c) +} + +func (c RgPyexecuteDescription) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) + return (RgPyexecuteRequirementsRequirements)(c) +} + +func (c RgPyexecuteDescription) Build() Completed { + return Completed(c) +} + +type RgPyexecuteFunction Completed + +func (c RgPyexecuteFunction) Unblocking() RgPyexecuteUnblocking { + c.cs.s = append(c.cs.s, "UNBLOCKING") + return (RgPyexecuteUnblocking)(c) +} + +func (c RgPyexecuteFunction) Id(id string) RgPyexecuteId { + c.cs.s = append(c.cs.s, "ID", id) + return (RgPyexecuteId)(c) +} + +func (c RgPyexecuteFunction) Description(description string) RgPyexecuteDescription { + c.cs.s = append(c.cs.s, "DESCRIPTION", description) + return (RgPyexecuteDescription)(c) +} + +func (c RgPyexecuteFunction) Upgrade() RgPyexecuteUpgrade { + c.cs.s = append(c.cs.s, "UPGRADE") + return (RgPyexecuteUpgrade)(c) +} + +func (c RgPyexecuteFunction) ReplaceWith(replaceWith string) RgPyexecuteReplaceWith { + c.cs.s = append(c.cs.s, "REPLACE_WITH", replaceWith) + return (RgPyexecuteReplaceWith)(c) +} + +func (c RgPyexecuteFunction) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) + return (RgPyexecuteRequirementsRequirements)(c) +} + +func (c RgPyexecuteFunction) Build() Completed { + return Completed(c) +} + +type RgPyexecuteId Completed + +func (c RgPyexecuteId) Description(description string) RgPyexecuteDescription { + c.cs.s = append(c.cs.s, "DESCRIPTION", description) + return (RgPyexecuteDescription)(c) +} + +func (c RgPyexecuteId) Upgrade() RgPyexecuteUpgrade { + c.cs.s = append(c.cs.s, "UPGRADE") + return (RgPyexecuteUpgrade)(c) +} + +func (c RgPyexecuteId) ReplaceWith(replaceWith string) RgPyexecuteReplaceWith { + c.cs.s = append(c.cs.s, "REPLACE_WITH", replaceWith) + return (RgPyexecuteReplaceWith)(c) +} + +func (c RgPyexecuteId) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) 
+ return (RgPyexecuteRequirementsRequirements)(c) +} + +func (c RgPyexecuteId) Build() Completed { + return Completed(c) +} + +type RgPyexecuteReplaceWith Completed + +func (c RgPyexecuteReplaceWith) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) + return (RgPyexecuteRequirementsRequirements)(c) +} + +func (c RgPyexecuteReplaceWith) Build() Completed { + return Completed(c) +} + +type RgPyexecuteRequirementsRequirements Completed + +func (c RgPyexecuteRequirementsRequirements) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) + return c +} + +func (c RgPyexecuteRequirementsRequirements) Build() Completed { + return Completed(c) +} + +type RgPyexecuteUnblocking Completed + +func (c RgPyexecuteUnblocking) Id(id string) RgPyexecuteId { + c.cs.s = append(c.cs.s, "ID", id) + return (RgPyexecuteId)(c) +} + +func (c RgPyexecuteUnblocking) Description(description string) RgPyexecuteDescription { + c.cs.s = append(c.cs.s, "DESCRIPTION", description) + return (RgPyexecuteDescription)(c) +} + +func (c RgPyexecuteUnblocking) Upgrade() RgPyexecuteUpgrade { + c.cs.s = append(c.cs.s, "UPGRADE") + return (RgPyexecuteUpgrade)(c) +} + +func (c RgPyexecuteUnblocking) ReplaceWith(replaceWith string) RgPyexecuteReplaceWith { + c.cs.s = append(c.cs.s, "REPLACE_WITH", replaceWith) + return (RgPyexecuteReplaceWith)(c) +} + +func (c RgPyexecuteUnblocking) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) + return (RgPyexecuteRequirementsRequirements)(c) +} + +func (c RgPyexecuteUnblocking) Build() Completed { + return Completed(c) +} + +type RgPyexecuteUpgrade Completed + +func (c RgPyexecuteUpgrade) ReplaceWith(replaceWith string) RgPyexecuteReplaceWith { + c.cs.s = append(c.cs.s, "REPLACE_WITH", replaceWith) + return (RgPyexecuteReplaceWith)(c) +} + +func (c RgPyexecuteUpgrade) Requirements(requirement ...string) RgPyexecuteRequirementsRequirements { + c.cs.s = append(c.cs.s, "REQUIREMENTS") + c.cs.s = append(c.cs.s, requirement...) + return (RgPyexecuteRequirementsRequirements)(c) +} + +func (c RgPyexecuteUpgrade) Build() Completed { + return Completed(c) +} + +type RgPystats Completed + +func (b Builder) RgPystats() (c RgPystats) { + c = RgPystats{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.PYSTATS") + return c +} + +func (c RgPystats) Build() Completed { + return Completed(c) +} + +type RgRefreshcluster Completed + +func (b Builder) RgRefreshcluster() (c RgRefreshcluster) { + c = RgRefreshcluster{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.REFRESHCLUSTER") + return c +} + +func (c RgRefreshcluster) Build() Completed { + return Completed(c) +} + +type RgTrigger Completed + +func (b Builder) RgTrigger() (c RgTrigger) { + c = RgTrigger{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.TRIGGER") + return c +} + +func (c RgTrigger) Trigger(trigger string) RgTriggerTrigger { + c.cs.s = append(c.cs.s, trigger) + return (RgTriggerTrigger)(c) +} + +type RgTriggerArgument Completed + +func (c RgTriggerArgument) Argument(argument ...string) RgTriggerArgument { + c.cs.s = append(c.cs.s, argument...) 
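+	// Illustrative sketch: b.RgTrigger().Trigger("event").Argument("a", "b").Build()
+	// would produce ["RG.TRIGGER", "event", "a", "b"], matching the RedisGears
+	// RG.TRIGGER <trigger> [arg ...] form.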
+ return c +} + +func (c RgTriggerArgument) Build() Completed { + return Completed(c) +} + +type RgTriggerTrigger Completed + +func (c RgTriggerTrigger) Argument(argument ...string) RgTriggerArgument { + c.cs.s = append(c.cs.s, argument...) + return (RgTriggerArgument)(c) +} + +type RgUnregister Completed + +func (b Builder) RgUnregister() (c RgUnregister) { + c = RgUnregister{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RG.UNREGISTER") + return c +} + +func (c RgUnregister) Id(id string) RgUnregisterId { + c.cs.s = append(c.cs.s, id) + return (RgUnregisterId)(c) +} + +type RgUnregisterId Completed + +func (c RgUnregisterId) Build() Completed { + return Completed(c) +} + +type Role Completed + +func (b Builder) Role() (c Role) { + c = Role{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ROLE") + return c +} + +func (c Role) Build() Completed { + return Completed(c) +} + +type Rpop Completed + +func (b Builder) Rpop() (c Rpop) { + c = Rpop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RPOP") + return c +} + +func (c Rpop) Key(key string) RpopKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (RpopKey)(c) +} + +type RpopCount Completed + +func (c RpopCount) Build() Completed { + return Completed(c) +} + +type RpopKey Completed + +func (c RpopKey) Count(count int64) RpopCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (RpopCount)(c) +} + +func (c RpopKey) Build() Completed { + return Completed(c) +} + +type Rpoplpush Completed + +func (b Builder) Rpoplpush() (c Rpoplpush) { + c = Rpoplpush{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RPOPLPUSH") + return c +} + +func (c Rpoplpush) Source(source string) RpoplpushSource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (RpoplpushSource)(c) +} + +type RpoplpushDestination Completed + +func (c RpoplpushDestination) Build() Completed { + return Completed(c) +} + +type RpoplpushSource Completed + +func (c RpoplpushSource) Destination(destination string) RpoplpushDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (RpoplpushDestination)(c) +} + +type Rpush Completed + +func (b Builder) Rpush() (c Rpush) { + c = Rpush{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RPUSH") + return c +} + +func (c Rpush) Key(key string) RpushKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (RpushKey)(c) +} + +type RpushElement Completed + +func (c RpushElement) Element(element ...string) RpushElement { + c.cs.s = append(c.cs.s, element...) + return c +} + +func (c RpushElement) Build() Completed { + return Completed(c) +} + +type RpushKey Completed + +func (c RpushKey) Element(element ...string) RpushElement { + c.cs.s = append(c.cs.s, element...) 
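+	// Illustrative sketch: b.Rpush().Key("mylist").Element("a", "b").Build() would
+	// produce ["RPUSH", "mylist", "a", "b"], with the key's hash slot recorded on
+	// the command via slot()/check().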
+ return (RpushElement)(c) +} + +type Rpushx Completed + +func (b Builder) Rpushx() (c Rpushx) { + c = Rpushx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "RPUSHX") + return c +} + +func (c Rpushx) Key(key string) RpushxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (RpushxKey)(c) +} + +type RpushxElement Completed + +func (c RpushxElement) Element(element ...string) RpushxElement { + c.cs.s = append(c.cs.s, element...) + return c +} + +func (c RpushxElement) Build() Completed { + return Completed(c) +} + +type RpushxKey Completed + +func (c RpushxKey) Element(element ...string) RpushxElement { + c.cs.s = append(c.cs.s, element...) + return (RpushxElement)(c) +} + +type Sadd Completed + +func (b Builder) Sadd() (c Sadd) { + c = Sadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SADD") + return c +} + +func (c Sadd) Key(key string) SaddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SaddKey)(c) +} + +type SaddKey Completed + +func (c SaddKey) Member(member ...string) SaddMember { + c.cs.s = append(c.cs.s, member...) + return (SaddMember)(c) +} + +type SaddMember Completed + +func (c SaddMember) Member(member ...string) SaddMember { + c.cs.s = append(c.cs.s, member...) + return c +} + +func (c SaddMember) Build() Completed { + return Completed(c) +} + +type Save Completed + +func (b Builder) Save() (c Save) { + c = Save{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SAVE") + return c +} + +func (c Save) Build() Completed { + return Completed(c) +} + +type Scan Completed + +func (b Builder) Scan() (c Scan) { + c = Scan{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SCAN") + return c +} + +func (c Scan) Cursor(cursor int64) ScanCursor { + c.cs.s = append(c.cs.s, strconv.FormatInt(cursor, 10)) + return (ScanCursor)(c) +} + +type ScanCount Completed + +func (c ScanCount) Type(typ string) ScanType { + c.cs.s = append(c.cs.s, "TYPE", typ) + return (ScanType)(c) +} + +func (c ScanCount) Build() Completed { + return Completed(c) +} + +type ScanCursor Completed + +func (c ScanCursor) Match(pattern string) ScanMatch { + c.cs.s = append(c.cs.s, "MATCH", pattern) + return (ScanMatch)(c) +} + +func (c ScanCursor) Count(count int64) ScanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (ScanCount)(c) +} + +func (c ScanCursor) Type(typ string) ScanType { + c.cs.s = append(c.cs.s, "TYPE", typ) + return (ScanType)(c) +} + +func (c ScanCursor) Build() Completed { + return Completed(c) +} + +type ScanMatch Completed + +func (c ScanMatch) Count(count int64) ScanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (ScanCount)(c) +} + +func (c ScanMatch) Type(typ string) ScanType { + c.cs.s = append(c.cs.s, "TYPE", typ) + return (ScanType)(c) +} + +func (c ScanMatch) Build() Completed { + return Completed(c) +} + +type ScanType Completed + +func (c ScanType) Build() Completed { + return Completed(c) +} + +type Scard Completed + +func (b Builder) Scard() (c Scard) { + c = Scard{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SCARD") + return c +} + +func (c Scard) Key(key string) ScardKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ScardKey)(c) +} + +type ScardKey Completed + +func (c ScardKey) Build() Completed { + 
return Completed(c) +} + +func (c ScardKey) Cache() Cacheable { + return Cacheable(c) +} + +type ScriptDebug Completed + +func (b Builder) ScriptDebug() (c ScriptDebug) { + c = ScriptDebug{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SCRIPT", "DEBUG") + return c +} + +func (c ScriptDebug) Yes() ScriptDebugModeYes { + c.cs.s = append(c.cs.s, "YES") + return (ScriptDebugModeYes)(c) +} + +func (c ScriptDebug) Sync() ScriptDebugModeSync { + c.cs.s = append(c.cs.s, "SYNC") + return (ScriptDebugModeSync)(c) +} + +func (c ScriptDebug) No() ScriptDebugModeNo { + c.cs.s = append(c.cs.s, "NO") + return (ScriptDebugModeNo)(c) +} + +type ScriptDebugModeNo Completed + +func (c ScriptDebugModeNo) Build() Completed { + return Completed(c) +} + +type ScriptDebugModeSync Completed + +func (c ScriptDebugModeSync) Build() Completed { + return Completed(c) +} + +type ScriptDebugModeYes Completed + +func (c ScriptDebugModeYes) Build() Completed { + return Completed(c) +} + +type ScriptExists Completed + +func (b Builder) ScriptExists() (c ScriptExists) { + c = ScriptExists{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SCRIPT", "EXISTS") + return c +} + +func (c ScriptExists) Sha1(sha1 ...string) ScriptExistsSha1 { + c.cs.s = append(c.cs.s, sha1...) + return (ScriptExistsSha1)(c) +} + +type ScriptExistsSha1 Completed + +func (c ScriptExistsSha1) Sha1(sha1 ...string) ScriptExistsSha1 { + c.cs.s = append(c.cs.s, sha1...) + return c +} + +func (c ScriptExistsSha1) Build() Completed { + return Completed(c) +} + +type ScriptFlush Completed + +func (b Builder) ScriptFlush() (c ScriptFlush) { + c = ScriptFlush{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SCRIPT", "FLUSH") + return c +} + +func (c ScriptFlush) Async() ScriptFlushAsync { + c.cs.s = append(c.cs.s, "ASYNC") + return (ScriptFlushAsync)(c) +} + +func (c ScriptFlush) Sync() ScriptFlushAsyncSync { + c.cs.s = append(c.cs.s, "SYNC") + return (ScriptFlushAsyncSync)(c) +} + +func (c ScriptFlush) Build() Completed { + return Completed(c) +} + +type ScriptFlushAsync Completed + +func (c ScriptFlushAsync) Build() Completed { + return Completed(c) +} + +type ScriptFlushAsyncSync Completed + +func (c ScriptFlushAsyncSync) Build() Completed { + return Completed(c) +} + +type ScriptKill Completed + +func (b Builder) ScriptKill() (c ScriptKill) { + c = ScriptKill{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SCRIPT", "KILL") + return c +} + +func (c ScriptKill) Build() Completed { + return Completed(c) +} + +type ScriptLoad Completed + +func (b Builder) ScriptLoad() (c ScriptLoad) { + c = ScriptLoad{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SCRIPT", "LOAD") + return c +} + +func (c ScriptLoad) Script(script string) ScriptLoadScript { + c.cs.s = append(c.cs.s, script) + return (ScriptLoadScript)(c) +} + +type ScriptLoadScript Completed + +func (c ScriptLoadScript) Build() Completed { + return Completed(c) +} + +type Sdiff Completed + +func (b Builder) Sdiff() (c Sdiff) { + c = Sdiff{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SDIFF") + return c +} + +func (c Sdiff) Key(key ...string) SdiffKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
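+	// Illustrative sketch: b.Sdiff().Key("s1", "s2").Build() would produce
+	// ["SDIFF", "s1", "s2"]; the loop above folds each key's hash slot into c.ks
+	// via check(), which presumably enforces that all keys map to the same slot.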
+ return (SdiffKey)(c) +} + +type SdiffKey Completed + +func (c SdiffKey) Key(key ...string) SdiffKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SdiffKey) Build() Completed { + return Completed(c) +} + +type Sdiffstore Completed + +func (b Builder) Sdiffstore() (c Sdiffstore) { + c = Sdiffstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SDIFFSTORE") + return c +} + +func (c Sdiffstore) Destination(destination string) SdiffstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (SdiffstoreDestination)(c) +} + +type SdiffstoreDestination Completed + +func (c SdiffstoreDestination) Key(key ...string) SdiffstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (SdiffstoreKey)(c) +} + +type SdiffstoreKey Completed + +func (c SdiffstoreKey) Key(key ...string) SdiffstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SdiffstoreKey) Build() Completed { + return Completed(c) +} + +type Select Completed + +func (b Builder) Select() (c Select) { + c = Select{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SELECT") + return c +} + +func (c Select) Index(index int64) SelectIndex { + c.cs.s = append(c.cs.s, strconv.FormatInt(index, 10)) + return (SelectIndex)(c) +} + +type SelectIndex Completed + +func (c SelectIndex) Build() Completed { + return Completed(c) +} + +type SentinelFailover Completed + +func (b Builder) SentinelFailover() (c SentinelFailover) { + c = SentinelFailover{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SENTINEL", "FAILOVER") + return c +} + +func (c SentinelFailover) Master(master string) SentinelFailoverMaster { + c.cs.s = append(c.cs.s, master) + return (SentinelFailoverMaster)(c) +} + +type SentinelFailoverMaster Completed + +func (c SentinelFailoverMaster) Build() Completed { + return Completed(c) +} + +type SentinelGetMasterAddrByName Completed + +func (b Builder) SentinelGetMasterAddrByName() (c SentinelGetMasterAddrByName) { + c = SentinelGetMasterAddrByName{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SENTINEL", "GET-MASTER-ADDR-BY-NAME") + return c +} + +func (c SentinelGetMasterAddrByName) Master(master string) SentinelGetMasterAddrByNameMaster { + c.cs.s = append(c.cs.s, master) + return (SentinelGetMasterAddrByNameMaster)(c) +} + +type SentinelGetMasterAddrByNameMaster Completed + +func (c SentinelGetMasterAddrByNameMaster) Build() Completed { + return Completed(c) +} + +type SentinelSentinels Completed + +func (b Builder) SentinelSentinels() (c SentinelSentinels) { + c = SentinelSentinels{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SENTINEL", "SENTINELS") + return c +} + +func (c SentinelSentinels) Master(master string) SentinelSentinelsMaster { + c.cs.s = append(c.cs.s, master) + return (SentinelSentinelsMaster)(c) +} + +type SentinelSentinelsMaster Completed + +func (c SentinelSentinelsMaster) Build() Completed { + return Completed(c) +} + +type Set Completed + +func (b Builder) Set() (c Set) { + 
c = Set{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SET") + return c +} + +func (c Set) Key(key string) SetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SetKey)(c) +} + +type SetConditionNx Completed + +func (c SetConditionNx) Get() SetGet { + c.cs.s = append(c.cs.s, "GET") + return (SetGet)(c) +} + +func (c SetConditionNx) ExSeconds(seconds int64) SetExpirationExSeconds { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(seconds, 10)) + return (SetExpirationExSeconds)(c) +} + +func (c SetConditionNx) PxMilliseconds(milliseconds int64) SetExpirationPxMilliseconds { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(milliseconds, 10)) + return (SetExpirationPxMilliseconds)(c) +} + +func (c SetConditionNx) ExatTimestamp(timestamp int64) SetExpirationExatTimestamp { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp, 10)) + return (SetExpirationExatTimestamp)(c) +} + +func (c SetConditionNx) PxatMillisecondsTimestamp(millisecondsTimestamp int64) SetExpirationPxatMillisecondsTimestamp { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(millisecondsTimestamp, 10)) + return (SetExpirationPxatMillisecondsTimestamp)(c) +} + +func (c SetConditionNx) Keepttl() SetExpirationKeepttl { + c.cs.s = append(c.cs.s, "KEEPTTL") + return (SetExpirationKeepttl)(c) +} + +func (c SetConditionNx) Build() Completed { + return Completed(c) +} + +type SetConditionXx Completed + +func (c SetConditionXx) Get() SetGet { + c.cs.s = append(c.cs.s, "GET") + return (SetGet)(c) +} + +func (c SetConditionXx) ExSeconds(seconds int64) SetExpirationExSeconds { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(seconds, 10)) + return (SetExpirationExSeconds)(c) +} + +func (c SetConditionXx) PxMilliseconds(milliseconds int64) SetExpirationPxMilliseconds { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(milliseconds, 10)) + return (SetExpirationPxMilliseconds)(c) +} + +func (c SetConditionXx) ExatTimestamp(timestamp int64) SetExpirationExatTimestamp { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp, 10)) + return (SetExpirationExatTimestamp)(c) +} + +func (c SetConditionXx) PxatMillisecondsTimestamp(millisecondsTimestamp int64) SetExpirationPxatMillisecondsTimestamp { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(millisecondsTimestamp, 10)) + return (SetExpirationPxatMillisecondsTimestamp)(c) +} + +func (c SetConditionXx) Keepttl() SetExpirationKeepttl { + c.cs.s = append(c.cs.s, "KEEPTTL") + return (SetExpirationKeepttl)(c) +} + +func (c SetConditionXx) Build() Completed { + return Completed(c) +} + +type SetExpirationExSeconds Completed + +func (c SetExpirationExSeconds) Build() Completed { + return Completed(c) +} + +type SetExpirationExatTimestamp Completed + +func (c SetExpirationExatTimestamp) Build() Completed { + return Completed(c) +} + +type SetExpirationKeepttl Completed + +func (c SetExpirationKeepttl) Build() Completed { + return Completed(c) +} + +type SetExpirationPxMilliseconds Completed + +func (c SetExpirationPxMilliseconds) Build() Completed { + return Completed(c) +} + +type SetExpirationPxatMillisecondsTimestamp Completed + +func (c SetExpirationPxatMillisecondsTimestamp) Build() Completed { + return Completed(c) +} + +type SetGet Completed + +func (c SetGet) ExSeconds(seconds int64) SetExpirationExSeconds { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(seconds, 10)) + return (SetExpirationExSeconds)(c) +} + +func (c SetGet) PxMilliseconds(milliseconds int64) 
SetExpirationPxMilliseconds { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(milliseconds, 10)) + return (SetExpirationPxMilliseconds)(c) +} + +func (c SetGet) ExatTimestamp(timestamp int64) SetExpirationExatTimestamp { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp, 10)) + return (SetExpirationExatTimestamp)(c) +} + +func (c SetGet) PxatMillisecondsTimestamp(millisecondsTimestamp int64) SetExpirationPxatMillisecondsTimestamp { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(millisecondsTimestamp, 10)) + return (SetExpirationPxatMillisecondsTimestamp)(c) +} + +func (c SetGet) Keepttl() SetExpirationKeepttl { + c.cs.s = append(c.cs.s, "KEEPTTL") + return (SetExpirationKeepttl)(c) +} + +func (c SetGet) Build() Completed { + return Completed(c) +} + +type SetKey Completed + +func (c SetKey) Value(value string) SetValue { + c.cs.s = append(c.cs.s, value) + return (SetValue)(c) +} + +type SetValue Completed + +func (c SetValue) Nx() SetConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (SetConditionNx)(c) +} + +func (c SetValue) Xx() SetConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (SetConditionXx)(c) +} + +func (c SetValue) Get() SetGet { + c.cs.s = append(c.cs.s, "GET") + return (SetGet)(c) +} + +func (c SetValue) ExSeconds(seconds int64) SetExpirationExSeconds { + c.cs.s = append(c.cs.s, "EX", strconv.FormatInt(seconds, 10)) + return (SetExpirationExSeconds)(c) +} + +func (c SetValue) PxMilliseconds(milliseconds int64) SetExpirationPxMilliseconds { + c.cs.s = append(c.cs.s, "PX", strconv.FormatInt(milliseconds, 10)) + return (SetExpirationPxMilliseconds)(c) +} + +func (c SetValue) ExatTimestamp(timestamp int64) SetExpirationExatTimestamp { + c.cs.s = append(c.cs.s, "EXAT", strconv.FormatInt(timestamp, 10)) + return (SetExpirationExatTimestamp)(c) +} + +func (c SetValue) PxatMillisecondsTimestamp(millisecondsTimestamp int64) SetExpirationPxatMillisecondsTimestamp { + c.cs.s = append(c.cs.s, "PXAT", strconv.FormatInt(millisecondsTimestamp, 10)) + return (SetExpirationPxatMillisecondsTimestamp)(c) +} + +func (c SetValue) Keepttl() SetExpirationKeepttl { + c.cs.s = append(c.cs.s, "KEEPTTL") + return (SetExpirationKeepttl)(c) +} + +func (c SetValue) Build() Completed { + return Completed(c) +} + +type Setbit Completed + +func (b Builder) Setbit() (c Setbit) { + c = Setbit{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SETBIT") + return c +} + +func (c Setbit) Key(key string) SetbitKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SetbitKey)(c) +} + +type SetbitKey Completed + +func (c SetbitKey) Offset(offset int64) SetbitOffset { + c.cs.s = append(c.cs.s, strconv.FormatInt(offset, 10)) + return (SetbitOffset)(c) +} + +type SetbitOffset Completed + +func (c SetbitOffset) Value(value int64) SetbitValue { + c.cs.s = append(c.cs.s, strconv.FormatInt(value, 10)) + return (SetbitValue)(c) +} + +type SetbitValue Completed + +func (c SetbitValue) Build() Completed { + return Completed(c) +} + +type Setex Completed + +func (b Builder) Setex() (c Setex) { + c = Setex{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SETEX") + return c +} + +func (c Setex) Key(key string) SetexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SetexKey)(c) +} + +type SetexKey Completed + +func (c SetexKey) Seconds(seconds int64) SetexSeconds { + c.cs.s = append(c.cs.s, 
strconv.FormatInt(seconds, 10)) + return (SetexSeconds)(c) +} + +type SetexSeconds Completed + +func (c SetexSeconds) Value(value string) SetexValue { + c.cs.s = append(c.cs.s, value) + return (SetexValue)(c) +} + +type SetexValue Completed + +func (c SetexValue) Build() Completed { + return Completed(c) +} + +type Setnx Completed + +func (b Builder) Setnx() (c Setnx) { + c = Setnx{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SETNX") + return c +} + +func (c Setnx) Key(key string) SetnxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SetnxKey)(c) +} + +type SetnxKey Completed + +func (c SetnxKey) Value(value string) SetnxValue { + c.cs.s = append(c.cs.s, value) + return (SetnxValue)(c) +} + +type SetnxValue Completed + +func (c SetnxValue) Build() Completed { + return Completed(c) +} + +type Setrange Completed + +func (b Builder) Setrange() (c Setrange) { + c = Setrange{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SETRANGE") + return c +} + +func (c Setrange) Key(key string) SetrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SetrangeKey)(c) +} + +type SetrangeKey Completed + +func (c SetrangeKey) Offset(offset int64) SetrangeOffset { + c.cs.s = append(c.cs.s, strconv.FormatInt(offset, 10)) + return (SetrangeOffset)(c) +} + +type SetrangeOffset Completed + +func (c SetrangeOffset) Value(value string) SetrangeValue { + c.cs.s = append(c.cs.s, value) + return (SetrangeValue)(c) +} + +type SetrangeValue Completed + +func (c SetrangeValue) Build() Completed { + return Completed(c) +} + +type Shutdown Completed + +func (b Builder) Shutdown() (c Shutdown) { + c = Shutdown{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SHUTDOWN") + return c +} + +func (c Shutdown) Nosave() ShutdownSaveModeNosave { + c.cs.s = append(c.cs.s, "NOSAVE") + return (ShutdownSaveModeNosave)(c) +} + +func (c Shutdown) Save() ShutdownSaveModeSave { + c.cs.s = append(c.cs.s, "SAVE") + return (ShutdownSaveModeSave)(c) +} + +func (c Shutdown) Now() ShutdownNow { + c.cs.s = append(c.cs.s, "NOW") + return (ShutdownNow)(c) +} + +func (c Shutdown) Force() ShutdownForce { + c.cs.s = append(c.cs.s, "FORCE") + return (ShutdownForce)(c) +} + +func (c Shutdown) Abort() ShutdownAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (ShutdownAbort)(c) +} + +func (c Shutdown) Build() Completed { + return Completed(c) +} + +type ShutdownAbort Completed + +func (c ShutdownAbort) Build() Completed { + return Completed(c) +} + +type ShutdownForce Completed + +func (c ShutdownForce) Abort() ShutdownAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (ShutdownAbort)(c) +} + +func (c ShutdownForce) Build() Completed { + return Completed(c) +} + +type ShutdownNow Completed + +func (c ShutdownNow) Force() ShutdownForce { + c.cs.s = append(c.cs.s, "FORCE") + return (ShutdownForce)(c) +} + +func (c ShutdownNow) Abort() ShutdownAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (ShutdownAbort)(c) +} + +func (c ShutdownNow) Build() Completed { + return Completed(c) +} + +type ShutdownSaveModeNosave Completed + +func (c ShutdownSaveModeNosave) Now() ShutdownNow { + c.cs.s = append(c.cs.s, "NOW") + return (ShutdownNow)(c) +} + +func (c ShutdownSaveModeNosave) Force() ShutdownForce { + c.cs.s = append(c.cs.s, "FORCE") + return (ShutdownForce)(c) +} + +func (c ShutdownSaveModeNosave) Abort() ShutdownAbort { + c.cs.s = append(c.cs.s, "ABORT") + 
return (ShutdownAbort)(c) +} + +func (c ShutdownSaveModeNosave) Build() Completed { + return Completed(c) +} + +type ShutdownSaveModeSave Completed + +func (c ShutdownSaveModeSave) Now() ShutdownNow { + c.cs.s = append(c.cs.s, "NOW") + return (ShutdownNow)(c) +} + +func (c ShutdownSaveModeSave) Force() ShutdownForce { + c.cs.s = append(c.cs.s, "FORCE") + return (ShutdownForce)(c) +} + +func (c ShutdownSaveModeSave) Abort() ShutdownAbort { + c.cs.s = append(c.cs.s, "ABORT") + return (ShutdownAbort)(c) +} + +func (c ShutdownSaveModeSave) Build() Completed { + return Completed(c) +} + +type Sinter Completed + +func (b Builder) Sinter() (c Sinter) { + c = Sinter{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SINTER") + return c +} + +func (c Sinter) Key(key ...string) SinterKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (SinterKey)(c) +} + +type SinterKey Completed + +func (c SinterKey) Key(key ...string) SinterKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SinterKey) Build() Completed { + return Completed(c) +} + +type Sintercard Completed + +func (b Builder) Sintercard() (c Sintercard) { + c = Sintercard{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SINTERCARD") + return c +} + +func (c Sintercard) Numkeys(numkeys int64) SintercardNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (SintercardNumkeys)(c) +} + +type SintercardKey Completed + +func (c SintercardKey) Key(key ...string) SintercardKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SintercardKey) Limit(limit int64) SintercardLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(limit, 10)) + return (SintercardLimit)(c) +} + +func (c SintercardKey) Build() Completed { + return Completed(c) +} + +type SintercardLimit Completed + +func (c SintercardLimit) Build() Completed { + return Completed(c) +} + +type SintercardNumkeys Completed + +func (c SintercardNumkeys) Key(key ...string) SintercardKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (SintercardKey)(c) +} + +type Sinterstore Completed + +func (b Builder) Sinterstore() (c Sinterstore) { + c = Sinterstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SINTERSTORE") + return c +} + +func (c Sinterstore) Destination(destination string) SinterstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (SinterstoreDestination)(c) +} + +type SinterstoreDestination Completed + +func (c SinterstoreDestination) Key(key ...string) SinterstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
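+	// Illustrative sketch: b.Sinterstore().Destination("dst").Key("s1", "s2").Build()
+	// would produce ["SINTERSTORE", "dst", "s1", "s2"], with the destination and
+	// source key slots combined in the same way as for SDIFF above.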
+ return (SinterstoreKey)(c) +} + +type SinterstoreKey Completed + +func (c SinterstoreKey) Key(key ...string) SinterstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SinterstoreKey) Build() Completed { + return Completed(c) +} + +type Sismember Completed + +func (b Builder) Sismember() (c Sismember) { + c = Sismember{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SISMEMBER") + return c +} + +func (c Sismember) Key(key string) SismemberKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SismemberKey)(c) +} + +type SismemberKey Completed + +func (c SismemberKey) Member(member string) SismemberMember { + c.cs.s = append(c.cs.s, member) + return (SismemberMember)(c) +} + +type SismemberMember Completed + +func (c SismemberMember) Build() Completed { + return Completed(c) +} + +func (c SismemberMember) Cache() Cacheable { + return Cacheable(c) +} + +type Slaveof Completed + +func (b Builder) Slaveof() (c Slaveof) { + c = Slaveof{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SLAVEOF") + return c +} + +func (c Slaveof) Host(host string) SlaveofHost { + c.cs.s = append(c.cs.s, host) + return (SlaveofHost)(c) +} + +type SlaveofHost Completed + +func (c SlaveofHost) Port(port int64) SlaveofPort { + c.cs.s = append(c.cs.s, strconv.FormatInt(port, 10)) + return (SlaveofPort)(c) +} + +type SlaveofPort Completed + +func (c SlaveofPort) Build() Completed { + return Completed(c) +} + +type SlowlogGet Completed + +func (b Builder) SlowlogGet() (c SlowlogGet) { + c = SlowlogGet{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SLOWLOG", "GET") + return c +} + +func (c SlowlogGet) Count(count int64) SlowlogGetCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (SlowlogGetCount)(c) +} + +func (c SlowlogGet) Build() Completed { + return Completed(c) +} + +type SlowlogGetCount Completed + +func (c SlowlogGetCount) Build() Completed { + return Completed(c) +} + +type SlowlogHelp Completed + +func (b Builder) SlowlogHelp() (c SlowlogHelp) { + c = SlowlogHelp{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SLOWLOG", "HELP") + return c +} + +func (c SlowlogHelp) Build() Completed { + return Completed(c) +} + +type SlowlogLen Completed + +func (b Builder) SlowlogLen() (c SlowlogLen) { + c = SlowlogLen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SLOWLOG", "LEN") + return c +} + +func (c SlowlogLen) Build() Completed { + return Completed(c) +} + +type SlowlogReset Completed + +func (b Builder) SlowlogReset() (c SlowlogReset) { + c = SlowlogReset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SLOWLOG", "RESET") + return c +} + +func (c SlowlogReset) Build() Completed { + return Completed(c) +} + +type Smembers Completed + +func (b Builder) Smembers() (c Smembers) { + c = Smembers{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SMEMBERS") + return c +} + +func (c Smembers) Key(key string) SmembersKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SmembersKey)(c) +} + +type SmembersKey Completed + +func (c SmembersKey) Build() Completed { + return Completed(c) +} + +func (c SmembersKey) Cache() Cacheable { + return Cacheable(c) +} + +type 
Smismember Completed + +func (b Builder) Smismember() (c Smismember) { + c = Smismember{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SMISMEMBER") + return c +} + +func (c Smismember) Key(key string) SmismemberKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SmismemberKey)(c) +} + +type SmismemberKey Completed + +func (c SmismemberKey) Member(member ...string) SmismemberMember { + c.cs.s = append(c.cs.s, member...) + return (SmismemberMember)(c) +} + +type SmismemberMember Completed + +func (c SmismemberMember) Member(member ...string) SmismemberMember { + c.cs.s = append(c.cs.s, member...) + return c +} + +func (c SmismemberMember) Build() Completed { + return Completed(c) +} + +func (c SmismemberMember) Cache() Cacheable { + return Cacheable(c) +} + +type Smove Completed + +func (b Builder) Smove() (c Smove) { + c = Smove{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SMOVE") + return c +} + +func (c Smove) Source(source string) SmoveSource { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(source) + } else { + c.ks = check(c.ks, slot(source)) + } + c.cs.s = append(c.cs.s, source) + return (SmoveSource)(c) +} + +type SmoveDestination Completed + +func (c SmoveDestination) Member(member string) SmoveMember { + c.cs.s = append(c.cs.s, member) + return (SmoveMember)(c) +} + +type SmoveMember Completed + +func (c SmoveMember) Build() Completed { + return Completed(c) +} + +type SmoveSource Completed + +func (c SmoveSource) Destination(destination string) SmoveDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (SmoveDestination)(c) +} + +type Sort Completed + +func (b Builder) Sort() (c Sort) { + c = Sort{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SORT") + return c +} + +func (c Sort) Key(key string) SortKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SortKey)(c) +} + +type SortBy Completed + +func (c SortBy) Limit(offset int64, count int64) SortLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (SortLimit)(c) +} + +func (c SortBy) Get() SortGet { + return (SortGet)(c) +} + +func (c SortBy) Asc() SortOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortOrderAsc)(c) +} + +func (c SortBy) Desc() SortOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortOrderDesc)(c) +} + +func (c SortBy) Alpha() SortSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortSortingAlpha)(c) +} + +func (c SortBy) Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortBy) Build() Completed { + return Completed(c) +} + +type SortGet Completed + +func (c SortGet) Get(pattern string) SortGet { + c.cs.s = append(c.cs.s, "GET", pattern) + return c +} + +func (c SortGet) Asc() SortOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortOrderAsc)(c) +} + +func (c SortGet) Desc() SortOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortOrderDesc)(c) +} + +func (c SortGet) Alpha() SortSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortSortingAlpha)(c) +} + +func (c SortGet) 
Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortGet) Build() Completed { + return Completed(c) +} + +type SortKey Completed + +func (c SortKey) By(pattern string) SortBy { + c.cs.s = append(c.cs.s, "BY", pattern) + return (SortBy)(c) +} + +func (c SortKey) Limit(offset int64, count int64) SortLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (SortLimit)(c) +} + +func (c SortKey) Get() SortGet { + return (SortGet)(c) +} + +func (c SortKey) Asc() SortOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortOrderAsc)(c) +} + +func (c SortKey) Desc() SortOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortOrderDesc)(c) +} + +func (c SortKey) Alpha() SortSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortSortingAlpha)(c) +} + +func (c SortKey) Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortKey) Build() Completed { + return Completed(c) +} + +type SortLimit Completed + +func (c SortLimit) Get() SortGet { + return (SortGet)(c) +} + +func (c SortLimit) Asc() SortOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortOrderAsc)(c) +} + +func (c SortLimit) Desc() SortOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortOrderDesc)(c) +} + +func (c SortLimit) Alpha() SortSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortSortingAlpha)(c) +} + +func (c SortLimit) Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortLimit) Build() Completed { + return Completed(c) +} + +type SortOrderAsc Completed + +func (c SortOrderAsc) Alpha() SortSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortSortingAlpha)(c) +} + +func (c SortOrderAsc) Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortOrderAsc) Build() Completed { + return Completed(c) +} + +type SortOrderDesc Completed + +func (c SortOrderDesc) Alpha() SortSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortSortingAlpha)(c) +} + +func (c SortOrderDesc) Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortOrderDesc) Build() Completed { + return Completed(c) +} + +type SortRo Completed + +func (b Builder) SortRo() (c SortRo) { + c = SortRo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SORT_RO") + return c +} + +func (c SortRo) Key(key string) SortRoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SortRoKey)(c) +} + +type SortRoBy Completed + +func (c SortRoBy) Limit(offset int64, count int64) SortRoLimit { + c.cs.s = append(c.cs.s, "LIMIT", 
strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (SortRoLimit)(c) +} + +func (c SortRoBy) Get() SortRoGet { + return (SortRoGet)(c) +} + +func (c SortRoBy) Asc() SortRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortRoOrderAsc)(c) +} + +func (c SortRoBy) Desc() SortRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortRoOrderDesc)(c) +} + +func (c SortRoBy) Alpha() SortRoSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortRoSortingAlpha)(c) +} + +func (c SortRoBy) Build() Completed { + return Completed(c) +} + +func (c SortRoBy) Cache() Cacheable { + return Cacheable(c) +} + +type SortRoGet Completed + +func (c SortRoGet) Get(pattern string) SortRoGet { + c.cs.s = append(c.cs.s, "GET", pattern) + return c +} + +func (c SortRoGet) Asc() SortRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortRoOrderAsc)(c) +} + +func (c SortRoGet) Desc() SortRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortRoOrderDesc)(c) +} + +func (c SortRoGet) Alpha() SortRoSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortRoSortingAlpha)(c) +} + +func (c SortRoGet) Build() Completed { + return Completed(c) +} + +func (c SortRoGet) Cache() Cacheable { + return Cacheable(c) +} + +type SortRoKey Completed + +func (c SortRoKey) By(pattern string) SortRoBy { + c.cs.s = append(c.cs.s, "BY", pattern) + return (SortRoBy)(c) +} + +func (c SortRoKey) Limit(offset int64, count int64) SortRoLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (SortRoLimit)(c) +} + +func (c SortRoKey) Get() SortRoGet { + return (SortRoGet)(c) +} + +func (c SortRoKey) Asc() SortRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortRoOrderAsc)(c) +} + +func (c SortRoKey) Desc() SortRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortRoOrderDesc)(c) +} + +func (c SortRoKey) Alpha() SortRoSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortRoSortingAlpha)(c) +} + +func (c SortRoKey) Build() Completed { + return Completed(c) +} + +func (c SortRoKey) Cache() Cacheable { + return Cacheable(c) +} + +type SortRoLimit Completed + +func (c SortRoLimit) Get() SortRoGet { + return (SortRoGet)(c) +} + +func (c SortRoLimit) Asc() SortRoOrderAsc { + c.cs.s = append(c.cs.s, "ASC") + return (SortRoOrderAsc)(c) +} + +func (c SortRoLimit) Desc() SortRoOrderDesc { + c.cs.s = append(c.cs.s, "DESC") + return (SortRoOrderDesc)(c) +} + +func (c SortRoLimit) Alpha() SortRoSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortRoSortingAlpha)(c) +} + +func (c SortRoLimit) Build() Completed { + return Completed(c) +} + +func (c SortRoLimit) Cache() Cacheable { + return Cacheable(c) +} + +type SortRoOrderAsc Completed + +func (c SortRoOrderAsc) Alpha() SortRoSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortRoSortingAlpha)(c) +} + +func (c SortRoOrderAsc) Build() Completed { + return Completed(c) +} + +func (c SortRoOrderAsc) Cache() Cacheable { + return Cacheable(c) +} + +type SortRoOrderDesc Completed + +func (c SortRoOrderDesc) Alpha() SortRoSortingAlpha { + c.cs.s = append(c.cs.s, "ALPHA") + return (SortRoSortingAlpha)(c) +} + +func (c SortRoOrderDesc) Build() Completed { + return Completed(c) +} + +func (c SortRoOrderDesc) Cache() Cacheable { + return Cacheable(c) +} + +type SortRoSortingAlpha Completed + +func (c SortRoSortingAlpha) Build() Completed { + return Completed(c) +} + +func (c SortRoSortingAlpha) Cache() Cacheable { + return Cacheable(c) +} + +type SortSortingAlpha 
Completed + +func (c SortSortingAlpha) Store(destination string) SortStore { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, "STORE", destination) + return (SortStore)(c) +} + +func (c SortSortingAlpha) Build() Completed { + return Completed(c) +} + +type SortStore Completed + +func (c SortStore) Build() Completed { + return Completed(c) +} + +type Spop Completed + +func (b Builder) Spop() (c Spop) { + c = Spop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SPOP") + return c +} + +func (c Spop) Key(key string) SpopKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SpopKey)(c) +} + +type SpopCount Completed + +func (c SpopCount) Build() Completed { + return Completed(c) +} + +type SpopKey Completed + +func (c SpopKey) Count(count int64) SpopCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (SpopCount)(c) +} + +func (c SpopKey) Build() Completed { + return Completed(c) +} + +type Spublish Completed + +func (b Builder) Spublish() (c Spublish) { + c = Spublish{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SPUBLISH") + return c +} + +func (c Spublish) Channel(channel string) SpublishChannel { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(channel) + } else { + c.ks = check(c.ks, slot(channel)) + } + c.cs.s = append(c.cs.s, channel) + return (SpublishChannel)(c) +} + +type SpublishChannel Completed + +func (c SpublishChannel) Message(message string) SpublishMessage { + c.cs.s = append(c.cs.s, message) + return (SpublishMessage)(c) +} + +type SpublishMessage Completed + +func (c SpublishMessage) Build() Completed { + return Completed(c) +} + +type Srandmember Completed + +func (b Builder) Srandmember() (c Srandmember) { + c = Srandmember{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SRANDMEMBER") + return c +} + +func (c Srandmember) Key(key string) SrandmemberKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SrandmemberKey)(c) +} + +type SrandmemberCount Completed + +func (c SrandmemberCount) Build() Completed { + return Completed(c) +} + +type SrandmemberKey Completed + +func (c SrandmemberKey) Count(count int64) SrandmemberCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (SrandmemberCount)(c) +} + +func (c SrandmemberKey) Build() Completed { + return Completed(c) +} + +type Srem Completed + +func (b Builder) Srem() (c Srem) { + c = Srem{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SREM") + return c +} + +func (c Srem) Key(key string) SremKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SremKey)(c) +} + +type SremKey Completed + +func (c SremKey) Member(member ...string) SremMember { + c.cs.s = append(c.cs.s, member...) + return (SremMember)(c) +} + +type SremMember Completed + +func (c SremMember) Member(member ...string) SremMember { + c.cs.s = append(c.cs.s, member...) 
+ return c +} + +func (c SremMember) Build() Completed { + return Completed(c) +} + +type Sscan Completed + +func (b Builder) Sscan() (c Sscan) { + c = Sscan{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SSCAN") + return c +} + +func (c Sscan) Key(key string) SscanKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (SscanKey)(c) +} + +type SscanCount Completed + +func (c SscanCount) Build() Completed { + return Completed(c) +} + +type SscanCursor Completed + +func (c SscanCursor) Match(pattern string) SscanMatch { + c.cs.s = append(c.cs.s, "MATCH", pattern) + return (SscanMatch)(c) +} + +func (c SscanCursor) Count(count int64) SscanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (SscanCount)(c) +} + +func (c SscanCursor) Build() Completed { + return Completed(c) +} + +type SscanKey Completed + +func (c SscanKey) Cursor(cursor int64) SscanCursor { + c.cs.s = append(c.cs.s, strconv.FormatInt(cursor, 10)) + return (SscanCursor)(c) +} + +type SscanMatch Completed + +func (c SscanMatch) Count(count int64) SscanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (SscanCount)(c) +} + +func (c SscanMatch) Build() Completed { + return Completed(c) +} + +type Ssubscribe Completed + +func (b Builder) Ssubscribe() (c Ssubscribe) { + c = Ssubscribe{cs: get(), ks: b.ks, cf: noRetTag} + c.cs.s = append(c.cs.s, "SSUBSCRIBE") + return c +} + +func (c Ssubscribe) Channel(channel ...string) SsubscribeChannel { + if c.ks&NoSlot == NoSlot { + for _, k := range channel { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range channel { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, channel...) + return (SsubscribeChannel)(c) +} + +type SsubscribeChannel Completed + +func (c SsubscribeChannel) Channel(channel ...string) SsubscribeChannel { + if c.ks&NoSlot == NoSlot { + for _, k := range channel { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range channel { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, channel...) + return c +} + +func (c SsubscribeChannel) Build() Completed { + return Completed(c) +} + +type Strlen Completed + +func (b Builder) Strlen() (c Strlen) { + c = Strlen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "STRLEN") + return c +} + +func (c Strlen) Key(key string) StrlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (StrlenKey)(c) +} + +type StrlenKey Completed + +func (c StrlenKey) Build() Completed { + return Completed(c) +} + +func (c StrlenKey) Cache() Cacheable { + return Cacheable(c) +} + +type Subscribe Completed + +func (b Builder) Subscribe() (c Subscribe) { + c = Subscribe{cs: get(), ks: b.ks, cf: noRetTag} + c.cs.s = append(c.cs.s, "SUBSCRIBE") + return c +} + +func (c Subscribe) Channel(channel ...string) SubscribeChannel { + c.cs.s = append(c.cs.s, channel...) + return (SubscribeChannel)(c) +} + +type SubscribeChannel Completed + +func (c SubscribeChannel) Channel(channel ...string) SubscribeChannel { + c.cs.s = append(c.cs.s, channel...) 
+ return c +} + +func (c SubscribeChannel) Build() Completed { + return Completed(c) +} + +type Sunion Completed + +func (b Builder) Sunion() (c Sunion) { + c = Sunion{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "SUNION") + return c +} + +func (c Sunion) Key(key ...string) SunionKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (SunionKey)(c) +} + +type SunionKey Completed + +func (c SunionKey) Key(key ...string) SunionKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SunionKey) Build() Completed { + return Completed(c) +} + +type Sunionstore Completed + +func (b Builder) Sunionstore() (c Sunionstore) { + c = Sunionstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SUNIONSTORE") + return c +} + +func (c Sunionstore) Destination(destination string) SunionstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (SunionstoreDestination)(c) +} + +type SunionstoreDestination Completed + +func (c SunionstoreDestination) Key(key ...string) SunionstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (SunionstoreKey)(c) +} + +type SunionstoreKey Completed + +func (c SunionstoreKey) Key(key ...string) SunionstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c SunionstoreKey) Build() Completed { + return Completed(c) +} + +type Sunsubscribe Completed + +func (b Builder) Sunsubscribe() (c Sunsubscribe) { + c = Sunsubscribe{cs: get(), ks: b.ks, cf: noRetTag} + c.cs.s = append(c.cs.s, "SUNSUBSCRIBE") + return c +} + +func (c Sunsubscribe) Channel(channel ...string) SunsubscribeChannel { + if c.ks&NoSlot == NoSlot { + for _, k := range channel { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range channel { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, channel...) + return (SunsubscribeChannel)(c) +} + +func (c Sunsubscribe) Build() Completed { + return Completed(c) +} + +type SunsubscribeChannel Completed + +func (c SunsubscribeChannel) Channel(channel ...string) SunsubscribeChannel { + if c.ks&NoSlot == NoSlot { + for _, k := range channel { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range channel { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, channel...) 
+ return c +} + +func (c SunsubscribeChannel) Build() Completed { + return Completed(c) +} + +type Swapdb Completed + +func (b Builder) Swapdb() (c Swapdb) { + c = Swapdb{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SWAPDB") + return c +} + +func (c Swapdb) Index1(index1 int64) SwapdbIndex1 { + c.cs.s = append(c.cs.s, strconv.FormatInt(index1, 10)) + return (SwapdbIndex1)(c) +} + +type SwapdbIndex1 Completed + +func (c SwapdbIndex1) Index2(index2 int64) SwapdbIndex2 { + c.cs.s = append(c.cs.s, strconv.FormatInt(index2, 10)) + return (SwapdbIndex2)(c) +} + +type SwapdbIndex2 Completed + +func (c SwapdbIndex2) Build() Completed { + return Completed(c) +} + +type Sync Completed + +func (b Builder) Sync() (c Sync) { + c = Sync{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "SYNC") + return c +} + +func (c Sync) Build() Completed { + return Completed(c) +} + +type TdigestAdd Completed + +func (b Builder) TdigestAdd() (c TdigestAdd) { + c = TdigestAdd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.ADD") + return c +} + +func (c TdigestAdd) Key(key string) TdigestAddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestAddKey)(c) +} + +type TdigestAddKey Completed + +func (c TdigestAddKey) Value(value float64) TdigestAddValuesValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return (TdigestAddValuesValue)(c) +} + +type TdigestAddValuesValue Completed + +func (c TdigestAddValuesValue) Value(value float64) TdigestAddValuesValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return c +} + +func (c TdigestAddValuesValue) Build() Completed { + return Completed(c) +} + +type TdigestByrank Completed + +func (b Builder) TdigestByrank() (c TdigestByrank) { + c = TdigestByrank{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.BYRANK") + return c +} + +func (c TdigestByrank) Key(key string) TdigestByrankKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestByrankKey)(c) +} + +type TdigestByrankKey Completed + +func (c TdigestByrankKey) Rank(rank ...float64) TdigestByrankRank { + for _, n := range rank { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (TdigestByrankRank)(c) +} + +type TdigestByrankRank Completed + +func (c TdigestByrankRank) Rank(rank ...float64) TdigestByrankRank { + for _, n := range rank { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c TdigestByrankRank) Build() Completed { + return Completed(c) +} + +type TdigestByrevrank Completed + +func (b Builder) TdigestByrevrank() (c TdigestByrevrank) { + c = TdigestByrevrank{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.BYREVRANK") + return c +} + +func (c TdigestByrevrank) Key(key string) TdigestByrevrankKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestByrevrankKey)(c) +} + +type TdigestByrevrankKey Completed + +func (c TdigestByrevrankKey) ReverseRank(reverseRank ...float64) TdigestByrevrankReverseRank { + for _, n := range reverseRank { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (TdigestByrevrankReverseRank)(c) +} + +type TdigestByrevrankReverseRank Completed + +func (c TdigestByrevrankReverseRank) ReverseRank(reverseRank ...float64) 
TdigestByrevrankReverseRank { + for _, n := range reverseRank { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c TdigestByrevrankReverseRank) Build() Completed { + return Completed(c) +} + +type TdigestCdf Completed + +func (b Builder) TdigestCdf() (c TdigestCdf) { + c = TdigestCdf{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.CDF") + return c +} + +func (c TdigestCdf) Key(key string) TdigestCdfKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestCdfKey)(c) +} + +type TdigestCdfKey Completed + +func (c TdigestCdfKey) Value(value ...float64) TdigestCdfValue { + for _, n := range value { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (TdigestCdfValue)(c) +} + +type TdigestCdfValue Completed + +func (c TdigestCdfValue) Value(value ...float64) TdigestCdfValue { + for _, n := range value { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c TdigestCdfValue) Build() Completed { + return Completed(c) +} + +type TdigestCreate Completed + +func (b Builder) TdigestCreate() (c TdigestCreate) { + c = TdigestCreate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.CREATE") + return c +} + +func (c TdigestCreate) Key(key string) TdigestCreateKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestCreateKey)(c) +} + +type TdigestCreateCompression Completed + +func (c TdigestCreateCompression) Build() Completed { + return Completed(c) +} + +type TdigestCreateKey Completed + +func (c TdigestCreateKey) Compression(compression int64) TdigestCreateCompression { + c.cs.s = append(c.cs.s, "COMPRESSION", strconv.FormatInt(compression, 10)) + return (TdigestCreateCompression)(c) +} + +func (c TdigestCreateKey) Build() Completed { + return Completed(c) +} + +type TdigestInfo Completed + +func (b Builder) TdigestInfo() (c TdigestInfo) { + c = TdigestInfo{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.INFO") + return c +} + +func (c TdigestInfo) Key(key string) TdigestInfoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestInfoKey)(c) +} + +type TdigestInfoKey Completed + +func (c TdigestInfoKey) Build() Completed { + return Completed(c) +} + +type TdigestMax Completed + +func (b Builder) TdigestMax() (c TdigestMax) { + c = TdigestMax{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.MAX") + return c +} + +func (c TdigestMax) Key(key string) TdigestMaxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestMaxKey)(c) +} + +type TdigestMaxKey Completed + +func (c TdigestMaxKey) Build() Completed { + return Completed(c) +} + +type TdigestMerge Completed + +func (b Builder) TdigestMerge() (c TdigestMerge) { + c = TdigestMerge{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.MERGE") + return c +} + +func (c TdigestMerge) DestinationKey(destinationKey string) TdigestMergeDestinationKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destinationKey) + } else { + c.ks = check(c.ks, slot(destinationKey)) + } + c.cs.s = append(c.cs.s, destinationKey) + return (TdigestMergeDestinationKey)(c) +} + +type TdigestMergeConfigCompression Completed + +func (c 
TdigestMergeConfigCompression) Override() TdigestMergeOverride { + c.cs.s = append(c.cs.s, "OVERRIDE") + return (TdigestMergeOverride)(c) +} + +func (c TdigestMergeConfigCompression) Build() Completed { + return Completed(c) +} + +type TdigestMergeDestinationKey Completed + +func (c TdigestMergeDestinationKey) Numkeys(numkeys int64) TdigestMergeNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (TdigestMergeNumkeys)(c) +} + +type TdigestMergeNumkeys Completed + +func (c TdigestMergeNumkeys) SourceKey(sourceKey ...string) TdigestMergeSourceKey { + if c.ks&NoSlot == NoSlot { + for _, k := range sourceKey { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range sourceKey { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, sourceKey...) + return (TdigestMergeSourceKey)(c) +} + +type TdigestMergeOverride Completed + +func (c TdigestMergeOverride) Build() Completed { + return Completed(c) +} + +type TdigestMergeSourceKey Completed + +func (c TdigestMergeSourceKey) SourceKey(sourceKey ...string) TdigestMergeSourceKey { + if c.ks&NoSlot == NoSlot { + for _, k := range sourceKey { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range sourceKey { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, sourceKey...) + return c +} + +func (c TdigestMergeSourceKey) Compression(compression int64) TdigestMergeConfigCompression { + c.cs.s = append(c.cs.s, "COMPRESSION", strconv.FormatInt(compression, 10)) + return (TdigestMergeConfigCompression)(c) +} + +func (c TdigestMergeSourceKey) Override() TdigestMergeOverride { + c.cs.s = append(c.cs.s, "OVERRIDE") + return (TdigestMergeOverride)(c) +} + +func (c TdigestMergeSourceKey) Build() Completed { + return Completed(c) +} + +type TdigestMin Completed + +func (b Builder) TdigestMin() (c TdigestMin) { + c = TdigestMin{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.MIN") + return c +} + +func (c TdigestMin) Key(key string) TdigestMinKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestMinKey)(c) +} + +type TdigestMinKey Completed + +func (c TdigestMinKey) Build() Completed { + return Completed(c) +} + +type TdigestQuantile Completed + +func (b Builder) TdigestQuantile() (c TdigestQuantile) { + c = TdigestQuantile{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.QUANTILE") + return c +} + +func (c TdigestQuantile) Key(key string) TdigestQuantileKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestQuantileKey)(c) +} + +type TdigestQuantileKey Completed + +func (c TdigestQuantileKey) Quantile(quantile ...float64) TdigestQuantileQuantile { + for _, n := range quantile { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (TdigestQuantileQuantile)(c) +} + +type TdigestQuantileQuantile Completed + +func (c TdigestQuantileQuantile) Quantile(quantile ...float64) TdigestQuantileQuantile { + for _, n := range quantile { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c TdigestQuantileQuantile) Build() Completed { + return Completed(c) +} + +type TdigestRank Completed + +func (b Builder) TdigestRank() (c TdigestRank) { + c = TdigestRank{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.RANK") + return c +} + +func (c TdigestRank) Key(key string) TdigestRankKey { + if c.ks&NoSlot == NoSlot { + c.ks = 
NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestRankKey)(c) +} + +type TdigestRankKey Completed + +func (c TdigestRankKey) Value(value ...float64) TdigestRankValue { + for _, n := range value { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (TdigestRankValue)(c) +} + +type TdigestRankValue Completed + +func (c TdigestRankValue) Value(value ...float64) TdigestRankValue { + for _, n := range value { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c TdigestRankValue) Build() Completed { + return Completed(c) +} + +type TdigestReset Completed + +func (b Builder) TdigestReset() (c TdigestReset) { + c = TdigestReset{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.RESET") + return c +} + +func (c TdigestReset) Key(key string) TdigestResetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestResetKey)(c) +} + +type TdigestResetKey Completed + +func (c TdigestResetKey) Build() Completed { + return Completed(c) +} + +type TdigestRevrank Completed + +func (b Builder) TdigestRevrank() (c TdigestRevrank) { + c = TdigestRevrank{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.REVRANK") + return c +} + +func (c TdigestRevrank) Key(key string) TdigestRevrankKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestRevrankKey)(c) +} + +type TdigestRevrankKey Completed + +func (c TdigestRevrankKey) Value(value ...float64) TdigestRevrankValue { + for _, n := range value { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return (TdigestRevrankValue)(c) +} + +type TdigestRevrankValue Completed + +func (c TdigestRevrankValue) Value(value ...float64) TdigestRevrankValue { + for _, n := range value { + c.cs.s = append(c.cs.s, strconv.FormatFloat(n, 'f', -1, 64)) + } + return c +} + +func (c TdigestRevrankValue) Build() Completed { + return Completed(c) +} + +type TdigestTrimmedMean Completed + +func (b Builder) TdigestTrimmedMean() (c TdigestTrimmedMean) { + c = TdigestTrimmedMean{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TDIGEST.TRIMMED_MEAN") + return c +} + +func (c TdigestTrimmedMean) Key(key string) TdigestTrimmedMeanKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TdigestTrimmedMeanKey)(c) +} + +type TdigestTrimmedMeanHighCutQuantile Completed + +func (c TdigestTrimmedMeanHighCutQuantile) Build() Completed { + return Completed(c) +} + +type TdigestTrimmedMeanKey Completed + +func (c TdigestTrimmedMeanKey) LowCutQuantile(lowCutQuantile float64) TdigestTrimmedMeanLowCutQuantile { + c.cs.s = append(c.cs.s, strconv.FormatFloat(lowCutQuantile, 'f', -1, 64)) + return (TdigestTrimmedMeanLowCutQuantile)(c) +} + +type TdigestTrimmedMeanLowCutQuantile Completed + +func (c TdigestTrimmedMeanLowCutQuantile) HighCutQuantile(highCutQuantile float64) TdigestTrimmedMeanHighCutQuantile { + c.cs.s = append(c.cs.s, strconv.FormatFloat(highCutQuantile, 'f', -1, 64)) + return (TdigestTrimmedMeanHighCutQuantile)(c) +} + +type Time Completed + +func (b Builder) Time() (c Time) { + c = Time{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TIME") + return c +} + +func (c Time) Build() Completed { + return Completed(c) +} + +type TopkAdd Completed + +func (b 
Builder) TopkAdd() (c TopkAdd) { + c = TopkAdd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TOPK.ADD") + return c +} + +func (c TopkAdd) Key(key string) TopkAddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkAddKey)(c) +} + +type TopkAddItems Completed + +func (c TopkAddItems) Items(items ...string) TopkAddItems { + c.cs.s = append(c.cs.s, items...) + return c +} + +func (c TopkAddItems) Build() Completed { + return Completed(c) +} + +type TopkAddKey Completed + +func (c TopkAddKey) Items(items ...string) TopkAddItems { + c.cs.s = append(c.cs.s, items...) + return (TopkAddItems)(c) +} + +type TopkCount Completed + +func (b Builder) TopkCount() (c TopkCount) { + c = TopkCount{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TOPK.COUNT") + return c +} + +func (c TopkCount) Key(key string) TopkCountKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkCountKey)(c) +} + +type TopkCountItem Completed + +func (c TopkCountItem) Item(item ...string) TopkCountItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c TopkCountItem) Build() Completed { + return Completed(c) +} + +type TopkCountKey Completed + +func (c TopkCountKey) Item(item ...string) TopkCountItem { + c.cs.s = append(c.cs.s, item...) + return (TopkCountItem)(c) +} + +type TopkIncrby Completed + +func (b Builder) TopkIncrby() (c TopkIncrby) { + c = TopkIncrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TOPK.INCRBY") + return c +} + +func (c TopkIncrby) Key(key string) TopkIncrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkIncrbyKey)(c) +} + +type TopkIncrbyItemsIncrement Completed + +func (c TopkIncrbyItemsIncrement) Item(item string) TopkIncrbyItemsItem { + c.cs.s = append(c.cs.s, item) + return (TopkIncrbyItemsItem)(c) +} + +func (c TopkIncrbyItemsIncrement) Build() Completed { + return Completed(c) +} + +type TopkIncrbyItemsItem Completed + +func (c TopkIncrbyItemsItem) Increment(increment int64) TopkIncrbyItemsIncrement { + c.cs.s = append(c.cs.s, strconv.FormatInt(increment, 10)) + return (TopkIncrbyItemsIncrement)(c) +} + +type TopkIncrbyKey Completed + +func (c TopkIncrbyKey) Item(item string) TopkIncrbyItemsItem { + c.cs.s = append(c.cs.s, item) + return (TopkIncrbyItemsItem)(c) +} + +type TopkInfo Completed + +func (b Builder) TopkInfo() (c TopkInfo) { + c = TopkInfo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TOPK.INFO") + return c +} + +func (c TopkInfo) Key(key string) TopkInfoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkInfoKey)(c) +} + +type TopkInfoKey Completed + +func (c TopkInfoKey) Build() Completed { + return Completed(c) +} + +func (c TopkInfoKey) Cache() Cacheable { + return Cacheable(c) +} + +type TopkList Completed + +func (b Builder) TopkList() (c TopkList) { + c = TopkList{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TOPK.LIST") + return c +} + +func (c TopkList) Key(key string) TopkListKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkListKey)(c) +} + +type TopkListKey Completed + +func (c TopkListKey) Withcount() TopkListWithcount { 
+ c.cs.s = append(c.cs.s, "WITHCOUNT") + return (TopkListWithcount)(c) +} + +func (c TopkListKey) Build() Completed { + return Completed(c) +} + +func (c TopkListKey) Cache() Cacheable { + return Cacheable(c) +} + +type TopkListWithcount Completed + +func (c TopkListWithcount) Build() Completed { + return Completed(c) +} + +func (c TopkListWithcount) Cache() Cacheable { + return Cacheable(c) +} + +type TopkQuery Completed + +func (b Builder) TopkQuery() (c TopkQuery) { + c = TopkQuery{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TOPK.QUERY") + return c +} + +func (c TopkQuery) Key(key string) TopkQueryKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkQueryKey)(c) +} + +type TopkQueryItem Completed + +func (c TopkQueryItem) Item(item ...string) TopkQueryItem { + c.cs.s = append(c.cs.s, item...) + return c +} + +func (c TopkQueryItem) Build() Completed { + return Completed(c) +} + +func (c TopkQueryItem) Cache() Cacheable { + return Cacheable(c) +} + +type TopkQueryKey Completed + +func (c TopkQueryKey) Item(item ...string) TopkQueryItem { + c.cs.s = append(c.cs.s, item...) + return (TopkQueryItem)(c) +} + +type TopkReserve Completed + +func (b Builder) TopkReserve() (c TopkReserve) { + c = TopkReserve{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TOPK.RESERVE") + return c +} + +func (c TopkReserve) Key(key string) TopkReserveKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TopkReserveKey)(c) +} + +type TopkReserveKey Completed + +func (c TopkReserveKey) Topk(topk int64) TopkReserveTopk { + c.cs.s = append(c.cs.s, strconv.FormatInt(topk, 10)) + return (TopkReserveTopk)(c) +} + +type TopkReserveParamsDecay Completed + +func (c TopkReserveParamsDecay) Build() Completed { + return Completed(c) +} + +type TopkReserveParamsDepth Completed + +func (c TopkReserveParamsDepth) Decay(decay float64) TopkReserveParamsDecay { + c.cs.s = append(c.cs.s, strconv.FormatFloat(decay, 'f', -1, 64)) + return (TopkReserveParamsDecay)(c) +} + +type TopkReserveParamsWidth Completed + +func (c TopkReserveParamsWidth) Depth(depth int64) TopkReserveParamsDepth { + c.cs.s = append(c.cs.s, strconv.FormatInt(depth, 10)) + return (TopkReserveParamsDepth)(c) +} + +type TopkReserveTopk Completed + +func (c TopkReserveTopk) Width(width int64) TopkReserveParamsWidth { + c.cs.s = append(c.cs.s, strconv.FormatInt(width, 10)) + return (TopkReserveParamsWidth)(c) +} + +func (c TopkReserveTopk) Build() Completed { + return Completed(c) +} + +type Touch Completed + +func (b Builder) Touch() (c Touch) { + c = Touch{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TOUCH") + return c +} + +func (c Touch) Key(key ...string) TouchKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (TouchKey)(c) +} + +type TouchKey Completed + +func (c TouchKey) Key(key ...string) TouchKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return c +} + +func (c TouchKey) Build() Completed { + return Completed(c) +} + +type TsAdd Completed + +func (b Builder) TsAdd() (c TsAdd) { + c = TsAdd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.ADD") + return c +} + +func (c TsAdd) Key(key string) TsAddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsAddKey)(c) +} + +type TsAddChunkSize Completed + +func (c TsAddChunkSize) OnDuplicateBlock() TsAddOnDuplicateBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "BLOCK") + return (TsAddOnDuplicateBlock)(c) +} + +func (c TsAddChunkSize) OnDuplicateFirst() TsAddOnDuplicateFirst { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "FIRST") + return (TsAddOnDuplicateFirst)(c) +} + +func (c TsAddChunkSize) OnDuplicateLast() TsAddOnDuplicateLast { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "LAST") + return (TsAddOnDuplicateLast)(c) +} + +func (c TsAddChunkSize) OnDuplicateMin() TsAddOnDuplicateMin { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MIN") + return (TsAddOnDuplicateMin)(c) +} + +func (c TsAddChunkSize) OnDuplicateMax() TsAddOnDuplicateMax { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MAX") + return (TsAddOnDuplicateMax)(c) +} + +func (c TsAddChunkSize) OnDuplicateSum() TsAddOnDuplicateSum { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "SUM") + return (TsAddOnDuplicateSum)(c) +} + +func (c TsAddChunkSize) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddChunkSize) Build() Completed { + return Completed(c) +} + +type TsAddEncodingCompressed Completed + +func (c TsAddEncodingCompressed) ChunkSize(size int64) TsAddChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsAddChunkSize)(c) +} + +func (c TsAddEncodingCompressed) OnDuplicateBlock() TsAddOnDuplicateBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "BLOCK") + return (TsAddOnDuplicateBlock)(c) +} + +func (c TsAddEncodingCompressed) OnDuplicateFirst() TsAddOnDuplicateFirst { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "FIRST") + return (TsAddOnDuplicateFirst)(c) +} + +func (c TsAddEncodingCompressed) OnDuplicateLast() TsAddOnDuplicateLast { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "LAST") + return (TsAddOnDuplicateLast)(c) +} + +func (c TsAddEncodingCompressed) OnDuplicateMin() TsAddOnDuplicateMin { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MIN") + return (TsAddOnDuplicateMin)(c) +} + +func (c TsAddEncodingCompressed) OnDuplicateMax() TsAddOnDuplicateMax { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MAX") + return (TsAddOnDuplicateMax)(c) +} + +func (c TsAddEncodingCompressed) OnDuplicateSum() TsAddOnDuplicateSum { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "SUM") + return (TsAddOnDuplicateSum)(c) +} + +func (c TsAddEncodingCompressed) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddEncodingCompressed) Build() Completed { + return Completed(c) +} + +type TsAddEncodingUncompressed Completed + +func (c TsAddEncodingUncompressed) ChunkSize(size int64) TsAddChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsAddChunkSize)(c) +} + +func (c TsAddEncodingUncompressed) OnDuplicateBlock() TsAddOnDuplicateBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "BLOCK") + return (TsAddOnDuplicateBlock)(c) +} + +func (c TsAddEncodingUncompressed) OnDuplicateFirst() TsAddOnDuplicateFirst { + c.cs.s = append(c.cs.s, 
"ON_DUPLICATE", "FIRST") + return (TsAddOnDuplicateFirst)(c) +} + +func (c TsAddEncodingUncompressed) OnDuplicateLast() TsAddOnDuplicateLast { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "LAST") + return (TsAddOnDuplicateLast)(c) +} + +func (c TsAddEncodingUncompressed) OnDuplicateMin() TsAddOnDuplicateMin { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MIN") + return (TsAddOnDuplicateMin)(c) +} + +func (c TsAddEncodingUncompressed) OnDuplicateMax() TsAddOnDuplicateMax { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MAX") + return (TsAddOnDuplicateMax)(c) +} + +func (c TsAddEncodingUncompressed) OnDuplicateSum() TsAddOnDuplicateSum { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "SUM") + return (TsAddOnDuplicateSum)(c) +} + +func (c TsAddEncodingUncompressed) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddEncodingUncompressed) Build() Completed { + return Completed(c) +} + +type TsAddKey Completed + +func (c TsAddKey) Timestamp(timestamp int64) TsAddTimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(timestamp, 10)) + return (TsAddTimestamp)(c) +} + +type TsAddLabels Completed + +func (c TsAddLabels) Labels(label string, value string) TsAddLabels { + c.cs.s = append(c.cs.s, label, value) + return c +} + +func (c TsAddLabels) Build() Completed { + return Completed(c) +} + +type TsAddOnDuplicateBlock Completed + +func (c TsAddOnDuplicateBlock) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddOnDuplicateBlock) Build() Completed { + return Completed(c) +} + +type TsAddOnDuplicateFirst Completed + +func (c TsAddOnDuplicateFirst) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddOnDuplicateFirst) Build() Completed { + return Completed(c) +} + +type TsAddOnDuplicateLast Completed + +func (c TsAddOnDuplicateLast) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddOnDuplicateLast) Build() Completed { + return Completed(c) +} + +type TsAddOnDuplicateMax Completed + +func (c TsAddOnDuplicateMax) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddOnDuplicateMax) Build() Completed { + return Completed(c) +} + +type TsAddOnDuplicateMin Completed + +func (c TsAddOnDuplicateMin) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddOnDuplicateMin) Build() Completed { + return Completed(c) +} + +type TsAddOnDuplicateSum Completed + +func (c TsAddOnDuplicateSum) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddOnDuplicateSum) Build() Completed { + return Completed(c) +} + +type TsAddRetention Completed + +func (c TsAddRetention) EncodingUncompressed() TsAddEncodingUncompressed { + c.cs.s = append(c.cs.s, "ENCODING", "UNCOMPRESSED") + return (TsAddEncodingUncompressed)(c) +} + +func (c TsAddRetention) EncodingCompressed() TsAddEncodingCompressed { + c.cs.s = append(c.cs.s, "ENCODING", "COMPRESSED") + return (TsAddEncodingCompressed)(c) +} + +func (c TsAddRetention) ChunkSize(size int64) TsAddChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsAddChunkSize)(c) +} + +func (c TsAddRetention) OnDuplicateBlock() TsAddOnDuplicateBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "BLOCK") + return (TsAddOnDuplicateBlock)(c) +} + +func (c TsAddRetention) OnDuplicateFirst() TsAddOnDuplicateFirst { 
+ c.cs.s = append(c.cs.s, "ON_DUPLICATE", "FIRST") + return (TsAddOnDuplicateFirst)(c) +} + +func (c TsAddRetention) OnDuplicateLast() TsAddOnDuplicateLast { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "LAST") + return (TsAddOnDuplicateLast)(c) +} + +func (c TsAddRetention) OnDuplicateMin() TsAddOnDuplicateMin { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MIN") + return (TsAddOnDuplicateMin)(c) +} + +func (c TsAddRetention) OnDuplicateMax() TsAddOnDuplicateMax { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MAX") + return (TsAddOnDuplicateMax)(c) +} + +func (c TsAddRetention) OnDuplicateSum() TsAddOnDuplicateSum { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "SUM") + return (TsAddOnDuplicateSum)(c) +} + +func (c TsAddRetention) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddRetention) Build() Completed { + return Completed(c) +} + +type TsAddTimestamp Completed + +func (c TsAddTimestamp) Value(value float64) TsAddValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return (TsAddValue)(c) +} + +type TsAddValue Completed + +func (c TsAddValue) Retention(retentionperiod int64) TsAddRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsAddRetention)(c) +} + +func (c TsAddValue) EncodingUncompressed() TsAddEncodingUncompressed { + c.cs.s = append(c.cs.s, "ENCODING", "UNCOMPRESSED") + return (TsAddEncodingUncompressed)(c) +} + +func (c TsAddValue) EncodingCompressed() TsAddEncodingCompressed { + c.cs.s = append(c.cs.s, "ENCODING", "COMPRESSED") + return (TsAddEncodingCompressed)(c) +} + +func (c TsAddValue) ChunkSize(size int64) TsAddChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsAddChunkSize)(c) +} + +func (c TsAddValue) OnDuplicateBlock() TsAddOnDuplicateBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "BLOCK") + return (TsAddOnDuplicateBlock)(c) +} + +func (c TsAddValue) OnDuplicateFirst() TsAddOnDuplicateFirst { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "FIRST") + return (TsAddOnDuplicateFirst)(c) +} + +func (c TsAddValue) OnDuplicateLast() TsAddOnDuplicateLast { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "LAST") + return (TsAddOnDuplicateLast)(c) +} + +func (c TsAddValue) OnDuplicateMin() TsAddOnDuplicateMin { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MIN") + return (TsAddOnDuplicateMin)(c) +} + +func (c TsAddValue) OnDuplicateMax() TsAddOnDuplicateMax { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "MAX") + return (TsAddOnDuplicateMax)(c) +} + +func (c TsAddValue) OnDuplicateSum() TsAddOnDuplicateSum { + c.cs.s = append(c.cs.s, "ON_DUPLICATE", "SUM") + return (TsAddOnDuplicateSum)(c) +} + +func (c TsAddValue) Labels() TsAddLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAddLabels)(c) +} + +func (c TsAddValue) Build() Completed { + return Completed(c) +} + +type TsAlter Completed + +func (b Builder) TsAlter() (c TsAlter) { + c = TsAlter{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.ALTER") + return c +} + +func (c TsAlter) Key(key string) TsAlterKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsAlterKey)(c) +} + +type TsAlterChunkSize Completed + +func (c TsAlterChunkSize) DuplicatePolicyBlock() TsAlterDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsAlterDuplicatePolicyBlock)(c) +} + +func (c TsAlterChunkSize) DuplicatePolicyFirst() 
TsAlterDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsAlterDuplicatePolicyFirst)(c) +} + +func (c TsAlterChunkSize) DuplicatePolicyLast() TsAlterDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsAlterDuplicatePolicyLast)(c) +} + +func (c TsAlterChunkSize) DuplicatePolicyMin() TsAlterDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsAlterDuplicatePolicyMin)(c) +} + +func (c TsAlterChunkSize) DuplicatePolicyMax() TsAlterDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsAlterDuplicatePolicyMax)(c) +} + +func (c TsAlterChunkSize) DuplicatePolicySum() TsAlterDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsAlterDuplicatePolicySum)(c) +} + +func (c TsAlterChunkSize) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterChunkSize) Build() Completed { + return Completed(c) +} + +type TsAlterDuplicatePolicyBlock Completed + +func (c TsAlterDuplicatePolicyBlock) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterDuplicatePolicyBlock) Build() Completed { + return Completed(c) +} + +type TsAlterDuplicatePolicyFirst Completed + +func (c TsAlterDuplicatePolicyFirst) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterDuplicatePolicyFirst) Build() Completed { + return Completed(c) +} + +type TsAlterDuplicatePolicyLast Completed + +func (c TsAlterDuplicatePolicyLast) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterDuplicatePolicyLast) Build() Completed { + return Completed(c) +} + +type TsAlterDuplicatePolicyMax Completed + +func (c TsAlterDuplicatePolicyMax) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterDuplicatePolicyMax) Build() Completed { + return Completed(c) +} + +type TsAlterDuplicatePolicyMin Completed + +func (c TsAlterDuplicatePolicyMin) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterDuplicatePolicyMin) Build() Completed { + return Completed(c) +} + +type TsAlterDuplicatePolicySum Completed + +func (c TsAlterDuplicatePolicySum) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterDuplicatePolicySum) Build() Completed { + return Completed(c) +} + +type TsAlterKey Completed + +func (c TsAlterKey) Retention(retentionperiod int64) TsAlterRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsAlterRetention)(c) +} + +func (c TsAlterKey) ChunkSize(size int64) TsAlterChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsAlterChunkSize)(c) +} + +func (c TsAlterKey) DuplicatePolicyBlock() TsAlterDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsAlterDuplicatePolicyBlock)(c) +} + +func (c TsAlterKey) DuplicatePolicyFirst() TsAlterDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsAlterDuplicatePolicyFirst)(c) +} + +func (c TsAlterKey) DuplicatePolicyLast() TsAlterDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsAlterDuplicatePolicyLast)(c) +} + +func (c TsAlterKey) DuplicatePolicyMin() 
TsAlterDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsAlterDuplicatePolicyMin)(c) +} + +func (c TsAlterKey) DuplicatePolicyMax() TsAlterDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsAlterDuplicatePolicyMax)(c) +} + +func (c TsAlterKey) DuplicatePolicySum() TsAlterDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsAlterDuplicatePolicySum)(c) +} + +func (c TsAlterKey) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterKey) Build() Completed { + return Completed(c) +} + +type TsAlterLabels Completed + +func (c TsAlterLabels) Labels(label string, value string) TsAlterLabels { + c.cs.s = append(c.cs.s, label, value) + return c +} + +func (c TsAlterLabels) Build() Completed { + return Completed(c) +} + +type TsAlterRetention Completed + +func (c TsAlterRetention) ChunkSize(size int64) TsAlterChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsAlterChunkSize)(c) +} + +func (c TsAlterRetention) DuplicatePolicyBlock() TsAlterDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsAlterDuplicatePolicyBlock)(c) +} + +func (c TsAlterRetention) DuplicatePolicyFirst() TsAlterDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsAlterDuplicatePolicyFirst)(c) +} + +func (c TsAlterRetention) DuplicatePolicyLast() TsAlterDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsAlterDuplicatePolicyLast)(c) +} + +func (c TsAlterRetention) DuplicatePolicyMin() TsAlterDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsAlterDuplicatePolicyMin)(c) +} + +func (c TsAlterRetention) DuplicatePolicyMax() TsAlterDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsAlterDuplicatePolicyMax)(c) +} + +func (c TsAlterRetention) DuplicatePolicySum() TsAlterDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsAlterDuplicatePolicySum)(c) +} + +func (c TsAlterRetention) Labels() TsAlterLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsAlterLabels)(c) +} + +func (c TsAlterRetention) Build() Completed { + return Completed(c) +} + +type TsCreate Completed + +func (b Builder) TsCreate() (c TsCreate) { + c = TsCreate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.CREATE") + return c +} + +func (c TsCreate) Key(key string) TsCreateKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsCreateKey)(c) +} + +type TsCreateChunkSize Completed + +func (c TsCreateChunkSize) DuplicatePolicyBlock() TsCreateDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsCreateDuplicatePolicyBlock)(c) +} + +func (c TsCreateChunkSize) DuplicatePolicyFirst() TsCreateDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsCreateDuplicatePolicyFirst)(c) +} + +func (c TsCreateChunkSize) DuplicatePolicyLast() TsCreateDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsCreateDuplicatePolicyLast)(c) +} + +func (c TsCreateChunkSize) DuplicatePolicyMin() TsCreateDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsCreateDuplicatePolicyMin)(c) +} + +func (c TsCreateChunkSize) 
DuplicatePolicyMax() TsCreateDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsCreateDuplicatePolicyMax)(c) +} + +func (c TsCreateChunkSize) DuplicatePolicySum() TsCreateDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsCreateDuplicatePolicySum)(c) +} + +func (c TsCreateChunkSize) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateChunkSize) Build() Completed { + return Completed(c) +} + +type TsCreateDuplicatePolicyBlock Completed + +func (c TsCreateDuplicatePolicyBlock) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateDuplicatePolicyBlock) Build() Completed { + return Completed(c) +} + +type TsCreateDuplicatePolicyFirst Completed + +func (c TsCreateDuplicatePolicyFirst) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateDuplicatePolicyFirst) Build() Completed { + return Completed(c) +} + +type TsCreateDuplicatePolicyLast Completed + +func (c TsCreateDuplicatePolicyLast) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateDuplicatePolicyLast) Build() Completed { + return Completed(c) +} + +type TsCreateDuplicatePolicyMax Completed + +func (c TsCreateDuplicatePolicyMax) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateDuplicatePolicyMax) Build() Completed { + return Completed(c) +} + +type TsCreateDuplicatePolicyMin Completed + +func (c TsCreateDuplicatePolicyMin) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateDuplicatePolicyMin) Build() Completed { + return Completed(c) +} + +type TsCreateDuplicatePolicySum Completed + +func (c TsCreateDuplicatePolicySum) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateDuplicatePolicySum) Build() Completed { + return Completed(c) +} + +type TsCreateEncodingCompressed Completed + +func (c TsCreateEncodingCompressed) ChunkSize(size int64) TsCreateChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsCreateChunkSize)(c) +} + +func (c TsCreateEncodingCompressed) DuplicatePolicyBlock() TsCreateDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsCreateDuplicatePolicyBlock)(c) +} + +func (c TsCreateEncodingCompressed) DuplicatePolicyFirst() TsCreateDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsCreateDuplicatePolicyFirst)(c) +} + +func (c TsCreateEncodingCompressed) DuplicatePolicyLast() TsCreateDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsCreateDuplicatePolicyLast)(c) +} + +func (c TsCreateEncodingCompressed) DuplicatePolicyMin() TsCreateDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsCreateDuplicatePolicyMin)(c) +} + +func (c TsCreateEncodingCompressed) DuplicatePolicyMax() TsCreateDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsCreateDuplicatePolicyMax)(c) +} + +func (c TsCreateEncodingCompressed) DuplicatePolicySum() TsCreateDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsCreateDuplicatePolicySum)(c) +} + +func (c TsCreateEncodingCompressed) Labels() 
TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateEncodingCompressed) Build() Completed { + return Completed(c) +} + +type TsCreateEncodingUncompressed Completed + +func (c TsCreateEncodingUncompressed) ChunkSize(size int64) TsCreateChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsCreateChunkSize)(c) +} + +func (c TsCreateEncodingUncompressed) DuplicatePolicyBlock() TsCreateDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsCreateDuplicatePolicyBlock)(c) +} + +func (c TsCreateEncodingUncompressed) DuplicatePolicyFirst() TsCreateDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsCreateDuplicatePolicyFirst)(c) +} + +func (c TsCreateEncodingUncompressed) DuplicatePolicyLast() TsCreateDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsCreateDuplicatePolicyLast)(c) +} + +func (c TsCreateEncodingUncompressed) DuplicatePolicyMin() TsCreateDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsCreateDuplicatePolicyMin)(c) +} + +func (c TsCreateEncodingUncompressed) DuplicatePolicyMax() TsCreateDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsCreateDuplicatePolicyMax)(c) +} + +func (c TsCreateEncodingUncompressed) DuplicatePolicySum() TsCreateDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsCreateDuplicatePolicySum)(c) +} + +func (c TsCreateEncodingUncompressed) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateEncodingUncompressed) Build() Completed { + return Completed(c) +} + +type TsCreateKey Completed + +func (c TsCreateKey) Retention(retentionperiod int64) TsCreateRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsCreateRetention)(c) +} + +func (c TsCreateKey) EncodingUncompressed() TsCreateEncodingUncompressed { + c.cs.s = append(c.cs.s, "ENCODING", "UNCOMPRESSED") + return (TsCreateEncodingUncompressed)(c) +} + +func (c TsCreateKey) EncodingCompressed() TsCreateEncodingCompressed { + c.cs.s = append(c.cs.s, "ENCODING", "COMPRESSED") + return (TsCreateEncodingCompressed)(c) +} + +func (c TsCreateKey) ChunkSize(size int64) TsCreateChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsCreateChunkSize)(c) +} + +func (c TsCreateKey) DuplicatePolicyBlock() TsCreateDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsCreateDuplicatePolicyBlock)(c) +} + +func (c TsCreateKey) DuplicatePolicyFirst() TsCreateDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsCreateDuplicatePolicyFirst)(c) +} + +func (c TsCreateKey) DuplicatePolicyLast() TsCreateDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsCreateDuplicatePolicyLast)(c) +} + +func (c TsCreateKey) DuplicatePolicyMin() TsCreateDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsCreateDuplicatePolicyMin)(c) +} + +func (c TsCreateKey) DuplicatePolicyMax() TsCreateDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsCreateDuplicatePolicyMax)(c) +} + +func (c TsCreateKey) DuplicatePolicySum() TsCreateDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + 
return (TsCreateDuplicatePolicySum)(c) +} + +func (c TsCreateKey) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateKey) Build() Completed { + return Completed(c) +} + +type TsCreateLabels Completed + +func (c TsCreateLabels) Labels(label string, value string) TsCreateLabels { + c.cs.s = append(c.cs.s, label, value) + return c +} + +func (c TsCreateLabels) Build() Completed { + return Completed(c) +} + +type TsCreateRetention Completed + +func (c TsCreateRetention) EncodingUncompressed() TsCreateEncodingUncompressed { + c.cs.s = append(c.cs.s, "ENCODING", "UNCOMPRESSED") + return (TsCreateEncodingUncompressed)(c) +} + +func (c TsCreateRetention) EncodingCompressed() TsCreateEncodingCompressed { + c.cs.s = append(c.cs.s, "ENCODING", "COMPRESSED") + return (TsCreateEncodingCompressed)(c) +} + +func (c TsCreateRetention) ChunkSize(size int64) TsCreateChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsCreateChunkSize)(c) +} + +func (c TsCreateRetention) DuplicatePolicyBlock() TsCreateDuplicatePolicyBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "BLOCK") + return (TsCreateDuplicatePolicyBlock)(c) +} + +func (c TsCreateRetention) DuplicatePolicyFirst() TsCreateDuplicatePolicyFirst { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "FIRST") + return (TsCreateDuplicatePolicyFirst)(c) +} + +func (c TsCreateRetention) DuplicatePolicyLast() TsCreateDuplicatePolicyLast { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "LAST") + return (TsCreateDuplicatePolicyLast)(c) +} + +func (c TsCreateRetention) DuplicatePolicyMin() TsCreateDuplicatePolicyMin { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MIN") + return (TsCreateDuplicatePolicyMin)(c) +} + +func (c TsCreateRetention) DuplicatePolicyMax() TsCreateDuplicatePolicyMax { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "MAX") + return (TsCreateDuplicatePolicyMax)(c) +} + +func (c TsCreateRetention) DuplicatePolicySum() TsCreateDuplicatePolicySum { + c.cs.s = append(c.cs.s, "DUPLICATE_POLICY", "SUM") + return (TsCreateDuplicatePolicySum)(c) +} + +func (c TsCreateRetention) Labels() TsCreateLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsCreateLabels)(c) +} + +func (c TsCreateRetention) Build() Completed { + return Completed(c) +} + +type TsCreaterule Completed + +func (b Builder) TsCreaterule() (c TsCreaterule) { + c = TsCreaterule{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.CREATERULE") + return c +} + +func (c TsCreaterule) Sourcekey(sourcekey string) TsCreateruleSourcekey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(sourcekey) + } else { + c.ks = check(c.ks, slot(sourcekey)) + } + c.cs.s = append(c.cs.s, sourcekey) + return (TsCreateruleSourcekey)(c) +} + +type TsCreateruleAggregationAvg Completed + +func (c TsCreateruleAggregationAvg) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationCount Completed + +func (c TsCreateruleAggregationCount) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationFirst Completed + +func (c TsCreateruleAggregationFirst) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return 
(TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationLast Completed + +func (c TsCreateruleAggregationLast) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationMax Completed + +func (c TsCreateruleAggregationMax) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationMin Completed + +func (c TsCreateruleAggregationMin) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationRange Completed + +func (c TsCreateruleAggregationRange) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationStdP Completed + +func (c TsCreateruleAggregationStdP) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationStdS Completed + +func (c TsCreateruleAggregationStdS) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationSum Completed + +func (c TsCreateruleAggregationSum) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationTwa Completed + +func (c TsCreateruleAggregationTwa) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationVarP Completed + +func (c TsCreateruleAggregationVarP) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAggregationVarS Completed + +func (c TsCreateruleAggregationVarS) Bucketduration(bucketduration int64) TsCreateruleBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsCreateruleBucketduration)(c) +} + +type TsCreateruleAligntimestamp Completed + +func (c TsCreateruleAligntimestamp) Build() Completed { + return Completed(c) +} + +type TsCreateruleBucketduration Completed + +func (c TsCreateruleBucketduration) Aligntimestamp(aligntimestamp int64) TsCreateruleAligntimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(aligntimestamp, 10)) + return (TsCreateruleAligntimestamp)(c) +} + +func (c TsCreateruleBucketduration) Build() Completed { + return Completed(c) +} + +type TsCreateruleDestkey Completed + +func (c TsCreateruleDestkey) AggregationAvg() TsCreateruleAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsCreateruleAggregationAvg)(c) +} + +func (c TsCreateruleDestkey) AggregationSum() TsCreateruleAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsCreateruleAggregationSum)(c) +} + +func (c TsCreateruleDestkey) AggregationMin() 
TsCreateruleAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsCreateruleAggregationMin)(c) +} + +func (c TsCreateruleDestkey) AggregationMax() TsCreateruleAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsCreateruleAggregationMax)(c) +} + +func (c TsCreateruleDestkey) AggregationRange() TsCreateruleAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsCreateruleAggregationRange)(c) +} + +func (c TsCreateruleDestkey) AggregationCount() TsCreateruleAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsCreateruleAggregationCount)(c) +} + +func (c TsCreateruleDestkey) AggregationFirst() TsCreateruleAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsCreateruleAggregationFirst)(c) +} + +func (c TsCreateruleDestkey) AggregationLast() TsCreateruleAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsCreateruleAggregationLast)(c) +} + +func (c TsCreateruleDestkey) AggregationStdP() TsCreateruleAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsCreateruleAggregationStdP)(c) +} + +func (c TsCreateruleDestkey) AggregationStdS() TsCreateruleAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsCreateruleAggregationStdS)(c) +} + +func (c TsCreateruleDestkey) AggregationVarP() TsCreateruleAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsCreateruleAggregationVarP)(c) +} + +func (c TsCreateruleDestkey) AggregationVarS() TsCreateruleAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsCreateruleAggregationVarS)(c) +} + +func (c TsCreateruleDestkey) AggregationTwa() TsCreateruleAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsCreateruleAggregationTwa)(c) +} + +type TsCreateruleSourcekey Completed + +func (c TsCreateruleSourcekey) Destkey(destkey string) TsCreateruleDestkey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destkey) + } else { + c.ks = check(c.ks, slot(destkey)) + } + c.cs.s = append(c.cs.s, destkey) + return (TsCreateruleDestkey)(c) +} + +type TsDecrby Completed + +func (b Builder) TsDecrby() (c TsDecrby) { + c = TsDecrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.DECRBY") + return c +} + +func (c TsDecrby) Key(key string) TsDecrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsDecrbyKey)(c) +} + +type TsDecrbyChunkSize Completed + +func (c TsDecrbyChunkSize) Labels() TsDecrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsDecrbyLabels)(c) +} + +func (c TsDecrbyChunkSize) Build() Completed { + return Completed(c) +} + +type TsDecrbyKey Completed + +func (c TsDecrbyKey) Value(value float64) TsDecrbyValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return (TsDecrbyValue)(c) +} + +type TsDecrbyLabels Completed + +func (c TsDecrbyLabels) Labels(label string, value string) TsDecrbyLabels { + c.cs.s = append(c.cs.s, label, value) + return c +} + +func (c TsDecrbyLabels) Build() Completed { + return Completed(c) +} + +type TsDecrbyRetention Completed + +func (c TsDecrbyRetention) Uncompressed() TsDecrbyUncompressed { + c.cs.s = append(c.cs.s, "UNCOMPRESSED") + return (TsDecrbyUncompressed)(c) +} + +func (c TsDecrbyRetention) ChunkSize(size int64) TsDecrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return 
(TsDecrbyChunkSize)(c) +} + +func (c TsDecrbyRetention) Labels() TsDecrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsDecrbyLabels)(c) +} + +func (c TsDecrbyRetention) Build() Completed { + return Completed(c) +} + +type TsDecrbyTimestamp Completed + +func (c TsDecrbyTimestamp) Retention(retentionperiod int64) TsDecrbyRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsDecrbyRetention)(c) +} + +func (c TsDecrbyTimestamp) Uncompressed() TsDecrbyUncompressed { + c.cs.s = append(c.cs.s, "UNCOMPRESSED") + return (TsDecrbyUncompressed)(c) +} + +func (c TsDecrbyTimestamp) ChunkSize(size int64) TsDecrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsDecrbyChunkSize)(c) +} + +func (c TsDecrbyTimestamp) Labels() TsDecrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsDecrbyLabels)(c) +} + +func (c TsDecrbyTimestamp) Build() Completed { + return Completed(c) +} + +type TsDecrbyUncompressed Completed + +func (c TsDecrbyUncompressed) ChunkSize(size int64) TsDecrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsDecrbyChunkSize)(c) +} + +func (c TsDecrbyUncompressed) Labels() TsDecrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsDecrbyLabels)(c) +} + +func (c TsDecrbyUncompressed) Build() Completed { + return Completed(c) +} + +type TsDecrbyValue Completed + +func (c TsDecrbyValue) Timestamp(timestamp int64) TsDecrbyTimestamp { + c.cs.s = append(c.cs.s, "TIMESTAMP", strconv.FormatInt(timestamp, 10)) + return (TsDecrbyTimestamp)(c) +} + +func (c TsDecrbyValue) Retention(retentionperiod int64) TsDecrbyRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsDecrbyRetention)(c) +} + +func (c TsDecrbyValue) Uncompressed() TsDecrbyUncompressed { + c.cs.s = append(c.cs.s, "UNCOMPRESSED") + return (TsDecrbyUncompressed)(c) +} + +func (c TsDecrbyValue) ChunkSize(size int64) TsDecrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsDecrbyChunkSize)(c) +} + +func (c TsDecrbyValue) Labels() TsDecrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsDecrbyLabels)(c) +} + +func (c TsDecrbyValue) Build() Completed { + return Completed(c) +} + +type TsDel Completed + +func (b Builder) TsDel() (c TsDel) { + c = TsDel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.DEL") + return c +} + +func (c TsDel) Key(key string) TsDelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsDelKey)(c) +} + +type TsDelFromTimestamp Completed + +func (c TsDelFromTimestamp) ToTimestamp(toTimestamp int64) TsDelToTimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(toTimestamp, 10)) + return (TsDelToTimestamp)(c) +} + +type TsDelKey Completed + +func (c TsDelKey) FromTimestamp(fromTimestamp int64) TsDelFromTimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(fromTimestamp, 10)) + return (TsDelFromTimestamp)(c) +} + +type TsDelToTimestamp Completed + +func (c TsDelToTimestamp) Build() Completed { + return Completed(c) +} + +type TsDeleterule Completed + +func (b Builder) TsDeleterule() (c TsDeleterule) { + c = TsDeleterule{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.DELETERULE") + return c +} + +func (c TsDeleterule) Sourcekey(sourcekey string) TsDeleteruleSourcekey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(sourcekey) + } else { + c.ks = check(c.ks, 
slot(sourcekey)) + } + c.cs.s = append(c.cs.s, sourcekey) + return (TsDeleteruleSourcekey)(c) +} + +type TsDeleteruleDestkey Completed + +func (c TsDeleteruleDestkey) Build() Completed { + return Completed(c) +} + +type TsDeleteruleSourcekey Completed + +func (c TsDeleteruleSourcekey) Destkey(destkey string) TsDeleteruleDestkey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destkey) + } else { + c.ks = check(c.ks, slot(destkey)) + } + c.cs.s = append(c.cs.s, destkey) + return (TsDeleteruleDestkey)(c) +} + +type TsGet Completed + +func (b Builder) TsGet() (c TsGet) { + c = TsGet{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TS.GET") + return c +} + +func (c TsGet) Key(key string) TsGetKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsGetKey)(c) +} + +type TsGetKey Completed + +func (c TsGetKey) Latest() TsGetLatest { + c.cs.s = append(c.cs.s, "LATEST") + return (TsGetLatest)(c) +} + +func (c TsGetKey) Build() Completed { + return Completed(c) +} + +type TsGetLatest Completed + +func (c TsGetLatest) Build() Completed { + return Completed(c) +} + +type TsIncrby Completed + +func (b Builder) TsIncrby() (c TsIncrby) { + c = TsIncrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.INCRBY") + return c +} + +func (c TsIncrby) Key(key string) TsIncrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsIncrbyKey)(c) +} + +type TsIncrbyChunkSize Completed + +func (c TsIncrbyChunkSize) Labels() TsIncrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsIncrbyLabels)(c) +} + +func (c TsIncrbyChunkSize) Build() Completed { + return Completed(c) +} + +type TsIncrbyKey Completed + +func (c TsIncrbyKey) Value(value float64) TsIncrbyValue { + c.cs.s = append(c.cs.s, strconv.FormatFloat(value, 'f', -1, 64)) + return (TsIncrbyValue)(c) +} + +type TsIncrbyLabels Completed + +func (c TsIncrbyLabels) Labels(label string, value string) TsIncrbyLabels { + c.cs.s = append(c.cs.s, label, value) + return c +} + +func (c TsIncrbyLabels) Build() Completed { + return Completed(c) +} + +type TsIncrbyRetention Completed + +func (c TsIncrbyRetention) Uncompressed() TsIncrbyUncompressed { + c.cs.s = append(c.cs.s, "UNCOMPRESSED") + return (TsIncrbyUncompressed)(c) +} + +func (c TsIncrbyRetention) ChunkSize(size int64) TsIncrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsIncrbyChunkSize)(c) +} + +func (c TsIncrbyRetention) Labels() TsIncrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsIncrbyLabels)(c) +} + +func (c TsIncrbyRetention) Build() Completed { + return Completed(c) +} + +type TsIncrbyTimestamp Completed + +func (c TsIncrbyTimestamp) Retention(retentionperiod int64) TsIncrbyRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsIncrbyRetention)(c) +} + +func (c TsIncrbyTimestamp) Uncompressed() TsIncrbyUncompressed { + c.cs.s = append(c.cs.s, "UNCOMPRESSED") + return (TsIncrbyUncompressed)(c) +} + +func (c TsIncrbyTimestamp) ChunkSize(size int64) TsIncrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsIncrbyChunkSize)(c) +} + +func (c TsIncrbyTimestamp) Labels() TsIncrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsIncrbyLabels)(c) +} + +func (c TsIncrbyTimestamp) Build() Completed { + return Completed(c) +} + +type 
TsIncrbyUncompressed Completed + +func (c TsIncrbyUncompressed) ChunkSize(size int64) TsIncrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsIncrbyChunkSize)(c) +} + +func (c TsIncrbyUncompressed) Labels() TsIncrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsIncrbyLabels)(c) +} + +func (c TsIncrbyUncompressed) Build() Completed { + return Completed(c) +} + +type TsIncrbyValue Completed + +func (c TsIncrbyValue) Timestamp(timestamp int64) TsIncrbyTimestamp { + c.cs.s = append(c.cs.s, "TIMESTAMP", strconv.FormatInt(timestamp, 10)) + return (TsIncrbyTimestamp)(c) +} + +func (c TsIncrbyValue) Retention(retentionperiod int64) TsIncrbyRetention { + c.cs.s = append(c.cs.s, "RETENTION", strconv.FormatInt(retentionperiod, 10)) + return (TsIncrbyRetention)(c) +} + +func (c TsIncrbyValue) Uncompressed() TsIncrbyUncompressed { + c.cs.s = append(c.cs.s, "UNCOMPRESSED") + return (TsIncrbyUncompressed)(c) +} + +func (c TsIncrbyValue) ChunkSize(size int64) TsIncrbyChunkSize { + c.cs.s = append(c.cs.s, "CHUNK_SIZE", strconv.FormatInt(size, 10)) + return (TsIncrbyChunkSize)(c) +} + +func (c TsIncrbyValue) Labels() TsIncrbyLabels { + c.cs.s = append(c.cs.s, "LABELS") + return (TsIncrbyLabels)(c) +} + +func (c TsIncrbyValue) Build() Completed { + return Completed(c) +} + +type TsInfo Completed + +func (b Builder) TsInfo() (c TsInfo) { + c = TsInfo{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TS.INFO") + return c +} + +func (c TsInfo) Key(key string) TsInfoKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsInfoKey)(c) +} + +type TsInfoDebug Completed + +func (c TsInfoDebug) Build() Completed { + return Completed(c) +} + +type TsInfoKey Completed + +func (c TsInfoKey) Debug(debug string) TsInfoDebug { + c.cs.s = append(c.cs.s, debug) + return (TsInfoDebug)(c) +} + +func (c TsInfoKey) Build() Completed { + return Completed(c) +} + +type TsMadd Completed + +func (b Builder) TsMadd() (c TsMadd) { + c = TsMadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.MADD") + return c +} + +func (c TsMadd) KeyTimestampValue() TsMaddKeyTimestampValue { + return (TsMaddKeyTimestampValue)(c) +} + +type TsMaddKeyTimestampValue Completed + +func (c TsMaddKeyTimestampValue) KeyTimestampValue(key string, timestamp int64, value float64) TsMaddKeyTimestampValue { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key, strconv.FormatInt(timestamp, 10), strconv.FormatFloat(value, 'f', -1, 64)) + return c +} + +func (c TsMaddKeyTimestampValue) Build() Completed { + return Completed(c) +} + +type TsMget Completed + +func (b Builder) TsMget() (c TsMget) { + c = TsMget{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.MGET") + return c +} + +func (c TsMget) Latest() TsMgetLatest { + c.cs.s = append(c.cs.s, "LATEST") + return (TsMgetLatest)(c) +} + +func (c TsMget) Withlabels() TsMgetWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMgetWithlabels)(c) +} + +func (c TsMget) SelectedLabels(labels []string) TsMgetSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMgetSelectedLabels)(c) +} + +func (c TsMget) Filter(filter ...string) TsMgetFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMgetFilter)(c) +} + +type TsMgetFilter Completed + +func (c TsMgetFilter) Filter(filter ...string) TsMgetFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return c +} + +func (c TsMgetFilter) Build() Completed { + return Completed(c) +} + +type TsMgetLatest Completed + +func (c TsMgetLatest) Withlabels() TsMgetWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMgetWithlabels)(c) +} + +func (c TsMgetLatest) SelectedLabels(labels []string) TsMgetSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMgetSelectedLabels)(c) +} + +func (c TsMgetLatest) Filter(filter ...string) TsMgetFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMgetFilter)(c) +} + +type TsMgetSelectedLabels Completed + +func (c TsMgetSelectedLabels) Filter(filter ...string) TsMgetFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMgetFilter)(c) +} + +type TsMgetWithlabels Completed + +func (c TsMgetWithlabels) Filter(filter ...string) TsMgetFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMgetFilter)(c) +} + +type TsMrange Completed + +func (b Builder) TsMrange() (c TsMrange) { + c = TsMrange{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.MRANGE") + return c +} + +func (c TsMrange) Fromtimestamp(fromtimestamp int64) TsMrangeFromtimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(fromtimestamp, 10)) + return (TsMrangeFromtimestamp)(c) +} + +type TsMrangeAggregationAggregationAvg Completed + +func (c TsMrangeAggregationAggregationAvg) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationCount Completed + +func (c TsMrangeAggregationAggregationCount) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationFirst Completed + +func (c TsMrangeAggregationAggregationFirst) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationLast Completed + +func (c TsMrangeAggregationAggregationLast) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationMax Completed + +func (c TsMrangeAggregationAggregationMax) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationMin Completed + +func (c TsMrangeAggregationAggregationMin) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationRange Completed + +func (c TsMrangeAggregationAggregationRange) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + 
return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationStdP Completed + +func (c TsMrangeAggregationAggregationStdP) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationStdS Completed + +func (c TsMrangeAggregationAggregationStdS) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationSum Completed + +func (c TsMrangeAggregationAggregationSum) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationTwa Completed + +func (c TsMrangeAggregationAggregationTwa) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationVarP Completed + +func (c TsMrangeAggregationAggregationVarP) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationAggregationVarS Completed + +func (c TsMrangeAggregationAggregationVarS) Bucketduration(bucketduration int64) TsMrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrangeAggregationBucketduration)(c) +} + +type TsMrangeAggregationBucketduration Completed + +func (c TsMrangeAggregationBucketduration) Buckettimestamp(buckettimestamp string) TsMrangeAggregationBuckettimestamp { + c.cs.s = append(c.cs.s, "BUCKETTIMESTAMP", buckettimestamp) + return (TsMrangeAggregationBuckettimestamp)(c) +} + +func (c TsMrangeAggregationBucketduration) Empty() TsMrangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsMrangeAggregationEmpty)(c) +} + +func (c TsMrangeAggregationBucketduration) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeAggregationBuckettimestamp Completed + +func (c TsMrangeAggregationBuckettimestamp) Empty() TsMrangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsMrangeAggregationEmpty)(c) +} + +func (c TsMrangeAggregationBuckettimestamp) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeAggregationEmpty Completed + +func (c TsMrangeAggregationEmpty) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMrangeFilter)(c) +} + +type TsMrangeAlign Completed + +func (c TsMrangeAlign) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeAlign) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeAlign) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeAlign) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeAlign) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeAlign) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeAlign) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeAlign) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeAlign) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeAlign) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeAlign) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeAlign) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeAlign) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeAlign) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMrangeFilter)(c) +} + +type TsMrangeCount Completed + +func (c TsMrangeCount) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeCount) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeCount) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeCount) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeCount) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeCount) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeCount) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeCount) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeCount) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeCount) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeCount) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeCount) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeCount) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeCount) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeCount) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeFilter Completed + +func (c TsMrangeFilter) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return c +} + +func (c TsMrangeFilter) Groupby(label string, reduce string, reducer string) TsMrangeGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", label, reduce, reducer) + return (TsMrangeGroupby)(c) +} + +func (c TsMrangeFilter) Build() Completed { + return Completed(c) +} + +type TsMrangeFilterByTs Completed + +func (c TsMrangeFilterByTs) FilterByTs(timestamp ...int64) TsMrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c TsMrangeFilterByTs) FilterByValue(min float64, max float64) TsMrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsMrangeFilterByValue)(c) +} + +func (c TsMrangeFilterByTs) Withlabels() TsMrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrangeWithlabels)(c) +} + +func (c TsMrangeFilterByTs) SelectedLabels(labels []string) TsMrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrangeSelectedLabels)(c) +} + +func (c TsMrangeFilterByTs) Count(count int64) TsMrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrangeCount)(c) +} + +func (c TsMrangeFilterByTs) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeFilterByTs) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeFilterByTs) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeFilterByTs) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeFilterByTs) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeFilterByTs) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeFilterByTs) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeFilterByTs) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeFilterByTs) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeFilterByTs) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeFilterByTs) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeFilterByTs) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeFilterByTs) 
AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeFilterByTs) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeFilterByTs) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeFilterByValue Completed + +func (c TsMrangeFilterByValue) Withlabels() TsMrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrangeWithlabels)(c) +} + +func (c TsMrangeFilterByValue) SelectedLabels(labels []string) TsMrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrangeSelectedLabels)(c) +} + +func (c TsMrangeFilterByValue) Count(count int64) TsMrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrangeCount)(c) +} + +func (c TsMrangeFilterByValue) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeFilterByValue) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeFilterByValue) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeFilterByValue) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeFilterByValue) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeFilterByValue) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeFilterByValue) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeFilterByValue) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeFilterByValue) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeFilterByValue) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeFilterByValue) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeFilterByValue) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeFilterByValue) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeFilterByValue) AggregationTwa() 
TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeFilterByValue) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeFromtimestamp Completed + +func (c TsMrangeFromtimestamp) Totimestamp(totimestamp int64) TsMrangeTotimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(totimestamp, 10)) + return (TsMrangeTotimestamp)(c) +} + +type TsMrangeGroupby Completed + +func (c TsMrangeGroupby) Build() Completed { + return Completed(c) +} + +type TsMrangeLatest Completed + +func (c TsMrangeLatest) FilterByTs(timestamp ...int64) TsMrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsMrangeFilterByTs)(c) +} + +func (c TsMrangeLatest) FilterByValue(min float64, max float64) TsMrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsMrangeFilterByValue)(c) +} + +func (c TsMrangeLatest) Withlabels() TsMrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrangeWithlabels)(c) +} + +func (c TsMrangeLatest) SelectedLabels(labels []string) TsMrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrangeSelectedLabels)(c) +} + +func (c TsMrangeLatest) Count(count int64) TsMrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrangeCount)(c) +} + +func (c TsMrangeLatest) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeLatest) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeLatest) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeLatest) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeLatest) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeLatest) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeLatest) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeLatest) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeLatest) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeLatest) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeLatest) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = 
append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeLatest) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeLatest) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeLatest) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeLatest) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeSelectedLabels Completed + +func (c TsMrangeSelectedLabels) Count(count int64) TsMrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrangeCount)(c) +} + +func (c TsMrangeSelectedLabels) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeSelectedLabels) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeSelectedLabels) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeSelectedLabels) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeSelectedLabels) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeSelectedLabels) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeSelectedLabels) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeSelectedLabels) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeSelectedLabels) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeSelectedLabels) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeSelectedLabels) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeSelectedLabels) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeSelectedLabels) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeSelectedLabels) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = 
append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeSelectedLabels) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeTotimestamp Completed + +func (c TsMrangeTotimestamp) Latest() TsMrangeLatest { + c.cs.s = append(c.cs.s, "LATEST") + return (TsMrangeLatest)(c) +} + +func (c TsMrangeTotimestamp) FilterByTs(timestamp ...int64) TsMrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsMrangeFilterByTs)(c) +} + +func (c TsMrangeTotimestamp) FilterByValue(min float64, max float64) TsMrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsMrangeFilterByValue)(c) +} + +func (c TsMrangeTotimestamp) Withlabels() TsMrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrangeWithlabels)(c) +} + +func (c TsMrangeTotimestamp) SelectedLabels(labels []string) TsMrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrangeSelectedLabels)(c) +} + +func (c TsMrangeTotimestamp) Count(count int64) TsMrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrangeCount)(c) +} + +func (c TsMrangeTotimestamp) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeTotimestamp) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeTotimestamp) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeTotimestamp) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeTotimestamp) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeTotimestamp) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeTotimestamp) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeTotimestamp) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeTotimestamp) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeTotimestamp) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeTotimestamp) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeTotimestamp) AggregationVarP() 
TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeTotimestamp) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeTotimestamp) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeTotimestamp) Filter(filter ...string) TsMrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrangeWithlabels Completed + +func (c TsMrangeWithlabels) Count(count int64) TsMrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrangeCount)(c) +} + +func (c TsMrangeWithlabels) Align(value string) TsMrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrangeAlign)(c) +} + +func (c TsMrangeWithlabels) AggregationAvg() TsMrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrangeAggregationAggregationAvg)(c) +} + +func (c TsMrangeWithlabels) AggregationSum() TsMrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrangeAggregationAggregationSum)(c) +} + +func (c TsMrangeWithlabels) AggregationMin() TsMrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrangeAggregationAggregationMin)(c) +} + +func (c TsMrangeWithlabels) AggregationMax() TsMrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrangeAggregationAggregationMax)(c) +} + +func (c TsMrangeWithlabels) AggregationRange() TsMrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrangeAggregationAggregationRange)(c) +} + +func (c TsMrangeWithlabels) AggregationCount() TsMrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrangeAggregationAggregationCount)(c) +} + +func (c TsMrangeWithlabels) AggregationFirst() TsMrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrangeAggregationAggregationFirst)(c) +} + +func (c TsMrangeWithlabels) AggregationLast() TsMrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrangeAggregationAggregationLast)(c) +} + +func (c TsMrangeWithlabels) AggregationStdP() TsMrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrangeAggregationAggregationStdP)(c) +} + +func (c TsMrangeWithlabels) AggregationStdS() TsMrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrangeAggregationAggregationStdS)(c) +} + +func (c TsMrangeWithlabels) AggregationVarP() TsMrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrangeAggregationAggregationVarP)(c) +} + +func (c TsMrangeWithlabels) AggregationVarS() TsMrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrangeAggregationAggregationVarS)(c) +} + +func (c TsMrangeWithlabels) AggregationTwa() TsMrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrangeAggregationAggregationTwa)(c) +} + +func (c TsMrangeWithlabels) Filter(filter ...string) TsMrangeFilter { + c.cs.s = 
append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrangeFilter)(c) +} + +type TsMrevrange Completed + +func (b Builder) TsMrevrange() (c TsMrevrange) { + c = TsMrevrange{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "TS.MREVRANGE") + return c +} + +func (c TsMrevrange) Fromtimestamp(fromtimestamp int64) TsMrevrangeFromtimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(fromtimestamp, 10)) + return (TsMrevrangeFromtimestamp)(c) +} + +type TsMrevrangeAggregationAggregationAvg Completed + +func (c TsMrevrangeAggregationAggregationAvg) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationCount Completed + +func (c TsMrevrangeAggregationAggregationCount) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationFirst Completed + +func (c TsMrevrangeAggregationAggregationFirst) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationLast Completed + +func (c TsMrevrangeAggregationAggregationLast) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationMax Completed + +func (c TsMrevrangeAggregationAggregationMax) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationMin Completed + +func (c TsMrevrangeAggregationAggregationMin) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationRange Completed + +func (c TsMrevrangeAggregationAggregationRange) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationStdP Completed + +func (c TsMrevrangeAggregationAggregationStdP) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationStdS Completed + +func (c TsMrevrangeAggregationAggregationStdS) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationSum Completed + +func (c TsMrevrangeAggregationAggregationSum) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationTwa Completed + +func (c 
TsMrevrangeAggregationAggregationTwa) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationVarP Completed + +func (c TsMrevrangeAggregationAggregationVarP) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationAggregationVarS Completed + +func (c TsMrevrangeAggregationAggregationVarS) Bucketduration(bucketduration int64) TsMrevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsMrevrangeAggregationBucketduration)(c) +} + +type TsMrevrangeAggregationBucketduration Completed + +func (c TsMrevrangeAggregationBucketduration) Buckettimestamp(buckettimestamp string) TsMrevrangeAggregationBuckettimestamp { + c.cs.s = append(c.cs.s, "BUCKETTIMESTAMP", buckettimestamp) + return (TsMrevrangeAggregationBuckettimestamp)(c) +} + +func (c TsMrevrangeAggregationBucketduration) Empty() TsMrevrangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsMrevrangeAggregationEmpty)(c) +} + +func (c TsMrevrangeAggregationBucketduration) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeAggregationBuckettimestamp Completed + +func (c TsMrevrangeAggregationBuckettimestamp) Empty() TsMrevrangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsMrevrangeAggregationEmpty)(c) +} + +func (c TsMrevrangeAggregationBuckettimestamp) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeAggregationEmpty Completed + +func (c TsMrevrangeAggregationEmpty) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeAlign Completed + +func (c TsMrevrangeAlign) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeAlign) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeAlign) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeAlign) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeAlign) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeAlign) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeAlign) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeAlign) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeAlign) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeAlign) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeAlign) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeAlign) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeAlign) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeAlign) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeCount Completed + +func (c TsMrevrangeCount) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeCount) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeCount) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeCount) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeCount) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeCount) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeCount) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeCount) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeCount) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeCount) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeCount) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeCount) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeCount) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeCount) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeCount) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeFilter Completed + +func (c TsMrevrangeFilter) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return c +} + +func (c TsMrevrangeFilter) Groupby(label string, reduce string, reducer string) TsMrevrangeGroupby { + c.cs.s = append(c.cs.s, "GROUPBY", label, reduce, reducer) + return (TsMrevrangeGroupby)(c) +} + +func (c TsMrevrangeFilter) Build() Completed { + return Completed(c) +} + +type TsMrevrangeFilterByTs Completed + +func (c TsMrevrangeFilterByTs) FilterByTs(timestamp ...int64) TsMrevrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c TsMrevrangeFilterByTs) FilterByValue(min float64, max float64) TsMrevrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsMrevrangeFilterByValue)(c) +} + +func (c TsMrevrangeFilterByTs) Withlabels() TsMrevrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrevrangeWithlabels)(c) +} + +func (c TsMrevrangeFilterByTs) SelectedLabels(labels []string) TsMrevrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrevrangeSelectedLabels)(c) +} + +func (c TsMrevrangeFilterByTs) Count(count int64) TsMrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrevrangeCount)(c) +} + +func (c TsMrevrangeFilterByTs) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationVarP() 
TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeFilterByTs) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeFilterByTs) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeFilterByValue Completed + +func (c TsMrevrangeFilterByValue) Withlabels() TsMrevrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrevrangeWithlabels)(c) +} + +func (c TsMrevrangeFilterByValue) SelectedLabels(labels []string) TsMrevrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrevrangeSelectedLabels)(c) +} + +func (c TsMrevrangeFilterByValue) Count(count int64) TsMrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrevrangeCount)(c) +} + +func (c TsMrevrangeFilterByValue) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + 
c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeFilterByValue) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeFilterByValue) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeFromtimestamp Completed + +func (c TsMrevrangeFromtimestamp) Totimestamp(totimestamp int64) TsMrevrangeTotimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(totimestamp, 10)) + return (TsMrevrangeTotimestamp)(c) +} + +type TsMrevrangeGroupby Completed + +func (c TsMrevrangeGroupby) Build() Completed { + return Completed(c) +} + +type TsMrevrangeLatest Completed + +func (c TsMrevrangeLatest) FilterByTs(timestamp ...int64) TsMrevrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsMrevrangeFilterByTs)(c) +} + +func (c TsMrevrangeLatest) FilterByValue(min float64, max float64) TsMrevrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsMrevrangeFilterByValue)(c) +} + +func (c TsMrevrangeLatest) Withlabels() TsMrevrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrevrangeWithlabels)(c) +} + +func (c TsMrevrangeLatest) SelectedLabels(labels []string) TsMrevrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) 
+ return (TsMrevrangeSelectedLabels)(c) +} + +func (c TsMrevrangeLatest) Count(count int64) TsMrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrevrangeCount)(c) +} + +func (c TsMrevrangeLatest) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeLatest) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeLatest) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeLatest) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeLatest) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeLatest) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeLatest) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeLatest) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeLatest) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeLatest) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeLatest) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeLatest) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeLatest) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeLatest) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeLatest) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeSelectedLabels Completed + +func (c TsMrevrangeSelectedLabels) Count(count int64) TsMrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrevrangeCount)(c) +} + +func (c TsMrevrangeSelectedLabels) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeSelectedLabels) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeSelectedLabels) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) 
+ return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeTotimestamp Completed + +func (c TsMrevrangeTotimestamp) Latest() TsMrevrangeLatest { + c.cs.s = append(c.cs.s, "LATEST") + return (TsMrevrangeLatest)(c) +} + +func (c TsMrevrangeTotimestamp) FilterByTs(timestamp ...int64) TsMrevrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsMrevrangeFilterByTs)(c) +} + +func (c TsMrevrangeTotimestamp) FilterByValue(min float64, max float64) TsMrevrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsMrevrangeFilterByValue)(c) +} + +func (c TsMrevrangeTotimestamp) Withlabels() TsMrevrangeWithlabels { + c.cs.s = append(c.cs.s, "WITHLABELS") + return (TsMrevrangeWithlabels)(c) +} + +func (c TsMrevrangeTotimestamp) SelectedLabels(labels []string) TsMrevrangeSelectedLabels { + c.cs.s = append(c.cs.s, "SELECTED_LABELS") + c.cs.s = append(c.cs.s, labels...) + return (TsMrevrangeSelectedLabels)(c) +} + +func (c TsMrevrangeTotimestamp) Count(count int64) TsMrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrevrangeCount)(c) +} + +func (c TsMrevrangeTotimestamp) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return 
(TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeTotimestamp) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c TsMrevrangeTotimestamp) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsMrevrangeWithlabels Completed + +func (c TsMrevrangeWithlabels) Count(count int64) TsMrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsMrevrangeCount)(c) +} + +func (c TsMrevrangeWithlabels) Align(value string) TsMrevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsMrevrangeAlign)(c) +} + +func (c TsMrevrangeWithlabels) AggregationAvg() TsMrevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsMrevrangeAggregationAggregationAvg)(c) +} + +func (c TsMrevrangeWithlabels) AggregationSum() TsMrevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsMrevrangeAggregationAggregationSum)(c) +} + +func (c TsMrevrangeWithlabels) AggregationMin() TsMrevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsMrevrangeAggregationAggregationMin)(c) +} + +func (c TsMrevrangeWithlabels) AggregationMax() TsMrevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsMrevrangeAggregationAggregationMax)(c) +} + +func (c TsMrevrangeWithlabels) AggregationRange() TsMrevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsMrevrangeAggregationAggregationRange)(c) +} + +func (c TsMrevrangeWithlabels) AggregationCount() TsMrevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsMrevrangeAggregationAggregationCount)(c) +} + +func (c TsMrevrangeWithlabels) AggregationFirst() TsMrevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsMrevrangeAggregationAggregationFirst)(c) +} + +func (c TsMrevrangeWithlabels) AggregationLast() TsMrevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsMrevrangeAggregationAggregationLast)(c) +} + +func (c TsMrevrangeWithlabels) AggregationStdP() TsMrevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsMrevrangeAggregationAggregationStdP)(c) +} + +func (c TsMrevrangeWithlabels) AggregationStdS() TsMrevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsMrevrangeAggregationAggregationStdS)(c) +} + +func (c TsMrevrangeWithlabels) AggregationVarP() TsMrevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsMrevrangeAggregationAggregationVarP)(c) +} + +func (c TsMrevrangeWithlabels) AggregationVarS() TsMrevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsMrevrangeAggregationAggregationVarS)(c) +} + +func (c TsMrevrangeWithlabels) AggregationTwa() TsMrevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsMrevrangeAggregationAggregationTwa)(c) +} + +func (c 
TsMrevrangeWithlabels) Filter(filter ...string) TsMrevrangeFilter { + c.cs.s = append(c.cs.s, "FILTER") + c.cs.s = append(c.cs.s, filter...) + return (TsMrevrangeFilter)(c) +} + +type TsQueryindex Completed + +func (b Builder) TsQueryindex() (c TsQueryindex) { + c = TsQueryindex{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TS.QUERYINDEX") + return c +} + +func (c TsQueryindex) Filter(filter ...string) TsQueryindexFilter { + c.cs.s = append(c.cs.s, filter...) + return (TsQueryindexFilter)(c) +} + +type TsQueryindexFilter Completed + +func (c TsQueryindexFilter) Filter(filter ...string) TsQueryindexFilter { + c.cs.s = append(c.cs.s, filter...) + return c +} + +func (c TsQueryindexFilter) Build() Completed { + return Completed(c) +} + +type TsRange Completed + +func (b Builder) TsRange() (c TsRange) { + c = TsRange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TS.RANGE") + return c +} + +func (c TsRange) Key(key string) TsRangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsRangeKey)(c) +} + +type TsRangeAggregationAggregationAvg Completed + +func (c TsRangeAggregationAggregationAvg) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationCount Completed + +func (c TsRangeAggregationAggregationCount) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationFirst Completed + +func (c TsRangeAggregationAggregationFirst) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationLast Completed + +func (c TsRangeAggregationAggregationLast) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationMax Completed + +func (c TsRangeAggregationAggregationMax) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationMin Completed + +func (c TsRangeAggregationAggregationMin) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationRange Completed + +func (c TsRangeAggregationAggregationRange) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationStdP Completed + +func (c TsRangeAggregationAggregationStdP) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationStdS Completed + +func (c TsRangeAggregationAggregationStdS) Bucketduration(bucketduration int64) 
TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationSum Completed + +func (c TsRangeAggregationAggregationSum) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationTwa Completed + +func (c TsRangeAggregationAggregationTwa) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationVarP Completed + +func (c TsRangeAggregationAggregationVarP) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationAggregationVarS Completed + +func (c TsRangeAggregationAggregationVarS) Bucketduration(bucketduration int64) TsRangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRangeAggregationBucketduration)(c) +} + +type TsRangeAggregationBucketduration Completed + +func (c TsRangeAggregationBucketduration) Buckettimestamp(buckettimestamp string) TsRangeAggregationBuckettimestamp { + c.cs.s = append(c.cs.s, "BUCKETTIMESTAMP", buckettimestamp) + return (TsRangeAggregationBuckettimestamp)(c) +} + +func (c TsRangeAggregationBucketduration) Empty() TsRangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsRangeAggregationEmpty)(c) +} + +func (c TsRangeAggregationBucketduration) Build() Completed { + return Completed(c) +} + +type TsRangeAggregationBuckettimestamp Completed + +func (c TsRangeAggregationBuckettimestamp) Empty() TsRangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsRangeAggregationEmpty)(c) +} + +func (c TsRangeAggregationBuckettimestamp) Build() Completed { + return Completed(c) +} + +type TsRangeAggregationEmpty Completed + +func (c TsRangeAggregationEmpty) Build() Completed { + return Completed(c) +} + +type TsRangeAlign Completed + +func (c TsRangeAlign) AggregationAvg() TsRangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRangeAggregationAggregationAvg)(c) +} + +func (c TsRangeAlign) AggregationSum() TsRangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRangeAggregationAggregationSum)(c) +} + +func (c TsRangeAlign) AggregationMin() TsRangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRangeAggregationAggregationMin)(c) +} + +func (c TsRangeAlign) AggregationMax() TsRangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRangeAggregationAggregationMax)(c) +} + +func (c TsRangeAlign) AggregationRange() TsRangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRangeAggregationAggregationRange)(c) +} + +func (c TsRangeAlign) AggregationCount() TsRangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRangeAggregationAggregationCount)(c) +} + +func (c TsRangeAlign) AggregationFirst() TsRangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRangeAggregationAggregationFirst)(c) +} + +func (c TsRangeAlign) 
AggregationLast() TsRangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRangeAggregationAggregationLast)(c) +} + +func (c TsRangeAlign) AggregationStdP() TsRangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRangeAggregationAggregationStdP)(c) +} + +func (c TsRangeAlign) AggregationStdS() TsRangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRangeAggregationAggregationStdS)(c) +} + +func (c TsRangeAlign) AggregationVarP() TsRangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRangeAggregationAggregationVarP)(c) +} + +func (c TsRangeAlign) AggregationVarS() TsRangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRangeAggregationAggregationVarS)(c) +} + +func (c TsRangeAlign) AggregationTwa() TsRangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRangeAggregationAggregationTwa)(c) +} + +func (c TsRangeAlign) Build() Completed { + return Completed(c) +} + +type TsRangeCount Completed + +func (c TsRangeCount) Align(value string) TsRangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRangeAlign)(c) +} + +func (c TsRangeCount) AggregationAvg() TsRangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRangeAggregationAggregationAvg)(c) +} + +func (c TsRangeCount) AggregationSum() TsRangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRangeAggregationAggregationSum)(c) +} + +func (c TsRangeCount) AggregationMin() TsRangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRangeAggregationAggregationMin)(c) +} + +func (c TsRangeCount) AggregationMax() TsRangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRangeAggregationAggregationMax)(c) +} + +func (c TsRangeCount) AggregationRange() TsRangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRangeAggregationAggregationRange)(c) +} + +func (c TsRangeCount) AggregationCount() TsRangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRangeAggregationAggregationCount)(c) +} + +func (c TsRangeCount) AggregationFirst() TsRangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRangeAggregationAggregationFirst)(c) +} + +func (c TsRangeCount) AggregationLast() TsRangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRangeAggregationAggregationLast)(c) +} + +func (c TsRangeCount) AggregationStdP() TsRangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRangeAggregationAggregationStdP)(c) +} + +func (c TsRangeCount) AggregationStdS() TsRangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRangeAggregationAggregationStdS)(c) +} + +func (c TsRangeCount) AggregationVarP() TsRangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRangeAggregationAggregationVarP)(c) +} + +func (c TsRangeCount) AggregationVarS() TsRangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRangeAggregationAggregationVarS)(c) +} + +func (c TsRangeCount) AggregationTwa() TsRangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") 
+ return (TsRangeAggregationAggregationTwa)(c) +} + +func (c TsRangeCount) Build() Completed { + return Completed(c) +} + +type TsRangeFilterByTs Completed + +func (c TsRangeFilterByTs) FilterByTs(timestamp ...int64) TsRangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c TsRangeFilterByTs) FilterByValue(min float64, max float64) TsRangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsRangeFilterByValue)(c) +} + +func (c TsRangeFilterByTs) Count(count int64) TsRangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRangeCount)(c) +} + +func (c TsRangeFilterByTs) Align(value string) TsRangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRangeAlign)(c) +} + +func (c TsRangeFilterByTs) AggregationAvg() TsRangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRangeAggregationAggregationAvg)(c) +} + +func (c TsRangeFilterByTs) AggregationSum() TsRangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRangeAggregationAggregationSum)(c) +} + +func (c TsRangeFilterByTs) AggregationMin() TsRangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRangeAggregationAggregationMin)(c) +} + +func (c TsRangeFilterByTs) AggregationMax() TsRangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRangeAggregationAggregationMax)(c) +} + +func (c TsRangeFilterByTs) AggregationRange() TsRangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRangeAggregationAggregationRange)(c) +} + +func (c TsRangeFilterByTs) AggregationCount() TsRangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRangeAggregationAggregationCount)(c) +} + +func (c TsRangeFilterByTs) AggregationFirst() TsRangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRangeAggregationAggregationFirst)(c) +} + +func (c TsRangeFilterByTs) AggregationLast() TsRangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRangeAggregationAggregationLast)(c) +} + +func (c TsRangeFilterByTs) AggregationStdP() TsRangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRangeAggregationAggregationStdP)(c) +} + +func (c TsRangeFilterByTs) AggregationStdS() TsRangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRangeAggregationAggregationStdS)(c) +} + +func (c TsRangeFilterByTs) AggregationVarP() TsRangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRangeAggregationAggregationVarP)(c) +} + +func (c TsRangeFilterByTs) AggregationVarS() TsRangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRangeAggregationAggregationVarS)(c) +} + +func (c TsRangeFilterByTs) AggregationTwa() TsRangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRangeAggregationAggregationTwa)(c) +} + +func (c TsRangeFilterByTs) Build() Completed { + return Completed(c) +} + +type TsRangeFilterByValue Completed + +func (c TsRangeFilterByValue) Count(count int64) TsRangeCount { + c.cs.s = append(c.cs.s, "COUNT", 
strconv.FormatInt(count, 10)) + return (TsRangeCount)(c) +} + +func (c TsRangeFilterByValue) Align(value string) TsRangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRangeAlign)(c) +} + +func (c TsRangeFilterByValue) AggregationAvg() TsRangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRangeAggregationAggregationAvg)(c) +} + +func (c TsRangeFilterByValue) AggregationSum() TsRangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRangeAggregationAggregationSum)(c) +} + +func (c TsRangeFilterByValue) AggregationMin() TsRangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRangeAggregationAggregationMin)(c) +} + +func (c TsRangeFilterByValue) AggregationMax() TsRangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRangeAggregationAggregationMax)(c) +} + +func (c TsRangeFilterByValue) AggregationRange() TsRangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRangeAggregationAggregationRange)(c) +} + +func (c TsRangeFilterByValue) AggregationCount() TsRangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRangeAggregationAggregationCount)(c) +} + +func (c TsRangeFilterByValue) AggregationFirst() TsRangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRangeAggregationAggregationFirst)(c) +} + +func (c TsRangeFilterByValue) AggregationLast() TsRangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRangeAggregationAggregationLast)(c) +} + +func (c TsRangeFilterByValue) AggregationStdP() TsRangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRangeAggregationAggregationStdP)(c) +} + +func (c TsRangeFilterByValue) AggregationStdS() TsRangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRangeAggregationAggregationStdS)(c) +} + +func (c TsRangeFilterByValue) AggregationVarP() TsRangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRangeAggregationAggregationVarP)(c) +} + +func (c TsRangeFilterByValue) AggregationVarS() TsRangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRangeAggregationAggregationVarS)(c) +} + +func (c TsRangeFilterByValue) AggregationTwa() TsRangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRangeAggregationAggregationTwa)(c) +} + +func (c TsRangeFilterByValue) Build() Completed { + return Completed(c) +} + +type TsRangeFromtimestamp Completed + +func (c TsRangeFromtimestamp) Totimestamp(totimestamp int64) TsRangeTotimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(totimestamp, 10)) + return (TsRangeTotimestamp)(c) +} + +type TsRangeKey Completed + +func (c TsRangeKey) Fromtimestamp(fromtimestamp int64) TsRangeFromtimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(fromtimestamp, 10)) + return (TsRangeFromtimestamp)(c) +} + +type TsRangeLatest Completed + +func (c TsRangeLatest) FilterByTs(timestamp ...int64) TsRangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsRangeFilterByTs)(c) +} + +func (c TsRangeLatest) FilterByValue(min float64, max float64) TsRangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", 
strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsRangeFilterByValue)(c) +} + +func (c TsRangeLatest) Count(count int64) TsRangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRangeCount)(c) +} + +func (c TsRangeLatest) Align(value string) TsRangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRangeAlign)(c) +} + +func (c TsRangeLatest) AggregationAvg() TsRangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRangeAggregationAggregationAvg)(c) +} + +func (c TsRangeLatest) AggregationSum() TsRangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRangeAggregationAggregationSum)(c) +} + +func (c TsRangeLatest) AggregationMin() TsRangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRangeAggregationAggregationMin)(c) +} + +func (c TsRangeLatest) AggregationMax() TsRangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRangeAggregationAggregationMax)(c) +} + +func (c TsRangeLatest) AggregationRange() TsRangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRangeAggregationAggregationRange)(c) +} + +func (c TsRangeLatest) AggregationCount() TsRangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRangeAggregationAggregationCount)(c) +} + +func (c TsRangeLatest) AggregationFirst() TsRangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRangeAggregationAggregationFirst)(c) +} + +func (c TsRangeLatest) AggregationLast() TsRangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRangeAggregationAggregationLast)(c) +} + +func (c TsRangeLatest) AggregationStdP() TsRangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRangeAggregationAggregationStdP)(c) +} + +func (c TsRangeLatest) AggregationStdS() TsRangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRangeAggregationAggregationStdS)(c) +} + +func (c TsRangeLatest) AggregationVarP() TsRangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRangeAggregationAggregationVarP)(c) +} + +func (c TsRangeLatest) AggregationVarS() TsRangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRangeAggregationAggregationVarS)(c) +} + +func (c TsRangeLatest) AggregationTwa() TsRangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRangeAggregationAggregationTwa)(c) +} + +func (c TsRangeLatest) Build() Completed { + return Completed(c) +} + +type TsRangeTotimestamp Completed + +func (c TsRangeTotimestamp) Latest() TsRangeLatest { + c.cs.s = append(c.cs.s, "LATEST") + return (TsRangeLatest)(c) +} + +func (c TsRangeTotimestamp) FilterByTs(timestamp ...int64) TsRangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsRangeFilterByTs)(c) +} + +func (c TsRangeTotimestamp) FilterByValue(min float64, max float64) TsRangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsRangeFilterByValue)(c) +} + +func (c TsRangeTotimestamp) Count(count int64) TsRangeCount { + c.cs.s = 
append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRangeCount)(c) +} + +func (c TsRangeTotimestamp) Align(value string) TsRangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRangeAlign)(c) +} + +func (c TsRangeTotimestamp) AggregationAvg() TsRangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRangeAggregationAggregationAvg)(c) +} + +func (c TsRangeTotimestamp) AggregationSum() TsRangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRangeAggregationAggregationSum)(c) +} + +func (c TsRangeTotimestamp) AggregationMin() TsRangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRangeAggregationAggregationMin)(c) +} + +func (c TsRangeTotimestamp) AggregationMax() TsRangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRangeAggregationAggregationMax)(c) +} + +func (c TsRangeTotimestamp) AggregationRange() TsRangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRangeAggregationAggregationRange)(c) +} + +func (c TsRangeTotimestamp) AggregationCount() TsRangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRangeAggregationAggregationCount)(c) +} + +func (c TsRangeTotimestamp) AggregationFirst() TsRangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRangeAggregationAggregationFirst)(c) +} + +func (c TsRangeTotimestamp) AggregationLast() TsRangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRangeAggregationAggregationLast)(c) +} + +func (c TsRangeTotimestamp) AggregationStdP() TsRangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRangeAggregationAggregationStdP)(c) +} + +func (c TsRangeTotimestamp) AggregationStdS() TsRangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRangeAggregationAggregationStdS)(c) +} + +func (c TsRangeTotimestamp) AggregationVarP() TsRangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRangeAggregationAggregationVarP)(c) +} + +func (c TsRangeTotimestamp) AggregationVarS() TsRangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRangeAggregationAggregationVarS)(c) +} + +func (c TsRangeTotimestamp) AggregationTwa() TsRangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRangeAggregationAggregationTwa)(c) +} + +func (c TsRangeTotimestamp) Build() Completed { + return Completed(c) +} + +type TsRevrange Completed + +func (b Builder) TsRevrange() (c TsRevrange) { + c = TsRevrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TS.REVRANGE") + return c +} + +func (c TsRevrange) Key(key string) TsRevrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TsRevrangeKey)(c) +} + +type TsRevrangeAggregationAggregationAvg Completed + +func (c TsRevrangeAggregationAggregationAvg) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationCount Completed + +func (c TsRevrangeAggregationAggregationCount) Bucketduration(bucketduration int64) 
TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationFirst Completed + +func (c TsRevrangeAggregationAggregationFirst) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationLast Completed + +func (c TsRevrangeAggregationAggregationLast) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationMax Completed + +func (c TsRevrangeAggregationAggregationMax) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationMin Completed + +func (c TsRevrangeAggregationAggregationMin) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationRange Completed + +func (c TsRevrangeAggregationAggregationRange) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationStdP Completed + +func (c TsRevrangeAggregationAggregationStdP) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationStdS Completed + +func (c TsRevrangeAggregationAggregationStdS) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationSum Completed + +func (c TsRevrangeAggregationAggregationSum) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationTwa Completed + +func (c TsRevrangeAggregationAggregationTwa) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationVarP Completed + +func (c TsRevrangeAggregationAggregationVarP) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationAggregationVarS Completed + +func (c TsRevrangeAggregationAggregationVarS) Bucketduration(bucketduration int64) TsRevrangeAggregationBucketduration { + c.cs.s = append(c.cs.s, strconv.FormatInt(bucketduration, 10)) + return (TsRevrangeAggregationBucketduration)(c) +} + +type TsRevrangeAggregationBucketduration Completed + +func (c TsRevrangeAggregationBucketduration) Buckettimestamp(buckettimestamp 
string) TsRevrangeAggregationBuckettimestamp { + c.cs.s = append(c.cs.s, "BUCKETTIMESTAMP", buckettimestamp) + return (TsRevrangeAggregationBuckettimestamp)(c) +} + +func (c TsRevrangeAggregationBucketduration) Empty() TsRevrangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsRevrangeAggregationEmpty)(c) +} + +func (c TsRevrangeAggregationBucketduration) Build() Completed { + return Completed(c) +} + +type TsRevrangeAggregationBuckettimestamp Completed + +func (c TsRevrangeAggregationBuckettimestamp) Empty() TsRevrangeAggregationEmpty { + c.cs.s = append(c.cs.s, "EMPTY") + return (TsRevrangeAggregationEmpty)(c) +} + +func (c TsRevrangeAggregationBuckettimestamp) Build() Completed { + return Completed(c) +} + +type TsRevrangeAggregationEmpty Completed + +func (c TsRevrangeAggregationEmpty) Build() Completed { + return Completed(c) +} + +type TsRevrangeAlign Completed + +func (c TsRevrangeAlign) AggregationAvg() TsRevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRevrangeAggregationAggregationAvg)(c) +} + +func (c TsRevrangeAlign) AggregationSum() TsRevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRevrangeAggregationAggregationSum)(c) +} + +func (c TsRevrangeAlign) AggregationMin() TsRevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRevrangeAggregationAggregationMin)(c) +} + +func (c TsRevrangeAlign) AggregationMax() TsRevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRevrangeAggregationAggregationMax)(c) +} + +func (c TsRevrangeAlign) AggregationRange() TsRevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRevrangeAggregationAggregationRange)(c) +} + +func (c TsRevrangeAlign) AggregationCount() TsRevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRevrangeAggregationAggregationCount)(c) +} + +func (c TsRevrangeAlign) AggregationFirst() TsRevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRevrangeAggregationAggregationFirst)(c) +} + +func (c TsRevrangeAlign) AggregationLast() TsRevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRevrangeAggregationAggregationLast)(c) +} + +func (c TsRevrangeAlign) AggregationStdP() TsRevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRevrangeAggregationAggregationStdP)(c) +} + +func (c TsRevrangeAlign) AggregationStdS() TsRevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRevrangeAggregationAggregationStdS)(c) +} + +func (c TsRevrangeAlign) AggregationVarP() TsRevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRevrangeAggregationAggregationVarP)(c) +} + +func (c TsRevrangeAlign) AggregationVarS() TsRevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRevrangeAggregationAggregationVarS)(c) +} + +func (c TsRevrangeAlign) AggregationTwa() TsRevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRevrangeAggregationAggregationTwa)(c) +} + +func (c TsRevrangeAlign) Build() Completed { + return Completed(c) +} + +type TsRevrangeCount Completed + +func (c TsRevrangeCount) Align(value string) TsRevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + 
return (TsRevrangeAlign)(c) +} + +func (c TsRevrangeCount) AggregationAvg() TsRevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRevrangeAggregationAggregationAvg)(c) +} + +func (c TsRevrangeCount) AggregationSum() TsRevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRevrangeAggregationAggregationSum)(c) +} + +func (c TsRevrangeCount) AggregationMin() TsRevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRevrangeAggregationAggregationMin)(c) +} + +func (c TsRevrangeCount) AggregationMax() TsRevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRevrangeAggregationAggregationMax)(c) +} + +func (c TsRevrangeCount) AggregationRange() TsRevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRevrangeAggregationAggregationRange)(c) +} + +func (c TsRevrangeCount) AggregationCount() TsRevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRevrangeAggregationAggregationCount)(c) +} + +func (c TsRevrangeCount) AggregationFirst() TsRevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRevrangeAggregationAggregationFirst)(c) +} + +func (c TsRevrangeCount) AggregationLast() TsRevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRevrangeAggregationAggregationLast)(c) +} + +func (c TsRevrangeCount) AggregationStdP() TsRevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRevrangeAggregationAggregationStdP)(c) +} + +func (c TsRevrangeCount) AggregationStdS() TsRevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRevrangeAggregationAggregationStdS)(c) +} + +func (c TsRevrangeCount) AggregationVarP() TsRevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRevrangeAggregationAggregationVarP)(c) +} + +func (c TsRevrangeCount) AggregationVarS() TsRevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRevrangeAggregationAggregationVarS)(c) +} + +func (c TsRevrangeCount) AggregationTwa() TsRevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRevrangeAggregationAggregationTwa)(c) +} + +func (c TsRevrangeCount) Build() Completed { + return Completed(c) +} + +type TsRevrangeFilterByTs Completed + +func (c TsRevrangeFilterByTs) FilterByTs(timestamp ...int64) TsRevrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c TsRevrangeFilterByTs) FilterByValue(min float64, max float64) TsRevrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsRevrangeFilterByValue)(c) +} + +func (c TsRevrangeFilterByTs) Count(count int64) TsRevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRevrangeCount)(c) +} + +func (c TsRevrangeFilterByTs) Align(value string) TsRevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRevrangeAlign)(c) +} + +func (c TsRevrangeFilterByTs) AggregationAvg() TsRevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return 
(TsRevrangeAggregationAggregationAvg)(c) +} + +func (c TsRevrangeFilterByTs) AggregationSum() TsRevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRevrangeAggregationAggregationSum)(c) +} + +func (c TsRevrangeFilterByTs) AggregationMin() TsRevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRevrangeAggregationAggregationMin)(c) +} + +func (c TsRevrangeFilterByTs) AggregationMax() TsRevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRevrangeAggregationAggregationMax)(c) +} + +func (c TsRevrangeFilterByTs) AggregationRange() TsRevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRevrangeAggregationAggregationRange)(c) +} + +func (c TsRevrangeFilterByTs) AggregationCount() TsRevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRevrangeAggregationAggregationCount)(c) +} + +func (c TsRevrangeFilterByTs) AggregationFirst() TsRevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRevrangeAggregationAggregationFirst)(c) +} + +func (c TsRevrangeFilterByTs) AggregationLast() TsRevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRevrangeAggregationAggregationLast)(c) +} + +func (c TsRevrangeFilterByTs) AggregationStdP() TsRevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRevrangeAggregationAggregationStdP)(c) +} + +func (c TsRevrangeFilterByTs) AggregationStdS() TsRevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRevrangeAggregationAggregationStdS)(c) +} + +func (c TsRevrangeFilterByTs) AggregationVarP() TsRevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRevrangeAggregationAggregationVarP)(c) +} + +func (c TsRevrangeFilterByTs) AggregationVarS() TsRevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRevrangeAggregationAggregationVarS)(c) +} + +func (c TsRevrangeFilterByTs) AggregationTwa() TsRevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRevrangeAggregationAggregationTwa)(c) +} + +func (c TsRevrangeFilterByTs) Build() Completed { + return Completed(c) +} + +type TsRevrangeFilterByValue Completed + +func (c TsRevrangeFilterByValue) Count(count int64) TsRevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRevrangeCount)(c) +} + +func (c TsRevrangeFilterByValue) Align(value string) TsRevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRevrangeAlign)(c) +} + +func (c TsRevrangeFilterByValue) AggregationAvg() TsRevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRevrangeAggregationAggregationAvg)(c) +} + +func (c TsRevrangeFilterByValue) AggregationSum() TsRevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return (TsRevrangeAggregationAggregationSum)(c) +} + +func (c TsRevrangeFilterByValue) AggregationMin() TsRevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRevrangeAggregationAggregationMin)(c) +} + +func (c TsRevrangeFilterByValue) AggregationMax() TsRevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return 
(TsRevrangeAggregationAggregationMax)(c) +} + +func (c TsRevrangeFilterByValue) AggregationRange() TsRevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRevrangeAggregationAggregationRange)(c) +} + +func (c TsRevrangeFilterByValue) AggregationCount() TsRevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRevrangeAggregationAggregationCount)(c) +} + +func (c TsRevrangeFilterByValue) AggregationFirst() TsRevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRevrangeAggregationAggregationFirst)(c) +} + +func (c TsRevrangeFilterByValue) AggregationLast() TsRevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRevrangeAggregationAggregationLast)(c) +} + +func (c TsRevrangeFilterByValue) AggregationStdP() TsRevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRevrangeAggregationAggregationStdP)(c) +} + +func (c TsRevrangeFilterByValue) AggregationStdS() TsRevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRevrangeAggregationAggregationStdS)(c) +} + +func (c TsRevrangeFilterByValue) AggregationVarP() TsRevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRevrangeAggregationAggregationVarP)(c) +} + +func (c TsRevrangeFilterByValue) AggregationVarS() TsRevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRevrangeAggregationAggregationVarS)(c) +} + +func (c TsRevrangeFilterByValue) AggregationTwa() TsRevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRevrangeAggregationAggregationTwa)(c) +} + +func (c TsRevrangeFilterByValue) Build() Completed { + return Completed(c) +} + +type TsRevrangeFromtimestamp Completed + +func (c TsRevrangeFromtimestamp) Totimestamp(totimestamp int64) TsRevrangeTotimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(totimestamp, 10)) + return (TsRevrangeTotimestamp)(c) +} + +type TsRevrangeKey Completed + +func (c TsRevrangeKey) Fromtimestamp(fromtimestamp int64) TsRevrangeFromtimestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(fromtimestamp, 10)) + return (TsRevrangeFromtimestamp)(c) +} + +type TsRevrangeLatest Completed + +func (c TsRevrangeLatest) FilterByTs(timestamp ...int64) TsRevrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsRevrangeFilterByTs)(c) +} + +func (c TsRevrangeLatest) FilterByValue(min float64, max float64) TsRevrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsRevrangeFilterByValue)(c) +} + +func (c TsRevrangeLatest) Count(count int64) TsRevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRevrangeCount)(c) +} + +func (c TsRevrangeLatest) Align(value string) TsRevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRevrangeAlign)(c) +} + +func (c TsRevrangeLatest) AggregationAvg() TsRevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRevrangeAggregationAggregationAvg)(c) +} + +func (c TsRevrangeLatest) AggregationSum() TsRevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return 
(TsRevrangeAggregationAggregationSum)(c) +} + +func (c TsRevrangeLatest) AggregationMin() TsRevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRevrangeAggregationAggregationMin)(c) +} + +func (c TsRevrangeLatest) AggregationMax() TsRevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRevrangeAggregationAggregationMax)(c) +} + +func (c TsRevrangeLatest) AggregationRange() TsRevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRevrangeAggregationAggregationRange)(c) +} + +func (c TsRevrangeLatest) AggregationCount() TsRevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRevrangeAggregationAggregationCount)(c) +} + +func (c TsRevrangeLatest) AggregationFirst() TsRevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRevrangeAggregationAggregationFirst)(c) +} + +func (c TsRevrangeLatest) AggregationLast() TsRevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRevrangeAggregationAggregationLast)(c) +} + +func (c TsRevrangeLatest) AggregationStdP() TsRevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRevrangeAggregationAggregationStdP)(c) +} + +func (c TsRevrangeLatest) AggregationStdS() TsRevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRevrangeAggregationAggregationStdS)(c) +} + +func (c TsRevrangeLatest) AggregationVarP() TsRevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRevrangeAggregationAggregationVarP)(c) +} + +func (c TsRevrangeLatest) AggregationVarS() TsRevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRevrangeAggregationAggregationVarS)(c) +} + +func (c TsRevrangeLatest) AggregationTwa() TsRevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRevrangeAggregationAggregationTwa)(c) +} + +func (c TsRevrangeLatest) Build() Completed { + return Completed(c) +} + +type TsRevrangeTotimestamp Completed + +func (c TsRevrangeTotimestamp) Latest() TsRevrangeLatest { + c.cs.s = append(c.cs.s, "LATEST") + return (TsRevrangeLatest)(c) +} + +func (c TsRevrangeTotimestamp) FilterByTs(timestamp ...int64) TsRevrangeFilterByTs { + c.cs.s = append(c.cs.s, "FILTER_BY_TS") + for _, n := range timestamp { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (TsRevrangeFilterByTs)(c) +} + +func (c TsRevrangeTotimestamp) FilterByValue(min float64, max float64) TsRevrangeFilterByValue { + c.cs.s = append(c.cs.s, "FILTER_BY_VALUE", strconv.FormatFloat(min, 'f', -1, 64), strconv.FormatFloat(max, 'f', -1, 64)) + return (TsRevrangeFilterByValue)(c) +} + +func (c TsRevrangeTotimestamp) Count(count int64) TsRevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (TsRevrangeCount)(c) +} + +func (c TsRevrangeTotimestamp) Align(value string) TsRevrangeAlign { + c.cs.s = append(c.cs.s, "ALIGN", value) + return (TsRevrangeAlign)(c) +} + +func (c TsRevrangeTotimestamp) AggregationAvg() TsRevrangeAggregationAggregationAvg { + c.cs.s = append(c.cs.s, "AGGREGATION", "AVG") + return (TsRevrangeAggregationAggregationAvg)(c) +} + +func (c TsRevrangeTotimestamp) AggregationSum() TsRevrangeAggregationAggregationSum { + c.cs.s = append(c.cs.s, "AGGREGATION", "SUM") + return 
(TsRevrangeAggregationAggregationSum)(c) +} + +func (c TsRevrangeTotimestamp) AggregationMin() TsRevrangeAggregationAggregationMin { + c.cs.s = append(c.cs.s, "AGGREGATION", "MIN") + return (TsRevrangeAggregationAggregationMin)(c) +} + +func (c TsRevrangeTotimestamp) AggregationMax() TsRevrangeAggregationAggregationMax { + c.cs.s = append(c.cs.s, "AGGREGATION", "MAX") + return (TsRevrangeAggregationAggregationMax)(c) +} + +func (c TsRevrangeTotimestamp) AggregationRange() TsRevrangeAggregationAggregationRange { + c.cs.s = append(c.cs.s, "AGGREGATION", "RANGE") + return (TsRevrangeAggregationAggregationRange)(c) +} + +func (c TsRevrangeTotimestamp) AggregationCount() TsRevrangeAggregationAggregationCount { + c.cs.s = append(c.cs.s, "AGGREGATION", "COUNT") + return (TsRevrangeAggregationAggregationCount)(c) +} + +func (c TsRevrangeTotimestamp) AggregationFirst() TsRevrangeAggregationAggregationFirst { + c.cs.s = append(c.cs.s, "AGGREGATION", "FIRST") + return (TsRevrangeAggregationAggregationFirst)(c) +} + +func (c TsRevrangeTotimestamp) AggregationLast() TsRevrangeAggregationAggregationLast { + c.cs.s = append(c.cs.s, "AGGREGATION", "LAST") + return (TsRevrangeAggregationAggregationLast)(c) +} + +func (c TsRevrangeTotimestamp) AggregationStdP() TsRevrangeAggregationAggregationStdP { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.P") + return (TsRevrangeAggregationAggregationStdP)(c) +} + +func (c TsRevrangeTotimestamp) AggregationStdS() TsRevrangeAggregationAggregationStdS { + c.cs.s = append(c.cs.s, "AGGREGATION", "STD.S") + return (TsRevrangeAggregationAggregationStdS)(c) +} + +func (c TsRevrangeTotimestamp) AggregationVarP() TsRevrangeAggregationAggregationVarP { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.P") + return (TsRevrangeAggregationAggregationVarP)(c) +} + +func (c TsRevrangeTotimestamp) AggregationVarS() TsRevrangeAggregationAggregationVarS { + c.cs.s = append(c.cs.s, "AGGREGATION", "VAR.S") + return (TsRevrangeAggregationAggregationVarS)(c) +} + +func (c TsRevrangeTotimestamp) AggregationTwa() TsRevrangeAggregationAggregationTwa { + c.cs.s = append(c.cs.s, "AGGREGATION", "TWA") + return (TsRevrangeAggregationAggregationTwa)(c) +} + +func (c TsRevrangeTotimestamp) Build() Completed { + return Completed(c) +} + +type Ttl Completed + +func (b Builder) Ttl() (c Ttl) { + c = Ttl{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TTL") + return c +} + +func (c Ttl) Key(key string) TtlKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TtlKey)(c) +} + +type TtlKey Completed + +func (c TtlKey) Build() Completed { + return Completed(c) +} + +func (c TtlKey) Cache() Cacheable { + return Cacheable(c) +} + +type Type Completed + +func (b Builder) Type() (c Type) { + c = Type{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "TYPE") + return c +} + +func (c Type) Key(key string) TypeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (TypeKey)(c) +} + +type TypeKey Completed + +func (c TypeKey) Build() Completed { + return Completed(c) +} + +func (c TypeKey) Cache() Cacheable { + return Cacheable(c) +} + +type Unlink Completed + +func (b Builder) Unlink() (c Unlink) { + c = Unlink{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "UNLINK") + return c +} + +func (c Unlink) Key(key ...string) UnlinkKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | 
slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (UnlinkKey)(c) +} + +type UnlinkKey Completed + +func (c UnlinkKey) Key(key ...string) UnlinkKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c UnlinkKey) Build() Completed { + return Completed(c) +} + +type Unsubscribe Completed + +func (b Builder) Unsubscribe() (c Unsubscribe) { + c = Unsubscribe{cs: get(), ks: b.ks, cf: noRetTag} + c.cs.s = append(c.cs.s, "UNSUBSCRIBE") + return c +} + +func (c Unsubscribe) Channel(channel ...string) UnsubscribeChannel { + c.cs.s = append(c.cs.s, channel...) + return (UnsubscribeChannel)(c) +} + +func (c Unsubscribe) Build() Completed { + return Completed(c) +} + +type UnsubscribeChannel Completed + +func (c UnsubscribeChannel) Channel(channel ...string) UnsubscribeChannel { + c.cs.s = append(c.cs.s, channel...) + return c +} + +func (c UnsubscribeChannel) Build() Completed { + return Completed(c) +} + +type Unwatch Completed + +func (b Builder) Unwatch() (c Unwatch) { + c = Unwatch{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "UNWATCH") + return c +} + +func (c Unwatch) Build() Completed { + return Completed(c) +} + +type Wait Completed + +func (b Builder) Wait() (c Wait) { + c = Wait{cs: get(), ks: b.ks, cf: blockTag} + c.cs.s = append(c.cs.s, "WAIT") + return c +} + +func (c Wait) Numreplicas(numreplicas int64) WaitNumreplicas { + c.cs.s = append(c.cs.s, strconv.FormatInt(numreplicas, 10)) + return (WaitNumreplicas)(c) +} + +type WaitNumreplicas Completed + +func (c WaitNumreplicas) Timeout(timeout int64) WaitTimeout { + c.cs.s = append(c.cs.s, strconv.FormatInt(timeout, 10)) + return (WaitTimeout)(c) +} + +type WaitTimeout Completed + +func (c WaitTimeout) Build() Completed { + return Completed(c) +} + +type Watch Completed + +func (b Builder) Watch() (c Watch) { + c = Watch{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "WATCH") + return c +} + +func (c Watch) Key(key ...string) WatchKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (WatchKey)(c) +} + +type WatchKey Completed + +func (c WatchKey) Key(key ...string) WatchKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c WatchKey) Build() Completed { + return Completed(c) +} + +type Xack Completed + +func (b Builder) Xack() (c Xack) { + c = Xack{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XACK") + return c +} + +func (c Xack) Key(key string) XackKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XackKey)(c) +} + +type XackGroup Completed + +func (c XackGroup) Id(id ...string) XackId { + c.cs.s = append(c.cs.s, id...) + return (XackId)(c) +} + +type XackId Completed + +func (c XackId) Id(id ...string) XackId { + c.cs.s = append(c.cs.s, id...) 
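+	// Illustrative only: a minimal sketch of how this XACK chain is expected to be
+	// consumed, assuming b is a Builder value obtained from the client. The stream,
+	// group and entry ID below are placeholder values, not anything defined here:
+	//   b.Xack().Key("mystream").Group("mygroup").Id("1526569495631-0").Build()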
+ return c +} + +func (c XackId) Build() Completed { + return Completed(c) +} + +type XackKey Completed + +func (c XackKey) Group(group string) XackGroup { + c.cs.s = append(c.cs.s, group) + return (XackGroup)(c) +} + +type Xadd Completed + +func (b Builder) Xadd() (c Xadd) { + c = Xadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XADD") + return c +} + +func (c Xadd) Key(key string) XaddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XaddKey)(c) +} + +type XaddFieldValue Completed + +func (c XaddFieldValue) FieldValue(field string, value string) XaddFieldValue { + c.cs.s = append(c.cs.s, field, value) + return c +} + +func (c XaddFieldValue) Build() Completed { + return Completed(c) +} + +type XaddId Completed + +func (c XaddId) FieldValue() XaddFieldValue { + return (XaddFieldValue)(c) +} + +type XaddKey Completed + +func (c XaddKey) Nomkstream() XaddNomkstream { + c.cs.s = append(c.cs.s, "NOMKSTREAM") + return (XaddNomkstream)(c) +} + +func (c XaddKey) Maxlen() XaddTrimStrategyMaxlen { + c.cs.s = append(c.cs.s, "MAXLEN") + return (XaddTrimStrategyMaxlen)(c) +} + +func (c XaddKey) Minid() XaddTrimStrategyMinid { + c.cs.s = append(c.cs.s, "MINID") + return (XaddTrimStrategyMinid)(c) +} + +func (c XaddKey) Id(id string) XaddId { + c.cs.s = append(c.cs.s, id) + return (XaddId)(c) +} + +type XaddNomkstream Completed + +func (c XaddNomkstream) Maxlen() XaddTrimStrategyMaxlen { + c.cs.s = append(c.cs.s, "MAXLEN") + return (XaddTrimStrategyMaxlen)(c) +} + +func (c XaddNomkstream) Minid() XaddTrimStrategyMinid { + c.cs.s = append(c.cs.s, "MINID") + return (XaddTrimStrategyMinid)(c) +} + +func (c XaddNomkstream) Id(id string) XaddId { + c.cs.s = append(c.cs.s, id) + return (XaddId)(c) +} + +type XaddTrimLimit Completed + +func (c XaddTrimLimit) Id(id string) XaddId { + c.cs.s = append(c.cs.s, id) + return (XaddId)(c) +} + +type XaddTrimOperatorAlmost Completed + +func (c XaddTrimOperatorAlmost) Threshold(threshold string) XaddTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XaddTrimThreshold)(c) +} + +type XaddTrimOperatorExact Completed + +func (c XaddTrimOperatorExact) Threshold(threshold string) XaddTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XaddTrimThreshold)(c) +} + +type XaddTrimStrategyMaxlen Completed + +func (c XaddTrimStrategyMaxlen) Exact() XaddTrimOperatorExact { + c.cs.s = append(c.cs.s, "=") + return (XaddTrimOperatorExact)(c) +} + +func (c XaddTrimStrategyMaxlen) Almost() XaddTrimOperatorAlmost { + c.cs.s = append(c.cs.s, "~") + return (XaddTrimOperatorAlmost)(c) +} + +func (c XaddTrimStrategyMaxlen) Threshold(threshold string) XaddTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XaddTrimThreshold)(c) +} + +type XaddTrimStrategyMinid Completed + +func (c XaddTrimStrategyMinid) Exact() XaddTrimOperatorExact { + c.cs.s = append(c.cs.s, "=") + return (XaddTrimOperatorExact)(c) +} + +func (c XaddTrimStrategyMinid) Almost() XaddTrimOperatorAlmost { + c.cs.s = append(c.cs.s, "~") + return (XaddTrimOperatorAlmost)(c) +} + +func (c XaddTrimStrategyMinid) Threshold(threshold string) XaddTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XaddTrimThreshold)(c) +} + +type XaddTrimThreshold Completed + +func (c XaddTrimThreshold) Limit(count int64) XaddTrimLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(count, 10)) + return (XaddTrimLimit)(c) +} + +func (c XaddTrimThreshold) Id(id string) XaddId { + c.cs.s = 
append(c.cs.s, id) + return (XaddId)(c) +} + +type Xautoclaim Completed + +func (b Builder) Xautoclaim() (c Xautoclaim) { + c = Xautoclaim{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XAUTOCLAIM") + return c +} + +func (c Xautoclaim) Key(key string) XautoclaimKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XautoclaimKey)(c) +} + +type XautoclaimConsumer Completed + +func (c XautoclaimConsumer) MinIdleTime(minIdleTime string) XautoclaimMinIdleTime { + c.cs.s = append(c.cs.s, minIdleTime) + return (XautoclaimMinIdleTime)(c) +} + +type XautoclaimCount Completed + +func (c XautoclaimCount) Justid() XautoclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XautoclaimJustid)(c) +} + +func (c XautoclaimCount) Build() Completed { + return Completed(c) +} + +type XautoclaimGroup Completed + +func (c XautoclaimGroup) Consumer(consumer string) XautoclaimConsumer { + c.cs.s = append(c.cs.s, consumer) + return (XautoclaimConsumer)(c) +} + +type XautoclaimJustid Completed + +func (c XautoclaimJustid) Build() Completed { + return Completed(c) +} + +type XautoclaimKey Completed + +func (c XautoclaimKey) Group(group string) XautoclaimGroup { + c.cs.s = append(c.cs.s, group) + return (XautoclaimGroup)(c) +} + +type XautoclaimMinIdleTime Completed + +func (c XautoclaimMinIdleTime) Start(start string) XautoclaimStart { + c.cs.s = append(c.cs.s, start) + return (XautoclaimStart)(c) +} + +type XautoclaimStart Completed + +func (c XautoclaimStart) Count(count int64) XautoclaimCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (XautoclaimCount)(c) +} + +func (c XautoclaimStart) Justid() XautoclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XautoclaimJustid)(c) +} + +func (c XautoclaimStart) Build() Completed { + return Completed(c) +} + +type Xclaim Completed + +func (b Builder) Xclaim() (c Xclaim) { + c = Xclaim{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XCLAIM") + return c +} + +func (c Xclaim) Key(key string) XclaimKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XclaimKey)(c) +} + +type XclaimConsumer Completed + +func (c XclaimConsumer) MinIdleTime(minIdleTime string) XclaimMinIdleTime { + c.cs.s = append(c.cs.s, minIdleTime) + return (XclaimMinIdleTime)(c) +} + +type XclaimForce Completed + +func (c XclaimForce) Justid() XclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XclaimJustid)(c) +} + +func (c XclaimForce) Lastid() XclaimLastid { + c.cs.s = append(c.cs.s, "LASTID") + return (XclaimLastid)(c) +} + +func (c XclaimForce) Build() Completed { + return Completed(c) +} + +type XclaimGroup Completed + +func (c XclaimGroup) Consumer(consumer string) XclaimConsumer { + c.cs.s = append(c.cs.s, consumer) + return (XclaimConsumer)(c) +} + +type XclaimId Completed + +func (c XclaimId) Id(id ...string) XclaimId { + c.cs.s = append(c.cs.s, id...) 
+ return c +} + +func (c XclaimId) Idle(ms int64) XclaimIdle { + c.cs.s = append(c.cs.s, "IDLE", strconv.FormatInt(ms, 10)) + return (XclaimIdle)(c) +} + +func (c XclaimId) Time(msUnixTime int64) XclaimTime { + c.cs.s = append(c.cs.s, "TIME", strconv.FormatInt(msUnixTime, 10)) + return (XclaimTime)(c) +} + +func (c XclaimId) Retrycount(count int64) XclaimRetrycount { + c.cs.s = append(c.cs.s, "RETRYCOUNT", strconv.FormatInt(count, 10)) + return (XclaimRetrycount)(c) +} + +func (c XclaimId) Force() XclaimForce { + c.cs.s = append(c.cs.s, "FORCE") + return (XclaimForce)(c) +} + +func (c XclaimId) Justid() XclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XclaimJustid)(c) +} + +func (c XclaimId) Lastid() XclaimLastid { + c.cs.s = append(c.cs.s, "LASTID") + return (XclaimLastid)(c) +} + +func (c XclaimId) Build() Completed { + return Completed(c) +} + +type XclaimIdle Completed + +func (c XclaimIdle) Time(msUnixTime int64) XclaimTime { + c.cs.s = append(c.cs.s, "TIME", strconv.FormatInt(msUnixTime, 10)) + return (XclaimTime)(c) +} + +func (c XclaimIdle) Retrycount(count int64) XclaimRetrycount { + c.cs.s = append(c.cs.s, "RETRYCOUNT", strconv.FormatInt(count, 10)) + return (XclaimRetrycount)(c) +} + +func (c XclaimIdle) Force() XclaimForce { + c.cs.s = append(c.cs.s, "FORCE") + return (XclaimForce)(c) +} + +func (c XclaimIdle) Justid() XclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XclaimJustid)(c) +} + +func (c XclaimIdle) Lastid() XclaimLastid { + c.cs.s = append(c.cs.s, "LASTID") + return (XclaimLastid)(c) +} + +func (c XclaimIdle) Build() Completed { + return Completed(c) +} + +type XclaimJustid Completed + +func (c XclaimJustid) Lastid() XclaimLastid { + c.cs.s = append(c.cs.s, "LASTID") + return (XclaimLastid)(c) +} + +func (c XclaimJustid) Build() Completed { + return Completed(c) +} + +type XclaimKey Completed + +func (c XclaimKey) Group(group string) XclaimGroup { + c.cs.s = append(c.cs.s, group) + return (XclaimGroup)(c) +} + +type XclaimLastid Completed + +func (c XclaimLastid) Build() Completed { + return Completed(c) +} + +type XclaimMinIdleTime Completed + +func (c XclaimMinIdleTime) Id(id ...string) XclaimId { + c.cs.s = append(c.cs.s, id...) 
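+	// Illustrative only: a sketch of the full XCLAIM chain this terminal Id step
+	// belongs to, assuming b is a Builder value obtained from the client; the key,
+	// group, consumer and min-idle-time arguments are placeholders:
+	//   b.Xclaim().Key("mystream").Group("mygroup").Consumer("consumer-1").
+	//     MinIdleTime("3600000").Id("1526569498055-0").Build()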
+ return (XclaimId)(c) +} + +type XclaimRetrycount Completed + +func (c XclaimRetrycount) Force() XclaimForce { + c.cs.s = append(c.cs.s, "FORCE") + return (XclaimForce)(c) +} + +func (c XclaimRetrycount) Justid() XclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XclaimJustid)(c) +} + +func (c XclaimRetrycount) Lastid() XclaimLastid { + c.cs.s = append(c.cs.s, "LASTID") + return (XclaimLastid)(c) +} + +func (c XclaimRetrycount) Build() Completed { + return Completed(c) +} + +type XclaimTime Completed + +func (c XclaimTime) Retrycount(count int64) XclaimRetrycount { + c.cs.s = append(c.cs.s, "RETRYCOUNT", strconv.FormatInt(count, 10)) + return (XclaimRetrycount)(c) +} + +func (c XclaimTime) Force() XclaimForce { + c.cs.s = append(c.cs.s, "FORCE") + return (XclaimForce)(c) +} + +func (c XclaimTime) Justid() XclaimJustid { + c.cs.s = append(c.cs.s, "JUSTID") + return (XclaimJustid)(c) +} + +func (c XclaimTime) Lastid() XclaimLastid { + c.cs.s = append(c.cs.s, "LASTID") + return (XclaimLastid)(c) +} + +func (c XclaimTime) Build() Completed { + return Completed(c) +} + +type Xdel Completed + +func (b Builder) Xdel() (c Xdel) { + c = Xdel{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XDEL") + return c +} + +func (c Xdel) Key(key string) XdelKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XdelKey)(c) +} + +type XdelId Completed + +func (c XdelId) Id(id ...string) XdelId { + c.cs.s = append(c.cs.s, id...) + return c +} + +func (c XdelId) Build() Completed { + return Completed(c) +} + +type XdelKey Completed + +func (c XdelKey) Id(id ...string) XdelId { + c.cs.s = append(c.cs.s, id...) + return (XdelId)(c) +} + +type XgroupCreate Completed + +func (b Builder) XgroupCreate() (c XgroupCreate) { + c = XgroupCreate{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XGROUP", "CREATE") + return c +} + +func (c XgroupCreate) Key(key string) XgroupCreateKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XgroupCreateKey)(c) +} + +type XgroupCreateEntriesread Completed + +func (c XgroupCreateEntriesread) Build() Completed { + return Completed(c) +} + +type XgroupCreateGroupname Completed + +func (c XgroupCreateGroupname) Id(id string) XgroupCreateId { + c.cs.s = append(c.cs.s, id) + return (XgroupCreateId)(c) +} + +type XgroupCreateId Completed + +func (c XgroupCreateId) Mkstream() XgroupCreateMkstream { + c.cs.s = append(c.cs.s, "MKSTREAM") + return (XgroupCreateMkstream)(c) +} + +func (c XgroupCreateId) Entriesread(entriesRead int64) XgroupCreateEntriesread { + c.cs.s = append(c.cs.s, "ENTRIESREAD", strconv.FormatInt(entriesRead, 10)) + return (XgroupCreateEntriesread)(c) +} + +func (c XgroupCreateId) Build() Completed { + return Completed(c) +} + +type XgroupCreateKey Completed + +func (c XgroupCreateKey) Groupname(groupname string) XgroupCreateGroupname { + c.cs.s = append(c.cs.s, groupname) + return (XgroupCreateGroupname)(c) +} + +type XgroupCreateMkstream Completed + +func (c XgroupCreateMkstream) Entriesread(entriesRead int64) XgroupCreateEntriesread { + c.cs.s = append(c.cs.s, "ENTRIESREAD", strconv.FormatInt(entriesRead, 10)) + return (XgroupCreateEntriesread)(c) +} + +func (c XgroupCreateMkstream) Build() Completed { + return Completed(c) +} + +type XgroupCreateconsumer Completed + +func (b Builder) XgroupCreateconsumer() (c XgroupCreateconsumer) { + c = XgroupCreateconsumer{cs: get(), 
ks: b.ks} + c.cs.s = append(c.cs.s, "XGROUP", "CREATECONSUMER") + return c +} + +func (c XgroupCreateconsumer) Key(key string) XgroupCreateconsumerKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XgroupCreateconsumerKey)(c) +} + +type XgroupCreateconsumerConsumername Completed + +func (c XgroupCreateconsumerConsumername) Build() Completed { + return Completed(c) +} + +type XgroupCreateconsumerGroupname Completed + +func (c XgroupCreateconsumerGroupname) Consumername(consumername string) XgroupCreateconsumerConsumername { + c.cs.s = append(c.cs.s, consumername) + return (XgroupCreateconsumerConsumername)(c) +} + +type XgroupCreateconsumerKey Completed + +func (c XgroupCreateconsumerKey) Groupname(groupname string) XgroupCreateconsumerGroupname { + c.cs.s = append(c.cs.s, groupname) + return (XgroupCreateconsumerGroupname)(c) +} + +type XgroupDelconsumer Completed + +func (b Builder) XgroupDelconsumer() (c XgroupDelconsumer) { + c = XgroupDelconsumer{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XGROUP", "DELCONSUMER") + return c +} + +func (c XgroupDelconsumer) Key(key string) XgroupDelconsumerKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XgroupDelconsumerKey)(c) +} + +type XgroupDelconsumerConsumername Completed + +func (c XgroupDelconsumerConsumername) Build() Completed { + return Completed(c) +} + +type XgroupDelconsumerGroupname Completed + +func (c XgroupDelconsumerGroupname) Consumername(consumername string) XgroupDelconsumerConsumername { + c.cs.s = append(c.cs.s, consumername) + return (XgroupDelconsumerConsumername)(c) +} + +type XgroupDelconsumerKey Completed + +func (c XgroupDelconsumerKey) Groupname(groupname string) XgroupDelconsumerGroupname { + c.cs.s = append(c.cs.s, groupname) + return (XgroupDelconsumerGroupname)(c) +} + +type XgroupDestroy Completed + +func (b Builder) XgroupDestroy() (c XgroupDestroy) { + c = XgroupDestroy{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XGROUP", "DESTROY") + return c +} + +func (c XgroupDestroy) Key(key string) XgroupDestroyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XgroupDestroyKey)(c) +} + +type XgroupDestroyGroupname Completed + +func (c XgroupDestroyGroupname) Build() Completed { + return Completed(c) +} + +type XgroupDestroyKey Completed + +func (c XgroupDestroyKey) Groupname(groupname string) XgroupDestroyGroupname { + c.cs.s = append(c.cs.s, groupname) + return (XgroupDestroyGroupname)(c) +} + +type XgroupHelp Completed + +func (b Builder) XgroupHelp() (c XgroupHelp) { + c = XgroupHelp{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XGROUP", "HELP") + return c +} + +func (c XgroupHelp) Build() Completed { + return Completed(c) +} + +type XgroupSetid Completed + +func (b Builder) XgroupSetid() (c XgroupSetid) { + c = XgroupSetid{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XGROUP", "SETID") + return c +} + +func (c XgroupSetid) Key(key string) XgroupSetidKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XgroupSetidKey)(c) +} + +type XgroupSetidEntriesread Completed + +func (c XgroupSetidEntriesread) Build() Completed { + return Completed(c) +} + +type XgroupSetidGroupname Completed + +func (c XgroupSetidGroupname) 
Id(id string) XgroupSetidId { + c.cs.s = append(c.cs.s, id) + return (XgroupSetidId)(c) +} + +type XgroupSetidId Completed + +func (c XgroupSetidId) Entriesread(entriesRead int64) XgroupSetidEntriesread { + c.cs.s = append(c.cs.s, "ENTRIESREAD", strconv.FormatInt(entriesRead, 10)) + return (XgroupSetidEntriesread)(c) +} + +func (c XgroupSetidId) Build() Completed { + return Completed(c) +} + +type XgroupSetidKey Completed + +func (c XgroupSetidKey) Groupname(groupname string) XgroupSetidGroupname { + c.cs.s = append(c.cs.s, groupname) + return (XgroupSetidGroupname)(c) +} + +type XinfoConsumers Completed + +func (b Builder) XinfoConsumers() (c XinfoConsumers) { + c = XinfoConsumers{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XINFO", "CONSUMERS") + return c +} + +func (c XinfoConsumers) Key(key string) XinfoConsumersKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XinfoConsumersKey)(c) +} + +type XinfoConsumersGroupname Completed + +func (c XinfoConsumersGroupname) Build() Completed { + return Completed(c) +} + +type XinfoConsumersKey Completed + +func (c XinfoConsumersKey) Groupname(groupname string) XinfoConsumersGroupname { + c.cs.s = append(c.cs.s, groupname) + return (XinfoConsumersGroupname)(c) +} + +type XinfoGroups Completed + +func (b Builder) XinfoGroups() (c XinfoGroups) { + c = XinfoGroups{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XINFO", "GROUPS") + return c +} + +func (c XinfoGroups) Key(key string) XinfoGroupsKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XinfoGroupsKey)(c) +} + +type XinfoGroupsKey Completed + +func (c XinfoGroupsKey) Build() Completed { + return Completed(c) +} + +type XinfoHelp Completed + +func (b Builder) XinfoHelp() (c XinfoHelp) { + c = XinfoHelp{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XINFO", "HELP") + return c +} + +func (c XinfoHelp) Build() Completed { + return Completed(c) +} + +type XinfoStream Completed + +func (b Builder) XinfoStream() (c XinfoStream) { + c = XinfoStream{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XINFO", "STREAM") + return c +} + +func (c XinfoStream) Key(key string) XinfoStreamKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XinfoStreamKey)(c) +} + +type XinfoStreamFullCount Completed + +func (c XinfoStreamFullCount) Build() Completed { + return Completed(c) +} + +type XinfoStreamFullFull Completed + +func (c XinfoStreamFullFull) Count(count int64) XinfoStreamFullCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (XinfoStreamFullCount)(c) +} + +func (c XinfoStreamFullFull) Build() Completed { + return Completed(c) +} + +type XinfoStreamKey Completed + +func (c XinfoStreamKey) Full() XinfoStreamFullFull { + c.cs.s = append(c.cs.s, "FULL") + return (XinfoStreamFullFull)(c) +} + +func (c XinfoStreamKey) Build() Completed { + return Completed(c) +} + +type Xlen Completed + +func (b Builder) Xlen() (c Xlen) { + c = Xlen{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XLEN") + return c +} + +func (c Xlen) Key(key string) XlenKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XlenKey)(c) +} + +type XlenKey 
Completed + +func (c XlenKey) Build() Completed { + return Completed(c) +} + +type Xpending Completed + +func (b Builder) Xpending() (c Xpending) { + c = Xpending{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XPENDING") + return c +} + +func (c Xpending) Key(key string) XpendingKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XpendingKey)(c) +} + +type XpendingFiltersConsumer Completed + +func (c XpendingFiltersConsumer) Build() Completed { + return Completed(c) +} + +type XpendingFiltersCount Completed + +func (c XpendingFiltersCount) Consumer(consumer string) XpendingFiltersConsumer { + c.cs.s = append(c.cs.s, consumer) + return (XpendingFiltersConsumer)(c) +} + +func (c XpendingFiltersCount) Build() Completed { + return Completed(c) +} + +type XpendingFiltersEnd Completed + +func (c XpendingFiltersEnd) Count(count int64) XpendingFiltersCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (XpendingFiltersCount)(c) +} + +type XpendingFiltersIdle Completed + +func (c XpendingFiltersIdle) Start(start string) XpendingFiltersStart { + c.cs.s = append(c.cs.s, start) + return (XpendingFiltersStart)(c) +} + +type XpendingFiltersStart Completed + +func (c XpendingFiltersStart) End(end string) XpendingFiltersEnd { + c.cs.s = append(c.cs.s, end) + return (XpendingFiltersEnd)(c) +} + +type XpendingGroup Completed + +func (c XpendingGroup) Idle(minIdleTime int64) XpendingFiltersIdle { + c.cs.s = append(c.cs.s, "IDLE", strconv.FormatInt(minIdleTime, 10)) + return (XpendingFiltersIdle)(c) +} + +func (c XpendingGroup) Start(start string) XpendingFiltersStart { + c.cs.s = append(c.cs.s, start) + return (XpendingFiltersStart)(c) +} + +func (c XpendingGroup) Build() Completed { + return Completed(c) +} + +type XpendingKey Completed + +func (c XpendingKey) Group(group string) XpendingGroup { + c.cs.s = append(c.cs.s, group) + return (XpendingGroup)(c) +} + +type Xrange Completed + +func (b Builder) Xrange() (c Xrange) { + c = Xrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XRANGE") + return c +} + +func (c Xrange) Key(key string) XrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XrangeKey)(c) +} + +type XrangeCount Completed + +func (c XrangeCount) Build() Completed { + return Completed(c) +} + +type XrangeEnd Completed + +func (c XrangeEnd) Count(count int64) XrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (XrangeCount)(c) +} + +func (c XrangeEnd) Build() Completed { + return Completed(c) +} + +type XrangeKey Completed + +func (c XrangeKey) Start(start string) XrangeStart { + c.cs.s = append(c.cs.s, start) + return (XrangeStart)(c) +} + +type XrangeStart Completed + +func (c XrangeStart) End(end string) XrangeEnd { + c.cs.s = append(c.cs.s, end) + return (XrangeEnd)(c) +} + +type Xread Completed + +func (b Builder) Xread() (c Xread) { + c = Xread{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XREAD") + return c +} + +func (c Xread) Count(count int64) XreadCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (XreadCount)(c) +} + +func (c Xread) Block(milliseconds int64) XreadBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "BLOCK", strconv.FormatInt(milliseconds, 10)) + return (XreadBlock)(c) +} + +func (c Xread) Streams() XreadStreams { + c.cs.s = 
append(c.cs.s, "STREAMS") + return (XreadStreams)(c) +} + +type XreadBlock Completed + +func (c XreadBlock) Streams() XreadStreams { + c.cs.s = append(c.cs.s, "STREAMS") + return (XreadStreams)(c) +} + +type XreadCount Completed + +func (c XreadCount) Block(milliseconds int64) XreadBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "BLOCK", strconv.FormatInt(milliseconds, 10)) + return (XreadBlock)(c) +} + +func (c XreadCount) Streams() XreadStreams { + c.cs.s = append(c.cs.s, "STREAMS") + return (XreadStreams)(c) +} + +type XreadId Completed + +func (c XreadId) Id(id ...string) XreadId { + c.cs.s = append(c.cs.s, id...) + return c +} + +func (c XreadId) Build() Completed { + return Completed(c) +} + +type XreadKey Completed + +func (c XreadKey) Key(key ...string) XreadKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c XreadKey) Id(id ...string) XreadId { + c.cs.s = append(c.cs.s, id...) + return (XreadId)(c) +} + +type XreadStreams Completed + +func (c XreadStreams) Key(key ...string) XreadKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (XreadKey)(c) +} + +type Xreadgroup Completed + +func (b Builder) Xreadgroup() (c Xreadgroup) { + c = Xreadgroup{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XREADGROUP") + return c +} + +func (c Xreadgroup) Group(group string, consumer string) XreadgroupGroup { + c.cs.s = append(c.cs.s, "GROUP", group, consumer) + return (XreadgroupGroup)(c) +} + +type XreadgroupBlock Completed + +func (c XreadgroupBlock) Noack() XreadgroupNoack { + c.cs.s = append(c.cs.s, "NOACK") + return (XreadgroupNoack)(c) +} + +func (c XreadgroupBlock) Streams() XreadgroupStreams { + c.cs.s = append(c.cs.s, "STREAMS") + return (XreadgroupStreams)(c) +} + +type XreadgroupCount Completed + +func (c XreadgroupCount) Block(milliseconds int64) XreadgroupBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "BLOCK", strconv.FormatInt(milliseconds, 10)) + return (XreadgroupBlock)(c) +} + +func (c XreadgroupCount) Noack() XreadgroupNoack { + c.cs.s = append(c.cs.s, "NOACK") + return (XreadgroupNoack)(c) +} + +func (c XreadgroupCount) Streams() XreadgroupStreams { + c.cs.s = append(c.cs.s, "STREAMS") + return (XreadgroupStreams)(c) +} + +type XreadgroupGroup Completed + +func (c XreadgroupGroup) Count(count int64) XreadgroupCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (XreadgroupCount)(c) +} + +func (c XreadgroupGroup) Block(milliseconds int64) XreadgroupBlock { + c.cf = blockTag + c.cs.s = append(c.cs.s, "BLOCK", strconv.FormatInt(milliseconds, 10)) + return (XreadgroupBlock)(c) +} + +func (c XreadgroupGroup) Noack() XreadgroupNoack { + c.cs.s = append(c.cs.s, "NOACK") + return (XreadgroupNoack)(c) +} + +func (c XreadgroupGroup) Streams() XreadgroupStreams { + c.cs.s = append(c.cs.s, "STREAMS") + return (XreadgroupStreams)(c) +} + +type XreadgroupId Completed + +func (c XreadgroupId) Id(id ...string) XreadgroupId { + c.cs.s = append(c.cs.s, id...) 
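+	// Illustrative only: a sketch of a complete XREADGROUP chain ending at this
+	// step, assuming b is a Builder value obtained from the client; the group,
+	// consumer, stream key and ">" cursor are placeholder arguments:
+	//   b.Xreadgroup().Group("mygroup", "consumer-1").Count(10).Block(5000).
+	//     Streams().Key("mystream").Id(">").Build()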
+ return c +} + +func (c XreadgroupId) Build() Completed { + return Completed(c) +} + +type XreadgroupKey Completed + +func (c XreadgroupKey) Key(key ...string) XreadgroupKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c XreadgroupKey) Id(id ...string) XreadgroupId { + c.cs.s = append(c.cs.s, id...) + return (XreadgroupId)(c) +} + +type XreadgroupNoack Completed + +func (c XreadgroupNoack) Streams() XreadgroupStreams { + c.cs.s = append(c.cs.s, "STREAMS") + return (XreadgroupStreams)(c) +} + +type XreadgroupStreams Completed + +func (c XreadgroupStreams) Key(key ...string) XreadgroupKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (XreadgroupKey)(c) +} + +type Xrevrange Completed + +func (b Builder) Xrevrange() (c Xrevrange) { + c = Xrevrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "XREVRANGE") + return c +} + +func (c Xrevrange) Key(key string) XrevrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XrevrangeKey)(c) +} + +type XrevrangeCount Completed + +func (c XrevrangeCount) Build() Completed { + return Completed(c) +} + +type XrevrangeEnd Completed + +func (c XrevrangeEnd) Start(start string) XrevrangeStart { + c.cs.s = append(c.cs.s, start) + return (XrevrangeStart)(c) +} + +type XrevrangeKey Completed + +func (c XrevrangeKey) End(end string) XrevrangeEnd { + c.cs.s = append(c.cs.s, end) + return (XrevrangeEnd)(c) +} + +type XrevrangeStart Completed + +func (c XrevrangeStart) Count(count int64) XrevrangeCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (XrevrangeCount)(c) +} + +func (c XrevrangeStart) Build() Completed { + return Completed(c) +} + +type Xsetid Completed + +func (b Builder) Xsetid() (c Xsetid) { + c = Xsetid{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XSETID") + return c +} + +func (c Xsetid) Key(key string) XsetidKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XsetidKey)(c) +} + +type XsetidEntriesadded Completed + +func (c XsetidEntriesadded) Maxdeletedid(maxDeletedEntryId string) XsetidMaxdeletedid { + c.cs.s = append(c.cs.s, "MAXDELETEDID", maxDeletedEntryId) + return (XsetidMaxdeletedid)(c) +} + +func (c XsetidEntriesadded) Build() Completed { + return Completed(c) +} + +type XsetidKey Completed + +func (c XsetidKey) LastId(lastId string) XsetidLastId { + c.cs.s = append(c.cs.s, lastId) + return (XsetidLastId)(c) +} + +type XsetidLastId Completed + +func (c XsetidLastId) Entriesadded(entriesAdded int64) XsetidEntriesadded { + c.cs.s = append(c.cs.s, "ENTRIESADDED", strconv.FormatInt(entriesAdded, 10)) + return (XsetidEntriesadded)(c) +} + +func (c XsetidLastId) Maxdeletedid(maxDeletedEntryId string) XsetidMaxdeletedid { + c.cs.s = append(c.cs.s, "MAXDELETEDID", maxDeletedEntryId) + return (XsetidMaxdeletedid)(c) +} + +func (c XsetidLastId) Build() Completed { + return Completed(c) +} + +type XsetidMaxdeletedid Completed + +func (c XsetidMaxdeletedid) Build() Completed { + return Completed(c) +} + +type Xtrim Completed + +func (b Builder) Xtrim() (c Xtrim) { + 
c = Xtrim{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "XTRIM") + return c +} + +func (c Xtrim) Key(key string) XtrimKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (XtrimKey)(c) +} + +type XtrimKey Completed + +func (c XtrimKey) Maxlen() XtrimTrimStrategyMaxlen { + c.cs.s = append(c.cs.s, "MAXLEN") + return (XtrimTrimStrategyMaxlen)(c) +} + +func (c XtrimKey) Minid() XtrimTrimStrategyMinid { + c.cs.s = append(c.cs.s, "MINID") + return (XtrimTrimStrategyMinid)(c) +} + +type XtrimTrimLimit Completed + +func (c XtrimTrimLimit) Build() Completed { + return Completed(c) +} + +type XtrimTrimOperatorAlmost Completed + +func (c XtrimTrimOperatorAlmost) Threshold(threshold string) XtrimTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XtrimTrimThreshold)(c) +} + +type XtrimTrimOperatorExact Completed + +func (c XtrimTrimOperatorExact) Threshold(threshold string) XtrimTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XtrimTrimThreshold)(c) +} + +type XtrimTrimStrategyMaxlen Completed + +func (c XtrimTrimStrategyMaxlen) Exact() XtrimTrimOperatorExact { + c.cs.s = append(c.cs.s, "=") + return (XtrimTrimOperatorExact)(c) +} + +func (c XtrimTrimStrategyMaxlen) Almost() XtrimTrimOperatorAlmost { + c.cs.s = append(c.cs.s, "~") + return (XtrimTrimOperatorAlmost)(c) +} + +func (c XtrimTrimStrategyMaxlen) Threshold(threshold string) XtrimTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XtrimTrimThreshold)(c) +} + +type XtrimTrimStrategyMinid Completed + +func (c XtrimTrimStrategyMinid) Exact() XtrimTrimOperatorExact { + c.cs.s = append(c.cs.s, "=") + return (XtrimTrimOperatorExact)(c) +} + +func (c XtrimTrimStrategyMinid) Almost() XtrimTrimOperatorAlmost { + c.cs.s = append(c.cs.s, "~") + return (XtrimTrimOperatorAlmost)(c) +} + +func (c XtrimTrimStrategyMinid) Threshold(threshold string) XtrimTrimThreshold { + c.cs.s = append(c.cs.s, threshold) + return (XtrimTrimThreshold)(c) +} + +type XtrimTrimThreshold Completed + +func (c XtrimTrimThreshold) Limit(count int64) XtrimTrimLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(count, 10)) + return (XtrimTrimLimit)(c) +} + +func (c XtrimTrimThreshold) Build() Completed { + return Completed(c) +} + +type Zadd Completed + +func (b Builder) Zadd() (c Zadd) { + c = Zadd{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZADD") + return c +} + +func (c Zadd) Key(key string) ZaddKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZaddKey)(c) +} + +type ZaddChangeCh Completed + +func (c ZaddChangeCh) Incr() ZaddIncrementIncr { + c.cs.s = append(c.cs.s, "INCR") + return (ZaddIncrementIncr)(c) +} + +func (c ZaddChangeCh) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddComparisonGt Completed + +func (c ZaddComparisonGt) Ch() ZaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (ZaddChangeCh)(c) +} + +func (c ZaddComparisonGt) Incr() ZaddIncrementIncr { + c.cs.s = append(c.cs.s, "INCR") + return (ZaddIncrementIncr)(c) +} + +func (c ZaddComparisonGt) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddComparisonLt Completed + +func (c ZaddComparisonLt) Ch() ZaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (ZaddChangeCh)(c) +} + +func (c ZaddComparisonLt) Incr() ZaddIncrementIncr { + c.cs.s = append(c.cs.s, "INCR") + return (ZaddIncrementIncr)(c) +} + +func (c 
ZaddComparisonLt) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddConditionNx Completed + +func (c ZaddConditionNx) Gt() ZaddComparisonGt { + c.cs.s = append(c.cs.s, "GT") + return (ZaddComparisonGt)(c) +} + +func (c ZaddConditionNx) Lt() ZaddComparisonLt { + c.cs.s = append(c.cs.s, "LT") + return (ZaddComparisonLt)(c) +} + +func (c ZaddConditionNx) Ch() ZaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (ZaddChangeCh)(c) +} + +func (c ZaddConditionNx) Incr() ZaddIncrementIncr { + c.cs.s = append(c.cs.s, "INCR") + return (ZaddIncrementIncr)(c) +} + +func (c ZaddConditionNx) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddConditionXx Completed + +func (c ZaddConditionXx) Gt() ZaddComparisonGt { + c.cs.s = append(c.cs.s, "GT") + return (ZaddComparisonGt)(c) +} + +func (c ZaddConditionXx) Lt() ZaddComparisonLt { + c.cs.s = append(c.cs.s, "LT") + return (ZaddComparisonLt)(c) +} + +func (c ZaddConditionXx) Ch() ZaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (ZaddChangeCh)(c) +} + +func (c ZaddConditionXx) Incr() ZaddIncrementIncr { + c.cs.s = append(c.cs.s, "INCR") + return (ZaddIncrementIncr)(c) +} + +func (c ZaddConditionXx) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddIncrementIncr Completed + +func (c ZaddIncrementIncr) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddKey Completed + +func (c ZaddKey) Nx() ZaddConditionNx { + c.cs.s = append(c.cs.s, "NX") + return (ZaddConditionNx)(c) +} + +func (c ZaddKey) Xx() ZaddConditionXx { + c.cs.s = append(c.cs.s, "XX") + return (ZaddConditionXx)(c) +} + +func (c ZaddKey) Gt() ZaddComparisonGt { + c.cs.s = append(c.cs.s, "GT") + return (ZaddComparisonGt)(c) +} + +func (c ZaddKey) Lt() ZaddComparisonLt { + c.cs.s = append(c.cs.s, "LT") + return (ZaddComparisonLt)(c) +} + +func (c ZaddKey) Ch() ZaddChangeCh { + c.cs.s = append(c.cs.s, "CH") + return (ZaddChangeCh)(c) +} + +func (c ZaddKey) Incr() ZaddIncrementIncr { + c.cs.s = append(c.cs.s, "INCR") + return (ZaddIncrementIncr)(c) +} + +func (c ZaddKey) ScoreMember() ZaddScoreMember { + return (ZaddScoreMember)(c) +} + +type ZaddScoreMember Completed + +func (c ZaddScoreMember) ScoreMember(score float64, member string) ZaddScoreMember { + c.cs.s = append(c.cs.s, strconv.FormatFloat(score, 'f', -1, 64), member) + return c +} + +func (c ZaddScoreMember) Build() Completed { + return Completed(c) +} + +type Zcard Completed + +func (b Builder) Zcard() (c Zcard) { + c = Zcard{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZCARD") + return c +} + +func (c Zcard) Key(key string) ZcardKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZcardKey)(c) +} + +type ZcardKey Completed + +func (c ZcardKey) Build() Completed { + return Completed(c) +} + +func (c ZcardKey) Cache() Cacheable { + return Cacheable(c) +} + +type Zcount Completed + +func (b Builder) Zcount() (c Zcount) { + c = Zcount{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZCOUNT") + return c +} + +func (c Zcount) Key(key string) ZcountKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZcountKey)(c) +} + +type ZcountKey Completed + +func (c ZcountKey) Min(min string) ZcountMin { + c.cs.s = append(c.cs.s, min) + return (ZcountMin)(c) +} + +type ZcountMax Completed + +func (c ZcountMax) Build() 
Completed { + return Completed(c) +} + +func (c ZcountMax) Cache() Cacheable { + return Cacheable(c) +} + +type ZcountMin Completed + +func (c ZcountMin) Max(max string) ZcountMax { + c.cs.s = append(c.cs.s, max) + return (ZcountMax)(c) +} + +type Zdiff Completed + +func (b Builder) Zdiff() (c Zdiff) { + c = Zdiff{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZDIFF") + return c +} + +func (c Zdiff) Numkeys(numkeys int64) ZdiffNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZdiffNumkeys)(c) +} + +type ZdiffKey Completed + +func (c ZdiffKey) Key(key ...string) ZdiffKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZdiffKey) Withscores() ZdiffWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZdiffWithscores)(c) +} + +func (c ZdiffKey) Build() Completed { + return Completed(c) +} + +type ZdiffNumkeys Completed + +func (c ZdiffNumkeys) Key(key ...string) ZdiffKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return (ZdiffKey)(c) +} + +type ZdiffWithscores Completed + +func (c ZdiffWithscores) Build() Completed { + return Completed(c) +} + +type Zdiffstore Completed + +func (b Builder) Zdiffstore() (c Zdiffstore) { + c = Zdiffstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZDIFFSTORE") + return c +} + +func (c Zdiffstore) Destination(destination string) ZdiffstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (ZdiffstoreDestination)(c) +} + +type ZdiffstoreDestination Completed + +func (c ZdiffstoreDestination) Numkeys(numkeys int64) ZdiffstoreNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZdiffstoreNumkeys)(c) +} + +type ZdiffstoreKey Completed + +func (c ZdiffstoreKey) Key(key ...string) ZdiffstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZdiffstoreKey) Build() Completed { + return Completed(c) +} + +type ZdiffstoreNumkeys Completed + +func (c ZdiffstoreNumkeys) Key(key ...string) ZdiffstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZdiffstoreKey)(c) +} + +type Zincrby Completed + +func (b Builder) Zincrby() (c Zincrby) { + c = Zincrby{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZINCRBY") + return c +} + +func (c Zincrby) Key(key string) ZincrbyKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZincrbyKey)(c) +} + +type ZincrbyIncrement Completed + +func (c ZincrbyIncrement) Member(member string) ZincrbyMember { + c.cs.s = append(c.cs.s, member) + return (ZincrbyMember)(c) +} + +type ZincrbyKey Completed + +func (c ZincrbyKey) Increment(increment float64) ZincrbyIncrement { + c.cs.s = append(c.cs.s, strconv.FormatFloat(increment, 'f', -1, 64)) + return (ZincrbyIncrement)(c) +} + +type ZincrbyMember Completed + +func (c ZincrbyMember) Build() Completed { + return Completed(c) +} + +type Zinter Completed + +func (b Builder) Zinter() (c Zinter) { + c = Zinter{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZINTER") + return c +} + +func (c Zinter) Numkeys(numkeys int64) ZinterNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZinterNumkeys)(c) +} + +type ZinterAggregateMax Completed + +func (c ZinterAggregateMax) Withscores() ZinterWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZinterWithscores)(c) +} + +func (c ZinterAggregateMax) Build() Completed { + return Completed(c) +} + +type ZinterAggregateMin Completed + +func (c ZinterAggregateMin) Withscores() ZinterWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZinterWithscores)(c) +} + +func (c ZinterAggregateMin) Build() Completed { + return Completed(c) +} + +type ZinterAggregateSum Completed + +func (c ZinterAggregateSum) Withscores() ZinterWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZinterWithscores)(c) +} + +func (c ZinterAggregateSum) Build() Completed { + return Completed(c) +} + +type ZinterKey Completed + +func (c ZinterKey) Key(key ...string) ZinterKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZinterKey) Weights(weight ...int64) ZinterWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ZinterWeights)(c) +} + +func (c ZinterKey) AggregateSum() ZinterAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZinterAggregateSum)(c) +} + +func (c ZinterKey) AggregateMin() ZinterAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZinterAggregateMin)(c) +} + +func (c ZinterKey) AggregateMax() ZinterAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZinterAggregateMax)(c) +} + +func (c ZinterKey) Withscores() ZinterWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZinterWithscores)(c) +} + +func (c ZinterKey) Build() Completed { + return Completed(c) +} + +type ZinterNumkeys Completed + +func (c ZinterNumkeys) Key(key ...string) ZinterKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZinterKey)(c) +} + +type ZinterWeights Completed + +func (c ZinterWeights) Weights(weight ...int64) ZinterWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c ZinterWeights) AggregateSum() ZinterAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZinterAggregateSum)(c) +} + +func (c ZinterWeights) AggregateMin() ZinterAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZinterAggregateMin)(c) +} + +func (c ZinterWeights) AggregateMax() ZinterAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZinterAggregateMax)(c) +} + +func (c ZinterWeights) Withscores() ZinterWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZinterWithscores)(c) +} + +func (c ZinterWeights) Build() Completed { + return Completed(c) +} + +type ZinterWithscores Completed + +func (c ZinterWithscores) Build() Completed { + return Completed(c) +} + +type Zintercard Completed + +func (b Builder) Zintercard() (c Zintercard) { + c = Zintercard{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZINTERCARD") + return c +} + +func (c Zintercard) Numkeys(numkeys int64) ZintercardNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZintercardNumkeys)(c) +} + +type ZintercardKey Completed + +func (c ZintercardKey) Key(key ...string) ZintercardKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZintercardKey) Limit(limit int64) ZintercardLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(limit, 10)) + return (ZintercardLimit)(c) +} + +func (c ZintercardKey) Build() Completed { + return Completed(c) +} + +type ZintercardLimit Completed + +func (c ZintercardLimit) Build() Completed { + return Completed(c) +} + +type ZintercardNumkeys Completed + +func (c ZintercardNumkeys) Key(key ...string) ZintercardKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZintercardKey)(c) +} + +type Zinterstore Completed + +func (b Builder) Zinterstore() (c Zinterstore) { + c = Zinterstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZINTERSTORE") + return c +} + +func (c Zinterstore) Destination(destination string) ZinterstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (ZinterstoreDestination)(c) +} + +type ZinterstoreAggregateMax Completed + +func (c ZinterstoreAggregateMax) Build() Completed { + return Completed(c) +} + +type ZinterstoreAggregateMin Completed + +func (c ZinterstoreAggregateMin) Build() Completed { + return Completed(c) +} + +type ZinterstoreAggregateSum Completed + +func (c ZinterstoreAggregateSum) Build() Completed { + return Completed(c) +} + +type ZinterstoreDestination Completed + +func (c ZinterstoreDestination) Numkeys(numkeys int64) ZinterstoreNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZinterstoreNumkeys)(c) +} + +type ZinterstoreKey Completed + +func (c ZinterstoreKey) Key(key ...string) ZinterstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZinterstoreKey) Weights(weight ...int64) ZinterstoreWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ZinterstoreWeights)(c) +} + +func (c ZinterstoreKey) AggregateSum() ZinterstoreAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZinterstoreAggregateSum)(c) +} + +func (c ZinterstoreKey) AggregateMin() ZinterstoreAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZinterstoreAggregateMin)(c) +} + +func (c ZinterstoreKey) AggregateMax() ZinterstoreAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZinterstoreAggregateMax)(c) +} + +func (c ZinterstoreKey) Build() Completed { + return Completed(c) +} + +type ZinterstoreNumkeys Completed + +func (c ZinterstoreNumkeys) Key(key ...string) ZinterstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZinterstoreKey)(c) +} + +type ZinterstoreWeights Completed + +func (c ZinterstoreWeights) Weights(weight ...int64) ZinterstoreWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c ZinterstoreWeights) AggregateSum() ZinterstoreAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZinterstoreAggregateSum)(c) +} + +func (c ZinterstoreWeights) AggregateMin() ZinterstoreAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZinterstoreAggregateMin)(c) +} + +func (c ZinterstoreWeights) AggregateMax() ZinterstoreAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZinterstoreAggregateMax)(c) +} + +func (c ZinterstoreWeights) Build() Completed { + return Completed(c) +} + +type Zlexcount Completed + +func (b Builder) Zlexcount() (c Zlexcount) { + c = Zlexcount{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZLEXCOUNT") + return c +} + +func (c Zlexcount) Key(key string) ZlexcountKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZlexcountKey)(c) +} + +type ZlexcountKey Completed + +func (c ZlexcountKey) Min(min string) ZlexcountMin { + c.cs.s = append(c.cs.s, min) + return (ZlexcountMin)(c) +} + +type ZlexcountMax Completed + +func (c ZlexcountMax) Build() Completed { + return Completed(c) +} + +func (c ZlexcountMax) Cache() Cacheable { + return Cacheable(c) +} + +type ZlexcountMin Completed + +func (c ZlexcountMin) Max(max string) ZlexcountMax { + c.cs.s = append(c.cs.s, max) + return (ZlexcountMax)(c) +} + +type Zmpop Completed + +func (b Builder) Zmpop() (c Zmpop) { + c = Zmpop{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZMPOP") + return c +} + +func (c Zmpop) Numkeys(numkeys int64) ZmpopNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZmpopNumkeys)(c) +} + +type ZmpopCount Completed + +func (c ZmpopCount) Build() Completed { + return Completed(c) +} + +type ZmpopKey Completed + +func (c ZmpopKey) Key(key ...string) ZmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZmpopKey) Min() ZmpopWhereMin { + c.cs.s = append(c.cs.s, "MIN") + return (ZmpopWhereMin)(c) +} + +func (c ZmpopKey) Max() ZmpopWhereMax { + c.cs.s = append(c.cs.s, "MAX") + return (ZmpopWhereMax)(c) +} + +type ZmpopNumkeys Completed + +func (c ZmpopNumkeys) Key(key ...string) ZmpopKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZmpopKey)(c) +} + +type ZmpopWhereMax Completed + +func (c ZmpopWhereMax) Count(count int64) ZmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (ZmpopCount)(c) +} + +func (c ZmpopWhereMax) Build() Completed { + return Completed(c) +} + +type ZmpopWhereMin Completed + +func (c ZmpopWhereMin) Count(count int64) ZmpopCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (ZmpopCount)(c) +} + +func (c ZmpopWhereMin) Build() Completed { + return Completed(c) +} + +type Zmscore Completed + +func (b Builder) Zmscore() (c Zmscore) { + c = Zmscore{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZMSCORE") + return c +} + +func (c Zmscore) Key(key string) ZmscoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZmscoreKey)(c) +} + +type ZmscoreKey Completed + +func (c ZmscoreKey) Member(member ...string) ZmscoreMember { + c.cs.s = append(c.cs.s, member...) + return (ZmscoreMember)(c) +} + +type ZmscoreMember Completed + +func (c ZmscoreMember) Member(member ...string) ZmscoreMember { + c.cs.s = append(c.cs.s, member...) + return c +} + +func (c ZmscoreMember) Build() Completed { + return Completed(c) +} + +func (c ZmscoreMember) Cache() Cacheable { + return Cacheable(c) +} + +type Zpopmax Completed + +func (b Builder) Zpopmax() (c Zpopmax) { + c = Zpopmax{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZPOPMAX") + return c +} + +func (c Zpopmax) Key(key string) ZpopmaxKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZpopmaxKey)(c) +} + +type ZpopmaxCount Completed + +func (c ZpopmaxCount) Build() Completed { + return Completed(c) +} + +type ZpopmaxKey Completed + +func (c ZpopmaxKey) Count(count int64) ZpopmaxCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (ZpopmaxCount)(c) +} + +func (c ZpopmaxKey) Build() Completed { + return Completed(c) +} + +type Zpopmin Completed + +func (b Builder) Zpopmin() (c Zpopmin) { + c = Zpopmin{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZPOPMIN") + return c +} + +func (c Zpopmin) Key(key string) ZpopminKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZpopminKey)(c) +} + +type ZpopminCount Completed + +func (c ZpopminCount) Build() Completed { + return Completed(c) +} + +type ZpopminKey Completed + +func (c ZpopminKey) Count(count int64) ZpopminCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (ZpopminCount)(c) +} + +func (c ZpopminKey) Build() Completed { + return Completed(c) +} + +type Zrandmember Completed + +func (b Builder) Zrandmember() (c Zrandmember) { + c = Zrandmember{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZRANDMEMBER") + return c +} + +func (c Zrandmember) Key(key string) ZrandmemberKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrandmemberKey)(c) +} + +type ZrandmemberKey Completed + +func (c ZrandmemberKey) Count(count int64) ZrandmemberOptionsCount { + c.cs.s = append(c.cs.s, strconv.FormatInt(count, 10)) + return (ZrandmemberOptionsCount)(c) +} + +func (c ZrandmemberKey) Build() Completed { + return Completed(c) +} + +type ZrandmemberOptionsCount Completed + +func (c ZrandmemberOptionsCount) 
Withscores() ZrandmemberOptionsWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrandmemberOptionsWithscores)(c) +} + +func (c ZrandmemberOptionsCount) Build() Completed { + return Completed(c) +} + +type ZrandmemberOptionsWithscores Completed + +func (c ZrandmemberOptionsWithscores) Build() Completed { + return Completed(c) +} + +type Zrange Completed + +func (b Builder) Zrange() (c Zrange) { + c = Zrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZRANGE") + return c +} + +func (c Zrange) Key(key string) ZrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrangeKey)(c) +} + +type ZrangeKey Completed + +func (c ZrangeKey) Min(min string) ZrangeMin { + c.cs.s = append(c.cs.s, min) + return (ZrangeMin)(c) +} + +type ZrangeLimit Completed + +func (c ZrangeLimit) Withscores() ZrangeWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrangeWithscores)(c) +} + +func (c ZrangeLimit) Build() Completed { + return Completed(c) +} + +func (c ZrangeLimit) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangeMax Completed + +func (c ZrangeMax) Byscore() ZrangeSortbyByscore { + c.cs.s = append(c.cs.s, "BYSCORE") + return (ZrangeSortbyByscore)(c) +} + +func (c ZrangeMax) Bylex() ZrangeSortbyBylex { + c.cs.s = append(c.cs.s, "BYLEX") + return (ZrangeSortbyBylex)(c) +} + +func (c ZrangeMax) Rev() ZrangeRev { + c.cs.s = append(c.cs.s, "REV") + return (ZrangeRev)(c) +} + +func (c ZrangeMax) Limit(offset int64, count int64) ZrangeLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangeLimit)(c) +} + +func (c ZrangeMax) Withscores() ZrangeWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrangeWithscores)(c) +} + +func (c ZrangeMax) Build() Completed { + return Completed(c) +} + +func (c ZrangeMax) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangeMin Completed + +func (c ZrangeMin) Max(max string) ZrangeMax { + c.cs.s = append(c.cs.s, max) + return (ZrangeMax)(c) +} + +type ZrangeRev Completed + +func (c ZrangeRev) Limit(offset int64, count int64) ZrangeLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangeLimit)(c) +} + +func (c ZrangeRev) Withscores() ZrangeWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrangeWithscores)(c) +} + +func (c ZrangeRev) Build() Completed { + return Completed(c) +} + +func (c ZrangeRev) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangeSortbyBylex Completed + +func (c ZrangeSortbyBylex) Rev() ZrangeRev { + c.cs.s = append(c.cs.s, "REV") + return (ZrangeRev)(c) +} + +func (c ZrangeSortbyBylex) Limit(offset int64, count int64) ZrangeLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangeLimit)(c) +} + +func (c ZrangeSortbyBylex) Withscores() ZrangeWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrangeWithscores)(c) +} + +func (c ZrangeSortbyBylex) Build() Completed { + return Completed(c) +} + +func (c ZrangeSortbyBylex) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangeSortbyByscore Completed + +func (c ZrangeSortbyByscore) Rev() ZrangeRev { + c.cs.s = append(c.cs.s, "REV") + return (ZrangeRev)(c) +} + +func (c ZrangeSortbyByscore) Limit(offset int64, count int64) ZrangeLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 
10)) + return (ZrangeLimit)(c) +} + +func (c ZrangeSortbyByscore) Withscores() ZrangeWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrangeWithscores)(c) +} + +func (c ZrangeSortbyByscore) Build() Completed { + return Completed(c) +} + +func (c ZrangeSortbyByscore) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangeWithscores Completed + +func (c ZrangeWithscores) Build() Completed { + return Completed(c) +} + +func (c ZrangeWithscores) Cache() Cacheable { + return Cacheable(c) +} + +type Zrangebylex Completed + +func (b Builder) Zrangebylex() (c Zrangebylex) { + c = Zrangebylex{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZRANGEBYLEX") + return c +} + +func (c Zrangebylex) Key(key string) ZrangebylexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrangebylexKey)(c) +} + +type ZrangebylexKey Completed + +func (c ZrangebylexKey) Min(min string) ZrangebylexMin { + c.cs.s = append(c.cs.s, min) + return (ZrangebylexMin)(c) +} + +type ZrangebylexLimit Completed + +func (c ZrangebylexLimit) Build() Completed { + return Completed(c) +} + +func (c ZrangebylexLimit) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangebylexMax Completed + +func (c ZrangebylexMax) Limit(offset int64, count int64) ZrangebylexLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangebylexLimit)(c) +} + +func (c ZrangebylexMax) Build() Completed { + return Completed(c) +} + +func (c ZrangebylexMax) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangebylexMin Completed + +func (c ZrangebylexMin) Max(max string) ZrangebylexMax { + c.cs.s = append(c.cs.s, max) + return (ZrangebylexMax)(c) +} + +type Zrangebyscore Completed + +func (b Builder) Zrangebyscore() (c Zrangebyscore) { + c = Zrangebyscore{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZRANGEBYSCORE") + return c +} + +func (c Zrangebyscore) Key(key string) ZrangebyscoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrangebyscoreKey)(c) +} + +type ZrangebyscoreKey Completed + +func (c ZrangebyscoreKey) Min(min string) ZrangebyscoreMin { + c.cs.s = append(c.cs.s, min) + return (ZrangebyscoreMin)(c) +} + +type ZrangebyscoreLimit Completed + +func (c ZrangebyscoreLimit) Build() Completed { + return Completed(c) +} + +func (c ZrangebyscoreLimit) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangebyscoreMax Completed + +func (c ZrangebyscoreMax) Withscores() ZrangebyscoreWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrangebyscoreWithscores)(c) +} + +func (c ZrangebyscoreMax) Limit(offset int64, count int64) ZrangebyscoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangebyscoreLimit)(c) +} + +func (c ZrangebyscoreMax) Build() Completed { + return Completed(c) +} + +func (c ZrangebyscoreMax) Cache() Cacheable { + return Cacheable(c) +} + +type ZrangebyscoreMin Completed + +func (c ZrangebyscoreMin) Max(max string) ZrangebyscoreMax { + c.cs.s = append(c.cs.s, max) + return (ZrangebyscoreMax)(c) +} + +type ZrangebyscoreWithscores Completed + +func (c ZrangebyscoreWithscores) Limit(offset int64, count int64) ZrangebyscoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return 
(ZrangebyscoreLimit)(c) +} + +func (c ZrangebyscoreWithscores) Build() Completed { + return Completed(c) +} + +func (c ZrangebyscoreWithscores) Cache() Cacheable { + return Cacheable(c) +} + +type Zrangestore Completed + +func (b Builder) Zrangestore() (c Zrangestore) { + c = Zrangestore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZRANGESTORE") + return c +} + +func (c Zrangestore) Dst(dst string) ZrangestoreDst { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(dst) + } else { + c.ks = check(c.ks, slot(dst)) + } + c.cs.s = append(c.cs.s, dst) + return (ZrangestoreDst)(c) +} + +type ZrangestoreDst Completed + +func (c ZrangestoreDst) Src(src string) ZrangestoreSrc { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(src) + } else { + c.ks = check(c.ks, slot(src)) + } + c.cs.s = append(c.cs.s, src) + return (ZrangestoreSrc)(c) +} + +type ZrangestoreLimit Completed + +func (c ZrangestoreLimit) Build() Completed { + return Completed(c) +} + +type ZrangestoreMax Completed + +func (c ZrangestoreMax) Byscore() ZrangestoreSortbyByscore { + c.cs.s = append(c.cs.s, "BYSCORE") + return (ZrangestoreSortbyByscore)(c) +} + +func (c ZrangestoreMax) Bylex() ZrangestoreSortbyBylex { + c.cs.s = append(c.cs.s, "BYLEX") + return (ZrangestoreSortbyBylex)(c) +} + +func (c ZrangestoreMax) Rev() ZrangestoreRev { + c.cs.s = append(c.cs.s, "REV") + return (ZrangestoreRev)(c) +} + +func (c ZrangestoreMax) Limit(offset int64, count int64) ZrangestoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangestoreLimit)(c) +} + +func (c ZrangestoreMax) Build() Completed { + return Completed(c) +} + +type ZrangestoreMin Completed + +func (c ZrangestoreMin) Max(max string) ZrangestoreMax { + c.cs.s = append(c.cs.s, max) + return (ZrangestoreMax)(c) +} + +type ZrangestoreRev Completed + +func (c ZrangestoreRev) Limit(offset int64, count int64) ZrangestoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangestoreLimit)(c) +} + +func (c ZrangestoreRev) Build() Completed { + return Completed(c) +} + +type ZrangestoreSortbyBylex Completed + +func (c ZrangestoreSortbyBylex) Rev() ZrangestoreRev { + c.cs.s = append(c.cs.s, "REV") + return (ZrangestoreRev)(c) +} + +func (c ZrangestoreSortbyBylex) Limit(offset int64, count int64) ZrangestoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangestoreLimit)(c) +} + +func (c ZrangestoreSortbyBylex) Build() Completed { + return Completed(c) +} + +type ZrangestoreSortbyByscore Completed + +func (c ZrangestoreSortbyByscore) Rev() ZrangestoreRev { + c.cs.s = append(c.cs.s, "REV") + return (ZrangestoreRev)(c) +} + +func (c ZrangestoreSortbyByscore) Limit(offset int64, count int64) ZrangestoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrangestoreLimit)(c) +} + +func (c ZrangestoreSortbyByscore) Build() Completed { + return Completed(c) +} + +type ZrangestoreSrc Completed + +func (c ZrangestoreSrc) Min(min string) ZrangestoreMin { + c.cs.s = append(c.cs.s, min) + return (ZrangestoreMin)(c) +} + +type Zrank Completed + +func (b Builder) Zrank() (c Zrank) { + c = Zrank{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZRANK") + return c +} + +func (c Zrank) Key(key string) ZrankKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = 
append(c.cs.s, key) + return (ZrankKey)(c) +} + +type ZrankKey Completed + +func (c ZrankKey) Member(member string) ZrankMember { + c.cs.s = append(c.cs.s, member) + return (ZrankMember)(c) +} + +type ZrankMember Completed + +func (c ZrankMember) Build() Completed { + return Completed(c) +} + +func (c ZrankMember) Cache() Cacheable { + return Cacheable(c) +} + +type Zrem Completed + +func (b Builder) Zrem() (c Zrem) { + c = Zrem{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZREM") + return c +} + +func (c Zrem) Key(key string) ZremKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZremKey)(c) +} + +type ZremKey Completed + +func (c ZremKey) Member(member ...string) ZremMember { + c.cs.s = append(c.cs.s, member...) + return (ZremMember)(c) +} + +type ZremMember Completed + +func (c ZremMember) Member(member ...string) ZremMember { + c.cs.s = append(c.cs.s, member...) + return c +} + +func (c ZremMember) Build() Completed { + return Completed(c) +} + +type Zremrangebylex Completed + +func (b Builder) Zremrangebylex() (c Zremrangebylex) { + c = Zremrangebylex{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZREMRANGEBYLEX") + return c +} + +func (c Zremrangebylex) Key(key string) ZremrangebylexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZremrangebylexKey)(c) +} + +type ZremrangebylexKey Completed + +func (c ZremrangebylexKey) Min(min string) ZremrangebylexMin { + c.cs.s = append(c.cs.s, min) + return (ZremrangebylexMin)(c) +} + +type ZremrangebylexMax Completed + +func (c ZremrangebylexMax) Build() Completed { + return Completed(c) +} + +type ZremrangebylexMin Completed + +func (c ZremrangebylexMin) Max(max string) ZremrangebylexMax { + c.cs.s = append(c.cs.s, max) + return (ZremrangebylexMax)(c) +} + +type Zremrangebyrank Completed + +func (b Builder) Zremrangebyrank() (c Zremrangebyrank) { + c = Zremrangebyrank{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZREMRANGEBYRANK") + return c +} + +func (c Zremrangebyrank) Key(key string) ZremrangebyrankKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZremrangebyrankKey)(c) +} + +type ZremrangebyrankKey Completed + +func (c ZremrangebyrankKey) Start(start int64) ZremrangebyrankStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (ZremrangebyrankStart)(c) +} + +type ZremrangebyrankStart Completed + +func (c ZremrangebyrankStart) Stop(stop int64) ZremrangebyrankStop { + c.cs.s = append(c.cs.s, strconv.FormatInt(stop, 10)) + return (ZremrangebyrankStop)(c) +} + +type ZremrangebyrankStop Completed + +func (c ZremrangebyrankStop) Build() Completed { + return Completed(c) +} + +type Zremrangebyscore Completed + +func (b Builder) Zremrangebyscore() (c Zremrangebyscore) { + c = Zremrangebyscore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZREMRANGEBYSCORE") + return c +} + +func (c Zremrangebyscore) Key(key string) ZremrangebyscoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZremrangebyscoreKey)(c) +} + +type ZremrangebyscoreKey Completed + +func (c ZremrangebyscoreKey) Min(min string) ZremrangebyscoreMin { + c.cs.s = append(c.cs.s, min) + return (ZremrangebyscoreMin)(c) +} + +type ZremrangebyscoreMax Completed + +func (c 
ZremrangebyscoreMax) Build() Completed { + return Completed(c) +} + +type ZremrangebyscoreMin Completed + +func (c ZremrangebyscoreMin) Max(max string) ZremrangebyscoreMax { + c.cs.s = append(c.cs.s, max) + return (ZremrangebyscoreMax)(c) +} + +type Zrevrange Completed + +func (b Builder) Zrevrange() (c Zrevrange) { + c = Zrevrange{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZREVRANGE") + return c +} + +func (c Zrevrange) Key(key string) ZrevrangeKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrevrangeKey)(c) +} + +type ZrevrangeKey Completed + +func (c ZrevrangeKey) Start(start int64) ZrevrangeStart { + c.cs.s = append(c.cs.s, strconv.FormatInt(start, 10)) + return (ZrevrangeStart)(c) +} + +type ZrevrangeStart Completed + +func (c ZrevrangeStart) Stop(stop int64) ZrevrangeStop { + c.cs.s = append(c.cs.s, strconv.FormatInt(stop, 10)) + return (ZrevrangeStop)(c) +} + +type ZrevrangeStop Completed + +func (c ZrevrangeStop) Withscores() ZrevrangeWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrevrangeWithscores)(c) +} + +func (c ZrevrangeStop) Build() Completed { + return Completed(c) +} + +func (c ZrevrangeStop) Cache() Cacheable { + return Cacheable(c) +} + +type ZrevrangeWithscores Completed + +func (c ZrevrangeWithscores) Build() Completed { + return Completed(c) +} + +func (c ZrevrangeWithscores) Cache() Cacheable { + return Cacheable(c) +} + +type Zrevrangebylex Completed + +func (b Builder) Zrevrangebylex() (c Zrevrangebylex) { + c = Zrevrangebylex{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZREVRANGEBYLEX") + return c +} + +func (c Zrevrangebylex) Key(key string) ZrevrangebylexKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrevrangebylexKey)(c) +} + +type ZrevrangebylexKey Completed + +func (c ZrevrangebylexKey) Max(max string) ZrevrangebylexMax { + c.cs.s = append(c.cs.s, max) + return (ZrevrangebylexMax)(c) +} + +type ZrevrangebylexLimit Completed + +func (c ZrevrangebylexLimit) Build() Completed { + return Completed(c) +} + +func (c ZrevrangebylexLimit) Cache() Cacheable { + return Cacheable(c) +} + +type ZrevrangebylexMax Completed + +func (c ZrevrangebylexMax) Min(min string) ZrevrangebylexMin { + c.cs.s = append(c.cs.s, min) + return (ZrevrangebylexMin)(c) +} + +type ZrevrangebylexMin Completed + +func (c ZrevrangebylexMin) Limit(offset int64, count int64) ZrevrangebylexLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrevrangebylexLimit)(c) +} + +func (c ZrevrangebylexMin) Build() Completed { + return Completed(c) +} + +func (c ZrevrangebylexMin) Cache() Cacheable { + return Cacheable(c) +} + +type Zrevrangebyscore Completed + +func (b Builder) Zrevrangebyscore() (c Zrevrangebyscore) { + c = Zrevrangebyscore{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZREVRANGEBYSCORE") + return c +} + +func (c Zrevrangebyscore) Key(key string) ZrevrangebyscoreKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrevrangebyscoreKey)(c) +} + +type ZrevrangebyscoreKey Completed + +func (c ZrevrangebyscoreKey) Max(max string) ZrevrangebyscoreMax { + c.cs.s = append(c.cs.s, max) + return (ZrevrangebyscoreMax)(c) +} + +type ZrevrangebyscoreLimit Completed + +func (c 
ZrevrangebyscoreLimit) Build() Completed { + return Completed(c) +} + +func (c ZrevrangebyscoreLimit) Cache() Cacheable { + return Cacheable(c) +} + +type ZrevrangebyscoreMax Completed + +func (c ZrevrangebyscoreMax) Min(min string) ZrevrangebyscoreMin { + c.cs.s = append(c.cs.s, min) + return (ZrevrangebyscoreMin)(c) +} + +type ZrevrangebyscoreMin Completed + +func (c ZrevrangebyscoreMin) Withscores() ZrevrangebyscoreWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZrevrangebyscoreWithscores)(c) +} + +func (c ZrevrangebyscoreMin) Limit(offset int64, count int64) ZrevrangebyscoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrevrangebyscoreLimit)(c) +} + +func (c ZrevrangebyscoreMin) Build() Completed { + return Completed(c) +} + +func (c ZrevrangebyscoreMin) Cache() Cacheable { + return Cacheable(c) +} + +type ZrevrangebyscoreWithscores Completed + +func (c ZrevrangebyscoreWithscores) Limit(offset int64, count int64) ZrevrangebyscoreLimit { + c.cs.s = append(c.cs.s, "LIMIT", strconv.FormatInt(offset, 10), strconv.FormatInt(count, 10)) + return (ZrevrangebyscoreLimit)(c) +} + +func (c ZrevrangebyscoreWithscores) Build() Completed { + return Completed(c) +} + +func (c ZrevrangebyscoreWithscores) Cache() Cacheable { + return Cacheable(c) +} + +type Zrevrank Completed + +func (b Builder) Zrevrank() (c Zrevrank) { + c = Zrevrank{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZREVRANK") + return c +} + +func (c Zrevrank) Key(key string) ZrevrankKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZrevrankKey)(c) +} + +type ZrevrankKey Completed + +func (c ZrevrankKey) Member(member string) ZrevrankMember { + c.cs.s = append(c.cs.s, member) + return (ZrevrankMember)(c) +} + +type ZrevrankMember Completed + +func (c ZrevrankMember) Build() Completed { + return Completed(c) +} + +func (c ZrevrankMember) Cache() Cacheable { + return Cacheable(c) +} + +type Zscan Completed + +func (b Builder) Zscan() (c Zscan) { + c = Zscan{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZSCAN") + return c +} + +func (c Zscan) Key(key string) ZscanKey { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZscanKey)(c) +} + +type ZscanCount Completed + +func (c ZscanCount) Build() Completed { + return Completed(c) +} + +type ZscanCursor Completed + +func (c ZscanCursor) Match(pattern string) ZscanMatch { + c.cs.s = append(c.cs.s, "MATCH", pattern) + return (ZscanMatch)(c) +} + +func (c ZscanCursor) Count(count int64) ZscanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (ZscanCount)(c) +} + +func (c ZscanCursor) Build() Completed { + return Completed(c) +} + +type ZscanKey Completed + +func (c ZscanKey) Cursor(cursor int64) ZscanCursor { + c.cs.s = append(c.cs.s, strconv.FormatInt(cursor, 10)) + return (ZscanCursor)(c) +} + +type ZscanMatch Completed + +func (c ZscanMatch) Count(count int64) ZscanCount { + c.cs.s = append(c.cs.s, "COUNT", strconv.FormatInt(count, 10)) + return (ZscanCount)(c) +} + +func (c ZscanMatch) Build() Completed { + return Completed(c) +} + +type Zscore Completed + +func (b Builder) Zscore() (c Zscore) { + c = Zscore{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZSCORE") + return c +} + +func (c Zscore) Key(key string) ZscoreKey { + if c.ks&NoSlot == 
NoSlot { + c.ks = NoSlot | slot(key) + } else { + c.ks = check(c.ks, slot(key)) + } + c.cs.s = append(c.cs.s, key) + return (ZscoreKey)(c) +} + +type ZscoreKey Completed + +func (c ZscoreKey) Member(member string) ZscoreMember { + c.cs.s = append(c.cs.s, member) + return (ZscoreMember)(c) +} + +type ZscoreMember Completed + +func (c ZscoreMember) Build() Completed { + return Completed(c) +} + +func (c ZscoreMember) Cache() Cacheable { + return Cacheable(c) +} + +type Zunion Completed + +func (b Builder) Zunion() (c Zunion) { + c = Zunion{cs: get(), ks: b.ks, cf: readonly} + c.cs.s = append(c.cs.s, "ZUNION") + return c +} + +func (c Zunion) Numkeys(numkeys int64) ZunionNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZunionNumkeys)(c) +} + +type ZunionAggregateMax Completed + +func (c ZunionAggregateMax) Withscores() ZunionWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZunionWithscores)(c) +} + +func (c ZunionAggregateMax) Build() Completed { + return Completed(c) +} + +type ZunionAggregateMin Completed + +func (c ZunionAggregateMin) Withscores() ZunionWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZunionWithscores)(c) +} + +func (c ZunionAggregateMin) Build() Completed { + return Completed(c) +} + +type ZunionAggregateSum Completed + +func (c ZunionAggregateSum) Withscores() ZunionWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZunionWithscores)(c) +} + +func (c ZunionAggregateSum) Build() Completed { + return Completed(c) +} + +type ZunionKey Completed + +func (c ZunionKey) Key(key ...string) ZunionKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZunionKey) Weights(weight ...int64) ZunionWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ZunionWeights)(c) +} + +func (c ZunionKey) AggregateSum() ZunionAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZunionAggregateSum)(c) +} + +func (c ZunionKey) AggregateMin() ZunionAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZunionAggregateMin)(c) +} + +func (c ZunionKey) AggregateMax() ZunionAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZunionAggregateMax)(c) +} + +func (c ZunionKey) Withscores() ZunionWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZunionWithscores)(c) +} + +func (c ZunionKey) Build() Completed { + return Completed(c) +} + +type ZunionNumkeys Completed + +func (c ZunionNumkeys) Key(key ...string) ZunionKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZunionKey)(c) +} + +type ZunionWeights Completed + +func (c ZunionWeights) Weights(weight ...int64) ZunionWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c ZunionWeights) AggregateSum() ZunionAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZunionAggregateSum)(c) +} + +func (c ZunionWeights) AggregateMin() ZunionAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZunionAggregateMin)(c) +} + +func (c ZunionWeights) AggregateMax() ZunionAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZunionAggregateMax)(c) +} + +func (c ZunionWeights) Withscores() ZunionWithscores { + c.cs.s = append(c.cs.s, "WITHSCORES") + return (ZunionWithscores)(c) +} + +func (c ZunionWeights) Build() Completed { + return Completed(c) +} + +type ZunionWithscores Completed + +func (c ZunionWithscores) Build() Completed { + return Completed(c) +} + +type Zunionstore Completed + +func (b Builder) Zunionstore() (c Zunionstore) { + c = Zunionstore{cs: get(), ks: b.ks} + c.cs.s = append(c.cs.s, "ZUNIONSTORE") + return c +} + +func (c Zunionstore) Destination(destination string) ZunionstoreDestination { + if c.ks&NoSlot == NoSlot { + c.ks = NoSlot | slot(destination) + } else { + c.ks = check(c.ks, slot(destination)) + } + c.cs.s = append(c.cs.s, destination) + return (ZunionstoreDestination)(c) +} + +type ZunionstoreAggregateMax Completed + +func (c ZunionstoreAggregateMax) Build() Completed { + return Completed(c) +} + +type ZunionstoreAggregateMin Completed + +func (c ZunionstoreAggregateMin) Build() Completed { + return Completed(c) +} + +type ZunionstoreAggregateSum Completed + +func (c ZunionstoreAggregateSum) Build() Completed { + return Completed(c) +} + +type ZunionstoreDestination Completed + +func (c ZunionstoreDestination) Numkeys(numkeys int64) ZunionstoreNumkeys { + c.cs.s = append(c.cs.s, strconv.FormatInt(numkeys, 10)) + return (ZunionstoreNumkeys)(c) +} + +type ZunionstoreKey Completed + +func (c ZunionstoreKey) Key(key ...string) ZunionstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) + return c +} + +func (c ZunionstoreKey) Weights(weight ...int64) ZunionstoreWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return (ZunionstoreWeights)(c) +} + +func (c ZunionstoreKey) AggregateSum() ZunionstoreAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZunionstoreAggregateSum)(c) +} + +func (c ZunionstoreKey) AggregateMin() ZunionstoreAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZunionstoreAggregateMin)(c) +} + +func (c ZunionstoreKey) AggregateMax() ZunionstoreAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZunionstoreAggregateMax)(c) +} + +func (c ZunionstoreKey) Build() Completed { + return Completed(c) +} + +type ZunionstoreNumkeys Completed + +func (c ZunionstoreNumkeys) Key(key ...string) ZunionstoreKey { + if c.ks&NoSlot == NoSlot { + for _, k := range key { + c.ks = NoSlot | slot(k) + break + } + } else { + for _, k := range key { + c.ks = check(c.ks, slot(k)) + } + } + c.cs.s = append(c.cs.s, key...) 
+ return (ZunionstoreKey)(c) +} + +type ZunionstoreWeights Completed + +func (c ZunionstoreWeights) Weights(weight ...int64) ZunionstoreWeights { + c.cs.s = append(c.cs.s, "WEIGHTS") + for _, n := range weight { + c.cs.s = append(c.cs.s, strconv.FormatInt(n, 10)) + } + return c +} + +func (c ZunionstoreWeights) AggregateSum() ZunionstoreAggregateSum { + c.cs.s = append(c.cs.s, "AGGREGATE", "SUM") + return (ZunionstoreAggregateSum)(c) +} + +func (c ZunionstoreWeights) AggregateMin() ZunionstoreAggregateMin { + c.cs.s = append(c.cs.s, "AGGREGATE", "MIN") + return (ZunionstoreAggregateMin)(c) +} + +func (c ZunionstoreWeights) AggregateMax() ZunionstoreAggregateMax { + c.cs.s = append(c.cs.s, "AGGREGATE", "MAX") + return (ZunionstoreAggregateMax)(c) +} + +func (c ZunionstoreWeights) Build() Completed { + return Completed(c) +} + diff --git a/vendor/github.com/rueian/rueidis/internal/cmds/slot.go b/vendor/github.com/rueian/rueidis/internal/cmds/slot.go new file mode 100644 index 00000000000..fef8f5aec3c --- /dev/null +++ b/vendor/github.com/rueian/rueidis/internal/cmds/slot.go @@ -0,0 +1,109 @@ +package cmds + +// https://redis.io/topics/cluster-spec + +func slot(key string) uint16 { + var s, e int + for ; s < len(key); s++ { + if key[s] == '{' { + break + } + } + if s == len(key) { + return crc16(key) & 16383 + } + for e = s + 1; e < len(key); e++ { + if key[e] == '}' { + break + } + } + if e == len(key) || e == s+1 { + return crc16(key) & 16383 + } + return crc16(key[s+1:e]) & 16383 +} + +/* + * Copyright 2001-2010 Georges Menie (www.menie.org) + * Copyright 2010 Salvatore Sanfilippo (adapted to Redis coding style) + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* CRC16 implementation according to CCITT standards. 
+ * + * Note by @antirez: this is actually the XMODEM CRC 16 algorithm, using the + * following parameters: + * + * Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" + * Width : 16 bit + * Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) + * Initialization : 0000 + * Reflect Input byte : False + * Reflect Output CRC : False + * Xor constant to output CRC : 0000 + * Output for "123456789" : 31C3 + */ + +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, +} + +func crc16(key string) (crc uint16) { + for i := 0; i < len(key); i++ { + crc = (crc << 8) ^ crc16tab[(uint8(crc>>8)^key[i])&0x00FF] + } + return crc +} diff --git a/vendor/github.com/rueian/rueidis/internal/util/parallel.go b/vendor/github.com/rueian/rueidis/internal/util/parallel.go new file mode 100644 index 00000000000..9ad06107245 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/internal/util/parallel.go @@ -0,0 +1,45 @@ +package util + +import ( + "runtime" + "sync" +) + +func ParallelKeys[K comparable, V any](p map[K]V, fn func(k K)) { + ch := make(chan K, len(p)) + for k := range p { + ch <- k + } + closeThenParallel(ch, fn) +} + +func ParallelVals[K comparable, V any](p map[K]V, fn func(k V)) { + ch := make(chan V, len(p)) + for _, v := range p { + ch <- v + } + closeThenParallel(ch, fn) +} + +func closeThenParallel[V any](ch chan V, fn func(k V)) { + close(ch) + concurrency := len(ch) + if cpus := runtime.NumCPU(); concurrency > cpus { + concurrency = cpus + } + 
wg := sync.WaitGroup{} + wg.Add(concurrency) + for i := 1; i < concurrency; i++ { + go func() { + for v := range ch { + fn(v) + } + wg.Done() + }() + } + for v := range ch { + fn(v) + } + wg.Done() + wg.Wait() +} diff --git a/vendor/github.com/rueian/rueidis/lru.go b/vendor/github.com/rueian/rueidis/lru.go new file mode 100644 index 00000000000..c38d08ac78c --- /dev/null +++ b/vendor/github.com/rueian/rueidis/lru.go @@ -0,0 +1,260 @@ +package rueidis + +import ( + "container/list" + "context" + "sync" + "sync/atomic" + "time" + "unsafe" +) + +const ( + entrySize = int(unsafe.Sizeof(entry{})) + int(unsafe.Sizeof(&entry{})) + keyCacheSize = int(unsafe.Sizeof(keyCache{})) + int(unsafe.Sizeof(&keyCache{})) + elementSize = int(unsafe.Sizeof(list.Element{})) + int(unsafe.Sizeof(&list.Element{})) + stringSSize = int(unsafe.Sizeof("")) + + entryBaseSize = (keyCacheSize + entrySize + elementSize + stringSSize*2) * 3 / 2 + entryMinSize = entryBaseSize + messageStructSize + + moveThreshold = uint64(1024 - 1) +) + +type cache interface { + GetOrPrepare(key, cmd string, now time.Time, ttl time.Duration) (v RedisMessage, entry *entry) + Update(key, cmd string, value RedisMessage, pttl int64) + Cancel(key, cmd string, err error) + Delete(keys []RedisMessage) + GetTTL(key string) time.Duration + FreeAndClose(err error) +} + +type entry struct { + err error + ch chan struct{} + kc *keyCache + cmd string + val RedisMessage + size int +} + +func (e *entry) Wait(ctx context.Context) (RedisMessage, error) { + if ch := ctx.Done(); ch == nil { + <-e.ch + } else { + select { + case <-ch: + return RedisMessage{}, ctx.Err() + case <-e.ch: + } + } + return e.val, e.err +} + +type keyCache struct { + ttl time.Time + cache map[string]*list.Element + key string + hits uint64 + miss uint64 +} + +var _ cache = (*lru)(nil) + +type lru struct { + store map[string]*keyCache + list *list.List + mu sync.RWMutex + size int + max int +} + +func newLRU(max int) *lru { + return &lru{ + max: max, + store: make(map[string]*keyCache), + list: list.New(), + } +} + +func (c *lru) GetOrPrepare(key, cmd string, now time.Time, ttl time.Duration) (v RedisMessage, e *entry) { + var ok bool + var kc *keyCache + var kcTTL time.Time + var ele, back *list.Element + + c.mu.RLock() + if kc, ok = c.store[key]; ok { + kcTTL = kc.ttl + if ele, ok = kc.cache[cmd]; ok { + e = ele.Value.(*entry) + v = e.val + back = c.list.Back() + } + } + c.mu.RUnlock() + + if e != nil && (v.typ == 0 || kcTTL.After(now)) { + hits := atomic.AddUint64(&kc.hits, 1) + if ele != back && hits&moveThreshold == 0 { + c.mu.Lock() + if c.list != nil { + c.list.MoveToBack(ele) + } + c.mu.Unlock() + } + return v, e + } + + v = RedisMessage{} + e = nil + + c.mu.Lock() + if kc, ok = c.store[key]; !ok { + if c.store == nil { + goto miss + } + kc = &keyCache{cache: make(map[string]*list.Element, 1), key: key, ttl: now.Add(ttl)} + c.store[key] = kc + } + if ele, ok = kc.cache[cmd]; ok { + if e = ele.Value.(*entry); e.val.typ == 0 || kc.ttl.After(now) { + atomic.AddUint64(&kc.hits, 1) + v = e.val + c.list.MoveToBack(ele) + } else { + c.list.Remove(ele) + c.size -= e.size + e = nil + } + } + if e == nil { + atomic.AddUint64(&kc.miss, 1) + c.list.PushBack(&entry{ + cmd: cmd, + kc: kc, + ch: make(chan struct{}, 1), + }) + kc.ttl = now.Add(ttl) + kc.cache[cmd] = c.list.Back() + } +miss: + c.mu.Unlock() + return v, e +} + +func (c *lru) Update(key, cmd string, value RedisMessage, pttl int64) { + var ch chan struct{} + c.mu.Lock() + if kc, ok := c.store[key]; ok { + if pttl >= 0 { + // 
server side ttl should only shorten client side ttl + if ttl := time.Now().Add(time.Duration(pttl) * time.Millisecond); ttl.Before(kc.ttl) { + kc.ttl = ttl + } + } + if ele, ok := kc.cache[cmd]; ok { + if e := ele.Value.(*entry); e.val.typ == 0 { + e.val = value + e.val.setTTL(kc.ttl.Unix()) + e.size = entryBaseSize + 2*(len(key)+len(cmd)) + value.approximateSize() + c.size += e.size + ch = e.ch + } + + ele = c.list.Front() + for c.size > c.max && ele != nil { + if e := ele.Value.(*entry); e.val.typ != 0 { // do not delete pending entries + kc := e.kc + if delete(kc.cache, e.cmd); len(kc.cache) == 0 { + delete(c.store, kc.key) + } + c.list.Remove(ele) + c.size -= e.size + } + ele = ele.Next() + } + } + } + c.mu.Unlock() + if ch != nil { + close(ch) + } +} + +func (c *lru) Cancel(key, cmd string, err error) { + var ch chan struct{} + c.mu.Lock() + if kc, ok := c.store[key]; ok { + if ele, ok := kc.cache[cmd]; ok { + if e := ele.Value.(*entry); e.val.typ == 0 { + e.err = err + ch = e.ch + if delete(kc.cache, cmd); len(kc.cache) == 0 { + delete(c.store, key) + } + c.list.Remove(ele) + } + } + } + c.mu.Unlock() + if ch != nil { + close(ch) + } +} + +func (c *lru) GetTTL(key string) (ttl time.Duration) { + c.mu.Lock() + if kc, ok := c.store[key]; ok && len(kc.cache) != 0 { + ttl = kc.ttl.Sub(time.Now()) + } + if ttl <= 0 { + ttl = -2 + } + c.mu.Unlock() + return +} + +func (c *lru) purge(key string, kc *keyCache) { + if kc != nil { + for cmd, ele := range kc.cache { + if e := ele.Value.(*entry); e.val.typ != 0 { // do not delete pending entries + if delete(kc.cache, cmd); len(kc.cache) == 0 { + delete(c.store, key) + } + c.list.Remove(ele) + c.size -= e.size + } + } + } +} + +func (c *lru) Delete(keys []RedisMessage) { + c.mu.Lock() + if keys == nil { + for key, kc := range c.store { + c.purge(key, kc) + } + } else { + for _, k := range keys { + c.purge(k.string, c.store[k.string]) + } + } + c.mu.Unlock() +} + +func (c *lru) FreeAndClose(err error) { + c.mu.Lock() + for _, kc := range c.store { + for _, ele := range kc.cache { + if e := ele.Value.(*entry); e.val.typ == 0 { + e.err = err + close(e.ch) + } + } + } + c.store = nil + c.list = nil + c.mu.Unlock() +} diff --git a/vendor/github.com/rueian/rueidis/lua.go b/vendor/github.com/rueian/rueidis/lua.go new file mode 100644 index 00000000000..f4ec56c01dd --- /dev/null +++ b/vendor/github.com/rueian/rueidis/lua.go @@ -0,0 +1,85 @@ +package rueidis + +import ( + "context" + "crypto/sha1" + "encoding/hex" + "sync/atomic" + + "github.com/rueian/rueidis/internal/util" +) + +// NewLuaScript creates a Lua instance whose Lua.Exec uses EVALSHA and EVAL. +func NewLuaScript(script string) *Lua { + sum := sha1.Sum([]byte(script)) + return &Lua{script: script, sha1: hex.EncodeToString(sum[:])} +} + +// NewLuaScriptReadOnly creates a Lua instance whose Lua.Exec uses EVALSHA_RO and EVAL_RO. +func NewLuaScriptReadOnly(script string) *Lua { + lua := NewLuaScript(script) + lua.readonly = true + return lua +} + +// Lua represents a redis lua script. It should be created from the NewLuaScript() or NewLuaScriptReadOnly() +type Lua struct { + script string + sha1 string + readonly bool +} + +// Exec the script to the given Client. +// It will first try with the EVALSHA/EVALSHA_RO and then EVAL/EVAL_RO if first try failed. +// Cross slot keys are prohibited if the Client is a cluster client. 
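+// Example (illustrative sketch; ctx, client and the key name are placeholders):
+//
+//	script := NewLuaScript("return redis.call('GET', KEYS[1])")
+//	v, err := script.Exec(ctx, client, []string{"my-key"}, nil).ToString()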
+func (s *Lua) Exec(ctx context.Context, c Client, keys, args []string) (resp RedisResult) { + if s.readonly { + resp = c.Do(ctx, c.B().EvalshaRo().Sha1(s.sha1).Numkeys(int64(len(keys))).Key(keys...).Arg(args...).Build()) + } else { + resp = c.Do(ctx, c.B().Evalsha().Sha1(s.sha1).Numkeys(int64(len(keys))).Key(keys...).Arg(args...).Build()) + } + if err := resp.RedisError(); err != nil && err.IsNoScript() { + if s.readonly { + resp = c.Do(ctx, c.B().EvalRo().Script(s.script).Numkeys(int64(len(keys))).Key(keys...).Arg(args...).Build()) + } else { + resp = c.Do(ctx, c.B().Eval().Script(s.script).Numkeys(int64(len(keys))).Key(keys...).Arg(args...).Build()) + } + } + return resp +} + +// LuaExec is a single execution unit of Lua.ExecMulti +type LuaExec struct { + Keys []string + Args []string +} + +// ExecMulti exec the script multiple times by the provided LuaExec to the given Client. +// It will first try SCRIPT LOAD the script to all redis nodes and then exec it with the EVALSHA/EVALSHA_RO. +// Cross slot keys within single LuaExec are prohibited if the Client is a cluster client. +func (s *Lua) ExecMulti(ctx context.Context, c Client, multi ...LuaExec) (resp []RedisResult) { + var e atomic.Value + util.ParallelVals(c.Nodes(), func(n Client) { + if err := n.Do(ctx, n.B().ScriptLoad().Script(s.script).Build()).Error(); err != nil { + e.CompareAndSwap(nil, &errs{error: err}) + } + }) + if err := e.Load(); err != nil { + resp = make([]RedisResult, len(multi)) + for i := 0; i < len(resp); i++ { + resp[i] = newErrResult(err.(*errs).error) + } + return + } + cmds := make(Commands, 0, len(multi)) + if s.readonly { + for _, m := range multi { + cmds = append(cmds, c.B().EvalshaRo().Sha1(s.sha1).Numkeys(int64(len(m.Keys))).Key(m.Keys...).Arg(m.Args...).Build()) + } + } else { + for _, m := range multi { + cmds = append(cmds, c.B().Evalsha().Sha1(s.sha1).Numkeys(int64(len(m.Keys))).Key(m.Keys...).Arg(m.Args...).Build()) + } + } + return c.DoMulti(ctx, cmds...) +} diff --git a/vendor/github.com/rueian/rueidis/message.go b/vendor/github.com/rueian/rueidis/message.go new file mode 100644 index 00000000000..098087a41e1 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/message.go @@ -0,0 +1,875 @@ +package rueidis + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "time" + "unsafe" +) + +const messageStructSize = int(unsafe.Sizeof(RedisMessage{})) + +// Nil represents a Redis Nil message +var Nil = &RedisError{typ: '_'} + +// IsRedisNil is a handy method to check if error is redis nil response. +// All redis nil response returns as an error. +func IsRedisNil(err error) bool { + return err == Nil +} + +// RedisError is an error response or a nil message from redis instance +type RedisError RedisMessage + +func (r *RedisError) Error() string { + if r.IsNil() { + return "redis nil message" + } + return r.string +} + +// IsNil checks if it is a redis nil message. +func (r *RedisError) IsNil() bool { + return r.typ == '_' +} + +// IsMoved checks if it is a redis MOVED message and returns moved address. +func (r *RedisError) IsMoved() (addr string, ok bool) { + if ok = strings.HasPrefix(r.string, "MOVED"); ok { + addr = strings.Split(r.string, " ")[2] + } + return +} + +// IsAsk checks if it is a redis ASK message and returns ask address. +func (r *RedisError) IsAsk() (addr string, ok bool) { + if ok = strings.HasPrefix(r.string, "ASK"); ok { + addr = strings.Split(r.string, " ")[2] + } + return +} + +// IsTryAgain checks if it is a redis TRYAGAIN message and returns ask address. 
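+// Unlike IsAsk, it only reports whether the error message starts with TRYAGAIN
+// (typically sent during resharding, when a multi-key operation spans keys that
+// are temporarily split between the migrating source and target nodes).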
+func (r *RedisError) IsTryAgain() bool { + return strings.HasPrefix(r.string, "TRYAGAIN") +} + +// IsClusterDown checks if it is a redis CLUSTERDOWN message and returns ask address. +func (r *RedisError) IsClusterDown() bool { + return strings.HasPrefix(r.string, "CLUSTERDOWN") +} + +// IsNoScript checks if it is a redis NOSCRIPT message. +func (r *RedisError) IsNoScript() bool { + return strings.HasPrefix(r.string, "NOSCRIPT") +} + +func newResult(val RedisMessage, err error) RedisResult { + return RedisResult{val: val, err: err} +} + +func newErrResult(err error) RedisResult { + return RedisResult{err: err} +} + +// RedisResult is the return struct from Client.Do or Client.DoCache +// it contains either a redis response or an underlying error (ex. network timeout). +type RedisResult struct { + err error + val RedisMessage +} + +// RedisError can be used to check if the redis response is an error message. +func (r RedisResult) RedisError() *RedisError { + if err := r.val.Error(); err != nil { + return err.(*RedisError) + } + return nil +} + +// NonRedisError can be used to check if there is an underlying error (ex. network timeout). +func (r RedisResult) NonRedisError() error { + return r.err +} + +// Error returns either underlying error or redis error or nil +func (r RedisResult) Error() (err error) { + if r.err != nil { + err = r.err + } else { + err = r.val.Error() + } + return +} + +// ToMessage retrieves the RedisMessage +func (r RedisResult) ToMessage() (v RedisMessage, err error) { + if r.err != nil { + err = r.err + } else { + err = r.val.Error() + } + return r.val, err +} + +// ToInt64 delegates to RedisMessage.ToInt64 +func (r RedisResult) ToInt64() (v int64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToInt64() + } + return +} + +// ToBool delegates to RedisMessage.ToBool +func (r RedisResult) ToBool() (v bool, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToBool() + } + return +} + +// ToFloat64 delegates to RedisMessage.ToFloat64 +func (r RedisResult) ToFloat64() (v float64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToFloat64() + } + return +} + +// ToString delegates to RedisMessage.ToString +func (r RedisResult) ToString() (v string, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToString() + } + return +} + +// AsReader delegates to RedisMessage.AsReader +func (r RedisResult) AsReader() (v io.Reader, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsReader() + } + return +} + +// DecodeJSON delegates to RedisMessage.DecodeJSON +func (r RedisResult) DecodeJSON(v interface{}) (err error) { + if r.err != nil { + err = r.err + } else { + err = r.val.DecodeJSON(v) + } + return +} + +// AsInt64 delegates to RedisMessage.AsInt64 +func (r RedisResult) AsInt64() (v int64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsInt64() + } + return +} + +// AsBool delegates to RedisMessage.AsBool +func (r RedisResult) AsBool() (v bool, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsBool() + } + return +} + +// AsFloat64 delegates to RedisMessage.AsFloat64 +func (r RedisResult) AsFloat64() (v float64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsFloat64() + } + return +} + +// ToArray delegates to RedisMessage.ToArray +func (r RedisResult) ToArray() (v []RedisMessage, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToArray() + } 
+ return +} + +// AsStrSlice delegates to RedisMessage.AsStrSlice +func (r RedisResult) AsStrSlice() (v []string, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsStrSlice() + } + return +} + +// AsIntSlice delegates to RedisMessage.AsIntSlice +func (r RedisResult) AsIntSlice() (v []int64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsIntSlice() + } + return +} + +// AsFloatSlice delegates to RedisMessage.AsFloatSlice +func (r RedisResult) AsFloatSlice() (v []float64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsFloatSlice() + } + return +} + +// AsXRangeEntry delegates to RedisMessage.AsXRangeEntry +func (r RedisResult) AsXRangeEntry() (v XRangeEntry, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsXRangeEntry() + } + return +} + +// AsXRange delegates to RedisMessage.AsXRange +func (r RedisResult) AsXRange() (v []XRangeEntry, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsXRange() + } + return +} + +// AsZScore delegates to RedisMessage.AsZScore +func (r RedisResult) AsZScore() (v ZScore, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsZScore() + } + return +} + +// AsZScores delegates to RedisMessage.AsZScores +func (r RedisResult) AsZScores() (v []ZScore, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsZScores() + } + return +} + +// AsXRead delegates to RedisMessage.AsXRead +func (r RedisResult) AsXRead() (v map[string][]XRangeEntry, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsXRead() + } + return +} + +// AsMap delegates to RedisMessage.AsMap +func (r RedisResult) AsMap() (v map[string]RedisMessage, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsMap() + } + return +} + +// AsStrMap delegates to RedisMessage.AsStrMap +func (r RedisResult) AsStrMap() (v map[string]string, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsStrMap() + } + return +} + +// AsIntMap delegates to RedisMessage.AsIntMap +func (r RedisResult) AsIntMap() (v map[string]int64, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.AsIntMap() + } + return +} + +// ToMap delegates to RedisMessage.ToMap +func (r RedisResult) ToMap() (v map[string]RedisMessage, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToMap() + } + return +} + +// ToAny delegates to RedisMessage.ToAny +func (r RedisResult) ToAny() (v interface{}, err error) { + if r.err != nil { + err = r.err + } else { + v, err = r.val.ToAny() + } + return +} + +// IsCacheHit delegates to RedisMessage.IsCacheHit +func (r RedisResult) IsCacheHit() bool { + return r.val.IsCacheHit() +} + +// CacheTTL delegates to RedisMessage.CacheTTL +func (r RedisResult) CacheTTL() int64 { + return r.val.CacheTTL() +} + +// RedisMessage is a redis response message, it may be a nil response +type RedisMessage struct { + attrs *RedisMessage + string string + values []RedisMessage + integer int64 + typ byte + ttl [7]byte +} + +// IsNil check if message is a redis nil response +func (m *RedisMessage) IsNil() bool { + return m.typ == '_' +} + +// IsInt64 check if message is a redis RESP3 int response +func (m *RedisMessage) IsInt64() bool { + return m.typ == ':' +} + +// IsFloat64 check if message is a redis RESP3 double response +func (m *RedisMessage) IsFloat64() bool { + return m.typ == ',' +} + +// IsString check if message is a redis 
string response +func (m *RedisMessage) IsString() bool { + return m.typ == '$' || m.typ == '+' +} + +// IsBool check if message is a redis RESP3 bool response +func (m *RedisMessage) IsBool() bool { + return m.typ == '#' +} + +// IsArray check if message is a redis array response +func (m *RedisMessage) IsArray() bool { + return m.typ == '*' || m.typ == '~' +} + +// IsMap check if message is a redis RESP3 map response +func (m *RedisMessage) IsMap() bool { + return m.typ == '%' +} + +// Error check if message is a redis error response, including nil response +func (m *RedisMessage) Error() error { + if m.typ == '_' { + return Nil + } + if m.typ == '-' || m.typ == '!' { + // kvrocks: https://github.com/rueian/rueidis/issues/152#issuecomment-1333923750 + mm := *m + mm.string = strings.TrimPrefix(m.string, "ERR ") + return (*RedisError)(&mm) + } + return nil +} + +// ToString check if message is a redis string response, and return it +func (m *RedisMessage) ToString() (val string, err error) { + if m.IsString() { + return m.string, nil + } + if m.IsInt64() || m.values != nil { + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a string", typ)) + } + return m.string, m.Error() +} + +// AsReader check if message is a redis string response and wrap it with the strings.NewReader +func (m *RedisMessage) AsReader() (reader io.Reader, err error) { + str, err := m.ToString() + if err != nil { + return nil, err + } + return strings.NewReader(str), nil +} + +// DecodeJSON check if message is a redis string response and treat it as json, then unmarshal it into provided value +func (m *RedisMessage) DecodeJSON(v interface{}) (err error) { + str, err := m.ToString() + if err != nil { + return err + } + decoder := json.NewDecoder(strings.NewReader(str)) + return decoder.Decode(v) +} + +// AsInt64 check if message is a redis string response, and parse it as int64 +func (m *RedisMessage) AsInt64() (val int64, err error) { + if m.IsInt64() { + return m.integer, nil + } + v, err := m.ToString() + if err != nil { + return 0, err + } + return strconv.ParseInt(v, 10, 64) +} + +// AsBool checks if message is non-nil redis response, and parses it as bool +func (m *RedisMessage) AsBool() (val bool, err error) { + if err = m.Error(); err != nil { + return + } + switch m.typ { + case '$', '+': + val = m.string == "OK" + return + case ':': + val = m.integer != 0 + return + case '#': + val = m.integer == 1 + return + default: + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a int, string or bool", typ)) + } +} + +// AsFloat64 check if message is a redis string response, and parse it as float64 +func (m *RedisMessage) AsFloat64() (val float64, err error) { + if m.IsFloat64() { + return strconv.ParseFloat(m.string, 64) + } + v, err := m.ToString() + if err != nil { + return 0, err + } + return strconv.ParseFloat(v, 64) +} + +// ToInt64 check if message is a redis RESP3 int response, and return it +func (m *RedisMessage) ToInt64() (val int64, err error) { + if m.IsInt64() { + return m.integer, nil + } + if err = m.Error(); err != nil { + return 0, err + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a RESP3 int64", typ)) +} + +// ToBool check if message is a redis RESP3 bool response, and return it +func (m *RedisMessage) ToBool() (val bool, err error) { + if m.IsBool() { + return m.integer == 1, nil + } + if err = m.Error(); err != nil { + return false, err + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a RESP3 bool", typ)) +} + +// ToFloat64 check 
if message is a redis RESP3 double response, and return it +func (m *RedisMessage) ToFloat64() (val float64, err error) { + if m.IsFloat64() { + return strconv.ParseFloat(m.string, 64) + } + if err = m.Error(); err != nil { + return 0, err + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a RESP3 float64", typ)) +} + +// ToArray check if message is a redis array/set response, and return it +func (m *RedisMessage) ToArray() ([]RedisMessage, error) { + if m.IsArray() { + return m.values, nil + } + if err := m.Error(); err != nil { + return nil, err + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a array", typ)) +} + +// AsStrSlice check if message is a redis array/set response, and convert to []string. +// redis nil element and other non string element will be present as zero. +func (m *RedisMessage) AsStrSlice() ([]string, error) { + values, err := m.ToArray() + if err != nil { + return nil, err + } + s := make([]string, 0, len(values)) + for _, v := range values { + s = append(s, v.string) + } + return s, nil +} + +// AsIntSlice check if message is a redis array/set response, and convert to []int64. +// redis nil element and other non integer element will be present as zero. +func (m *RedisMessage) AsIntSlice() ([]int64, error) { + values, err := m.ToArray() + if err != nil { + return nil, err + } + s := make([]int64, 0, len(values)) + for _, v := range values { + s = append(s, v.integer) + } + return s, nil +} + +// AsFloatSlice check if message is a redis array/set response, and convert to []float64. +// redis nil element and other non float element will be present as zero. +func (m *RedisMessage) AsFloatSlice() ([]float64, error) { + values, err := m.ToArray() + if err != nil { + return nil, err + } + s := make([]float64, 0, len(values)) + for _, v := range values { + if len(v.string) != 0 { + i, err := strconv.ParseFloat(v.string, 64) + if err != nil { + return nil, err + } + s = append(s, i) + } else { + s = append(s, float64(v.integer)) + } + } + return s, nil +} + +// XRangeEntry is the element type of both XRANGE and XREVRANGE command response array +type XRangeEntry struct { + FieldValues map[string]string + ID string +} + +// AsXRangeEntry check if message is a redis array/set response of length 2, and convert to XRangeEntry +func (m *RedisMessage) AsXRangeEntry() (XRangeEntry, error) { + values, err := m.ToArray() + if err != nil { + return XRangeEntry{}, err + } + if len(values) != 2 { + return XRangeEntry{}, fmt.Errorf("got %d, wanted 2", len(values)) + } + id, err := values[0].ToString() + if err != nil { + return XRangeEntry{}, err + } + fieldValues, err := values[1].AsStrMap() + if err != nil { + if IsRedisNil(err) { + return XRangeEntry{ID: id, FieldValues: nil}, nil + } + return XRangeEntry{}, err + } + return XRangeEntry{ + ID: id, + FieldValues: fieldValues, + }, nil +} + +// AsXRange check if message is a redis array/set response, and convert to []XRangeEntry +func (m *RedisMessage) AsXRange() ([]XRangeEntry, error) { + values, err := m.ToArray() + if err != nil { + return nil, err + } + msgs := make([]XRangeEntry, 0, len(values)) + for _, v := range values { + msg, err := v.AsXRangeEntry() + if err != nil { + return nil, err + } + msgs = append(msgs, msg) + } + return msgs, nil +} + +// AsXRead converts XREAD/XREADGRUOP response to map[string][]XRangeEntry +func (m *RedisMessage) AsXRead() (ret map[string][]XRangeEntry, err error) { + if err = m.Error(); err != nil { + return nil, err + } + if m.IsMap() { + ret = 
make(map[string][]XRangeEntry, len(m.values)/2) + for i := 0; i < len(m.values); i += 2 { + if ret[m.values[i].string], err = m.values[i+1].AsXRange(); err != nil { + return nil, err + } + } + return ret, nil + } + if m.IsArray() { + ret = make(map[string][]XRangeEntry, len(m.values)) + for _, v := range m.values { + if !v.IsArray() || len(v.values) != 2 { + return nil, fmt.Errorf("got %d, wanted 2", len(v.values)) + } + if ret[v.values[0].string], err = v.values[1].AsXRange(); err != nil { + return nil, err + } + } + return ret, nil + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) +} + +// ZScore is the element type of ZRANGE WITHSCORES, ZDIFF WITHSCORES and ZPOPMAX command response +type ZScore struct { + Member string + Score float64 +} + +func toZScore(values []RedisMessage) (s ZScore, err error) { + if len(values) == 2 { + if s.Member, err = values[0].ToString(); err == nil { + s.Score, err = values[1].AsFloat64() + } + return s, err + } + panic(fmt.Sprintf("redis message is not a map/array/set or its length is not 2")) +} + +// AsZScore converts ZPOPMAX and ZPOPMIN command with count 1 response to a single ZScore +func (m *RedisMessage) AsZScore() (s ZScore, err error) { + arr, err := m.ToArray() + if err != nil { + return s, err + } + return toZScore(arr) +} + +// AsZScores converts ZRANGE WITHSCROES, ZDIFF WITHSCROES and ZPOPMAX/ZPOPMIN command with count > 1 responses to []ZScore +func (m *RedisMessage) AsZScores() ([]ZScore, error) { + arr, err := m.ToArray() + if err != nil { + return nil, err + } + if len(arr) > 0 && arr[0].IsArray() { + scores := make([]ZScore, len(arr)) + for i, v := range arr { + if scores[i], err = toZScore(v.values); err != nil { + return nil, err + } + } + return scores, nil + } + scores := make([]ZScore, len(arr)/2) + for i := 0; i < len(scores); i++ { + j := i * 2 + if scores[i], err = toZScore(arr[j : j+2]); err != nil { + return nil, err + } + } + return scores, nil +} + +// AsMap check if message is a redis array/set response, and convert to map[string]RedisMessage +func (m *RedisMessage) AsMap() (map[string]RedisMessage, error) { + if err := m.Error(); err != nil { + return nil, err + } + if (m.IsMap() || m.IsArray()) && len(m.values)%2 == 0 { + return toMap(m.values), nil + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) +} + +// AsStrMap check if message is a redis map/array/set response, and convert to map[string]string. +// redis nil element and other non string element will be present as zero. +func (m *RedisMessage) AsStrMap() (map[string]string, error) { + if err := m.Error(); err != nil { + return nil, err + } + if (m.IsMap() || m.IsArray()) && len(m.values)%2 == 0 { + r := make(map[string]string, len(m.values)/2) + for i := 0; i < len(m.values); i += 2 { + k := m.values[i] + v := m.values[i+1] + r[k.string] = v.string + } + return r, nil + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) +} + +// AsIntMap check if message is a redis map/array/set response, and convert to map[string]int64. +// redis nil element and other non integer element will be present as zero. 
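+// For example (illustrative), a field/value reply of ["a","1","b","2"] converts
+// to map[string]int64{"a": 1, "b": 2}.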
+func (m *RedisMessage) AsIntMap() (map[string]int64, error) { + if err := m.Error(); err != nil { + return nil, err + } + if (m.IsMap() || m.IsArray()) && len(m.values)%2 == 0 { + var err error + r := make(map[string]int64, len(m.values)/2) + for i := 0; i < len(m.values); i += 2 { + k := m.values[i] + v := m.values[i+1] + if k.typ == '$' || k.typ == '+' { + if len(v.string) != 0 { + if r[k.string], err = strconv.ParseInt(v.string, 0, 64); err != nil { + return nil, err + } + } else if v.typ == ':' || v.typ == '_' { + r[k.string] = v.integer + } + } + } + return r, nil + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a map/array/set or its length is not even", typ)) +} + +// ToMap check if message is a redis RESP3 map response, and return it +func (m *RedisMessage) ToMap() (map[string]RedisMessage, error) { + if m.IsMap() { + return toMap(m.values), nil + } + if err := m.Error(); err != nil { + return nil, err + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a RESP3 map", typ)) +} + +// ToAny turns message into go interface{} value +func (m *RedisMessage) ToAny() (interface{}, error) { + if err := m.Error(); err != nil { + return nil, err + } + switch m.typ { + case ',': + return strconv.ParseFloat(m.string, 64) + case '$', '+', '=', '(': + return m.string, nil + case '#': + return m.integer == 1, nil + case ':': + return m.integer, nil + case '%': + vs := make(map[string]interface{}, len(m.values)/2) + for i := 0; i < len(m.values); i += 2 { + if v, err := m.values[i+1].ToAny(); err != nil && !IsRedisNil(err) { + vs[m.values[i].string] = err + } else { + vs[m.values[i].string] = v + } + } + return vs, nil + case '~', '*': + vs := make([]interface{}, len(m.values)) + for i := 0; i < len(m.values); i++ { + if v, err := m.values[i].ToAny(); err != nil && !IsRedisNil(err) { + vs[i] = err + } else { + vs[i] = v + } + } + return vs, nil + } + typ := m.typ + panic(fmt.Sprintf("redis message type %c is not a supported in ToAny", typ)) +} + +// IsCacheHit check if message is from client side cache +func (m *RedisMessage) IsCacheHit() bool { + return m.attrs == cacheMark +} + +// CacheTTL returns the remaining TTL in seconds of client side cache +func (m *RedisMessage) CacheTTL() int64 { + unix := int64(m.ttl[0]) | int64(m.ttl[1])<<8 | int64(m.ttl[2])<<16 | int64(m.ttl[3])<<24 | + int64(m.ttl[4])<<32 | int64(m.ttl[5])<<40 | int64(m.ttl[6])<<48 + if unix > 0 { + return unix - time.Now().Unix() + } + return 0 +} + +func (m *RedisMessage) setTTL(pttl int64) { + m.ttl[0] = byte(pttl) + m.ttl[1] = byte(pttl >> 8) + m.ttl[2] = byte(pttl >> 16) + m.ttl[3] = byte(pttl >> 24) + m.ttl[4] = byte(pttl >> 32) + m.ttl[5] = byte(pttl >> 40) + m.ttl[6] = byte(pttl >> 48) +} + +func toMap(values []RedisMessage) map[string]RedisMessage { + r := make(map[string]RedisMessage, len(values)/2) + for i := 0; i < len(values); i += 2 { + if values[i].typ == '$' || values[i].typ == '+' { + r[values[i].string] = values[i+1] + continue + } + typ := values[i].typ + panic(fmt.Sprintf("redis message type %c as map key is not supported", typ)) + } + return r +} + +func (m *RedisMessage) approximateSize() (s int) { + s += messageStructSize + s += len(m.string) + for _, v := range m.values { + s += v.approximateSize() + } + return +} diff --git a/vendor/github.com/rueian/rueidis/mux.go b/vendor/github.com/rueian/rueidis/mux.go new file mode 100644 index 00000000000..d8bf66f73ad --- /dev/null +++ b/vendor/github.com/rueian/rueidis/mux.go @@ -0,0 +1,319 @@ +package rueidis + +import ( + 
"context" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/rueian/rueidis/internal/cmds" + "github.com/rueian/rueidis/internal/util" +) + +type connFn func(dst string, opt *ClientOption) conn +type dialFn func(dst string, opt *ClientOption) (net.Conn, error) +type wireFn func() wire + +type singleconnect struct { + w wire + e error + g sync.WaitGroup +} + +type conn interface { + Do(ctx context.Context, cmd cmds.Completed) RedisResult + DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) RedisResult + DoMulti(ctx context.Context, multi ...cmds.Completed) []RedisResult + DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult + Receive(ctx context.Context, subscribe cmds.Completed, fn func(message PubSubMessage)) error + Info() map[string]RedisMessage + Error() error + Close() + Dial() error + Override(conn) + Acquire() wire + Store(w wire) + Addr() string +} + +var _ conn = (*mux)(nil) + +type mux struct { + init wire + dead wire + pool *pool + wireFn wireFn + dst string + wire []atomic.Value + sc []*singleconnect + mu []sync.Mutex +} + +func makeMux(dst string, option *ClientOption, dialFn dialFn) *mux { + dead := deadFn() + return newMux(dst, option, (*pipe)(nil), dead, func() (w wire) { + w, err := newPipe(func() (net.Conn, error) { + return dialFn(dst, option) + }, option) + if err != nil { + dead.error.Store(&errs{error: err}) + w = dead + } + return w + }) +} + +func newMux(dst string, option *ClientOption, init, dead wire, wireFn wireFn) *mux { + var multiplex int + if option.PipelineMultiplex >= 0 { + multiplex = 1 << option.PipelineMultiplex + } else { + multiplex = 1 + } + m := &mux{dst: dst, init: init, dead: dead, wireFn: wireFn, + wire: make([]atomic.Value, multiplex), + mu: make([]sync.Mutex, multiplex), + sc: make([]*singleconnect, multiplex), + } + for i := 0; i < len(m.wire); i++ { + m.wire[i].Store(init) + } + m.pool = newPool(option.BlockingPoolSize, dead, wireFn) + return m +} + +func (m *mux) Override(cc conn) { + if m2, ok := cc.(*mux); ok { + for i := 0; i < len(m.wire) && i < len(m2.wire); i++ { + m.wire[i].CompareAndSwap(m.init, m2.wire[i].Load()) + } + } +} + +func (m *mux) _pipe(i uint16) (w wire, err error) { + if w = m.wire[i].Load().(wire); w != m.init { + return w, nil + } + + m.mu[i].Lock() + sc := m.sc[i] + if m.sc[i] == nil { + m.sc[i] = &singleconnect{} + m.sc[i].g.Add(1) + } + m.mu[i].Unlock() + + if sc != nil { + sc.g.Wait() + return sc.w, sc.e + } + + if w = m.wire[i].Load().(wire); w == m.init { + if w = m.wireFn(); w != m.dead { + i := i + w := w + w.SetOnCloseHook(func(err error) { + if err != ErrClosing { + m.wire[i].CompareAndSwap(w, m.init) + } + }) + m.wire[i].Store(w) + } else { + err = w.Error() + } + } + + m.mu[i].Lock() + sc = m.sc[i] + m.sc[i] = nil + m.mu[i].Unlock() + + sc.w = w + sc.e = err + sc.g.Done() + + return w, err +} + +func (m *mux) pipe(i uint16) wire { + w, _ := m._pipe(i) + return w // this should never be nil +} + +func (m *mux) Dial() error { + _, err := m._pipe(0) + return err +} + +func (m *mux) Info() map[string]RedisMessage { + return m.pipe(0).Info() +} + +func (m *mux) Error() error { + return m.pipe(0).Error() +} + +func (m *mux) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { + if cmd.IsBlock() { + resp = m.blocking(ctx, cmd) + } else { + resp = m.pipeline(ctx, cmd) + } + return resp +} + +func (m *mux) DoMulti(ctx context.Context, multi ...cmds.Completed) (resp []RedisResult) { + for _, cmd := range multi { + if cmd.IsBlock() { + goto block + } + } + return 
m.pipelineMulti(ctx, multi) +block: + multi[0].ToBlock() // mark the first cmd as block if one of them is block to shortcut later check. + return m.blockingMulti(ctx, multi) +} + +func (m *mux) blocking(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { + wire := m.pool.Acquire() + resp = wire.Do(ctx, cmd) + if resp.NonRedisError() != nil { // abort the wire if blocking command return early (ex. context.DeadlineExceeded) + wire.Close() + } + m.pool.Store(wire) + return resp +} + +func (m *mux) blockingMulti(ctx context.Context, cmd []cmds.Completed) (resp []RedisResult) { + wire := m.pool.Acquire() + resp = wire.DoMulti(ctx, cmd...) + for _, res := range resp { + if res.NonRedisError() != nil { // abort the wire if blocking command return early (ex. context.DeadlineExceeded) + wire.Close() + break + } + } + m.pool.Store(wire) + return resp +} + +func (m *mux) pipeline(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { + slot := cmd.Slot() & uint16(len(m.wire)-1) + wire := m.pipe(slot) + if resp = wire.Do(ctx, cmd); isBroken(resp.NonRedisError(), wire) { + m.wire[slot].CompareAndSwap(wire, m.init) + } + return resp +} + +func (m *mux) pipelineMulti(ctx context.Context, cmd []cmds.Completed) (resp []RedisResult) { + slot := cmd[0].Slot() & uint16(len(m.wire)-1) + wire := m.pipe(slot) + resp = wire.DoMulti(ctx, cmd...) + for _, r := range resp { + if isBroken(r.NonRedisError(), wire) { + m.wire[slot].CompareAndSwap(wire, m.init) + return resp + } + } + return resp +} + +func (m *mux) DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) RedisResult { + slot := cmd.Slot() & uint16(len(m.wire)-1) + wire := m.pipe(slot) + resp := wire.DoCache(ctx, cmd, ttl) + if isBroken(resp.NonRedisError(), wire) { + m.wire[slot].CompareAndSwap(wire, m.init) + } + return resp +} + +func (m *mux) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (results []RedisResult) { + var slots map[uint16]int + var mask = uint16(len(m.wire) - 1) + + if mask == 0 { + return m.doMultiCache(ctx, 0, multi) + } + + slots = make(map[uint16]int, len(m.wire)) + for _, cmd := range multi { + slots[cmd.Cmd.Slot()&mask]++ + } + + if len(slots) == 1 { + return m.doMultiCache(ctx, multi[0].Cmd.Slot()&mask, multi) + } + + commands := make(map[uint16][]CacheableTTL, len(slots)) + cIndexes := make(map[uint16][]int, len(slots)) + for slot, count := range slots { + cIndexes[slot] = make([]int, 0, count) + commands[slot] = make([]CacheableTTL, 0, count) + } + for i, cmd := range multi { + slot := cmd.Cmd.Slot() & mask + commands[slot] = append(commands[slot], cmd) + cIndexes[slot] = append(cIndexes[slot], i) + } + + results = make([]RedisResult, len(multi)) + util.ParallelKeys(commands, func(slot uint16) { + for i, r := range m.doMultiCache(ctx, slot, commands[slot]) { + results[cIndexes[slot][i]] = r + } + }) + + return results +} + +func (m *mux) doMultiCache(ctx context.Context, slot uint16, multi []CacheableTTL) (resps []RedisResult) { + wire := m.pipe(slot) + resps = wire.DoMultiCache(ctx, multi...) 
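+	// If any result carries a connection-level error, reset the cached wire for
+	// this slot back to init so the next request re-establishes the pipe.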
+ for _, r := range resps { + if isBroken(r.NonRedisError(), wire) { + m.wire[slot].CompareAndSwap(wire, m.init) + return resps + } + } + return resps +} + +func (m *mux) Receive(ctx context.Context, subscribe cmds.Completed, fn func(message PubSubMessage)) error { + slot := subscribe.Slot() & uint16(len(m.wire)-1) + wire := m.pipe(slot) + err := wire.Receive(ctx, subscribe, fn) + if isBroken(err, wire) { + m.wire[slot].CompareAndSwap(wire, m.init) + } + return err +} + +func (m *mux) Acquire() wire { + return m.pool.Acquire() +} + +func (m *mux) Store(w wire) { + w.SetPubSubHooks(PubSubHooks{}) + w.CleanSubscriptions() + m.pool.Store(w) +} + +func (m *mux) Close() { + for i := 0; i < len(m.wire); i++ { + if prev := m.wire[i].Swap(m.dead).(wire); prev != m.init && prev != m.dead { + prev.Close() + } + } + m.pool.Close() +} + +func (m *mux) Addr() string { + return m.dst +} + +func isBroken(err error, w wire) bool { + return err != nil && err != ErrClosing && w.Error() != nil +} diff --git a/vendor/github.com/rueian/rueidis/pipe.go b/vendor/github.com/rueian/rueidis/pipe.go new file mode 100644 index 00000000000..e33378facd0 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/pipe.go @@ -0,0 +1,1228 @@ +package rueidis + +import ( + "bufio" + "context" + "errors" + "fmt" + "net" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/rueian/rueidis/internal/cmds" +) + +var noHello = regexp.MustCompile("unknown command .?HELLO.?") + +type wire interface { + Do(ctx context.Context, cmd cmds.Completed) RedisResult + DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) RedisResult + DoMulti(ctx context.Context, multi ...cmds.Completed) []RedisResult + DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult + Receive(ctx context.Context, subscribe cmds.Completed, fn func(message PubSubMessage)) error + Info() map[string]RedisMessage + Error() error + Close() + + CleanSubscriptions() + SetPubSubHooks(hooks PubSubHooks) <-chan error + SetOnCloseHook(fn func(error)) +} + +var _ wire = (*pipe)(nil) + +type pipe struct { + conn net.Conn + error atomic.Value + clhks atomic.Value + pshks atomic.Value + queue queue + cache cache + r *bufio.Reader + w *bufio.Writer + close chan struct{} + onInvalidations func([]RedisMessage) + r2psFn func() (p *pipe, err error) + r2pipe *pipe + ssubs *subs + nsubs *subs + psubs *subs + info map[string]RedisMessage + timeout time.Duration + pinggap time.Duration + maxFlushDelay time.Duration + once sync.Once + r2mu sync.Mutex + version int32 + _ [10]int32 + blcksig int32 + state int32 + waits int32 + recvs int32 + r2ps bool +} + +func newPipe(connFn func() (net.Conn, error), option *ClientOption) (p *pipe, err error) { + return _newPipe(connFn, option, false) +} + +func _newPipe(connFn func() (net.Conn, error), option *ClientOption, r2ps bool) (p *pipe, err error) { + conn, err := connFn() + if err != nil { + return nil, err + } + p = &pipe{ + conn: conn, + queue: newRing(option.RingScaleEachConn), + r: bufio.NewReaderSize(conn, option.ReadBufferEachConn), + w: bufio.NewWriterSize(conn, option.WriteBufferEachConn), + + nsubs: newSubs(), + psubs: newSubs(), + ssubs: newSubs(), + close: make(chan struct{}), + + timeout: option.ConnWriteTimeout, + pinggap: option.Dialer.KeepAlive, + maxFlushDelay: option.MaxFlushDelay, + + r2ps: r2ps, + } + if !r2ps { + p.r2psFn = func() (p *pipe, err error) { + return _newPipe(connFn, option, true) + } + } + if !option.DisableCache { + p.cache = 
newLRU(option.CacheSizeEachConn) + } + p.pshks.Store(emptypshks) + p.clhks.Store(emptyclhks) + + helloCmd := []string{"HELLO", "3"} + if option.Password != "" && option.Username == "" { + helloCmd = append(helloCmd, "AUTH", "default", option.Password) + } else if option.Username != "" { + helloCmd = append(helloCmd, "AUTH", option.Username, option.Password) + } + if option.ClientName != "" { + helloCmd = append(helloCmd, "SETNAME", option.ClientName) + } + + init := make([][]string, 0, 3) + if option.ClientTrackingOptions == nil { + init = append(init, helloCmd, []string{"CLIENT", "TRACKING", "ON", "OPTIN"}) + } else { + init = append(init, helloCmd, append([]string{"CLIENT", "TRACKING", "ON"}, option.ClientTrackingOptions...)) + } + if option.DisableCache { + init = init[:1] + } + if option.SelectDB != 0 { + init = append(init, []string{"SELECT", strconv.Itoa(option.SelectDB)}) + } + + timeout := option.Dialer.Timeout + if timeout <= 0 { + timeout = DefaultDialTimeout + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var r2 bool + if !r2ps { + for i, r := range p.DoMulti(ctx, cmds.NewMultiCompleted(init)...) { + if i == 0 { + p.info, err = r.AsMap() + } else { + err = r.Error() + } + if err != nil { + if re, ok := err.(*RedisError); ok { + if !r2 && noHello.MatchString(re.string) { + r2 = true + continue + } else if strings.Contains(re.string, "CLIENT") { + err = fmt.Errorf("%s: %w", re.string, ErrNoCache) + } else if r2 { + continue + } + } + p.Close() + return nil, err + } + } + } + if proto := p.info["proto"]; proto.integer < 3 { + r2 = true + } + if !r2 && !r2ps { + if ver, ok := p.info["version"]; ok { + if v := strings.Split(ver.string, "."); len(v) != 0 { + vv, _ := strconv.ParseInt(v[0], 10, 32) + p.version = int32(vv) + } + } + p.onInvalidations = option.OnInvalidations + } else { + if !option.DisableCache { + p.Close() + return nil, ErrNoCache + } + init = init[:0] + if option.Password != "" && option.Username == "" { + init = append(init, []string{"AUTH", option.Password}) + } else if option.Username != "" { + init = append(init, []string{"AUTH", option.Username, option.Password}) + } + if option.ClientName != "" { + init = append(init, []string{"CLIENT", "SETNAME", option.ClientName}) + } + if option.SelectDB != 0 { + init = append(init, []string{"SELECT", strconv.Itoa(option.SelectDB)}) + } + if len(init) != 0 { + for _, r := range p.DoMulti(ctx, cmds.NewMultiCompleted(init)...) 
{ + if err = r.Error(); err != nil { + p.Close() + return nil, err + } + } + } + p.version = 5 + } + if p.onInvalidations != nil || option.AlwaysPipelining { + p.background() + } + if p.timeout > 0 && p.pinggap > 0 { + go p.backgroundPing() + } + return p, nil +} + +func (p *pipe) background() { + atomic.CompareAndSwapInt32(&p.state, 0, 1) + p.once.Do(func() { go p._background() }) +} + +func (p *pipe) _exit(err error) { + p.error.CompareAndSwap(nil, &errs{error: err}) + atomic.CompareAndSwapInt32(&p.state, 1, 2) // stop accepting new requests + _ = p.conn.Close() // force both read & write goroutine to exit + p.clhks.Load().(func(error))(err) +} + +func (p *pipe) _background() { + go func() { + p._exit(p._backgroundWrite()) + close(p.close) + }() + { + p._exit(p._backgroundRead()) + atomic.CompareAndSwapInt32(&p.state, 2, 3) // make write goroutine to exit + atomic.AddInt32(&p.waits, 1) + go func() { + <-p.queue.PutOne(cmds.QuitCmd) + atomic.AddInt32(&p.waits, -1) + }() + } + + p.nsubs.Close() + p.psubs.Close() + p.ssubs.Close() + if old := p.pshks.Swap(emptypshks).(*pshks); old.close != nil { + old.close <- p.Error() + close(old.close) + } + + var ( + ones = make([]cmds.Completed, 1) + multi []cmds.Completed + ch chan RedisResult + cond *sync.Cond + ) + + // clean up cache and free pending calls + if p.cache != nil { + p.cache.FreeAndClose(ErrDoCacheAborted) + } + if p.onInvalidations != nil { + p.onInvalidations(nil) + } + for atomic.LoadInt32(&p.waits) != 0 { + select { + case <-p.close: + _, _, _ = p.queue.NextWriteCmd() + default: + } + if ones[0], multi, ch, cond = p.queue.NextResultCh(); ch != nil { + if multi == nil { + multi = ones + } + for range multi { + ch <- newErrResult(p.Error()) + } + cond.L.Unlock() + cond.Signal() + } else { + cond.L.Unlock() + cond.Signal() + runtime.Gosched() + } + } + <-p.close + atomic.StoreInt32(&p.state, 4) +} + +func (p *pipe) _backgroundWrite() (err error) { + var ( + ones = make([]cmds.Completed, 1) + multi []cmds.Completed + ch chan RedisResult + + flushDelay = p.maxFlushDelay + flushStart = time.Time{} + ) + + for atomic.LoadInt32(&p.state) < 3 { + if ones[0], multi, ch = p.queue.NextWriteCmd(); ch == nil { + if flushDelay != 0 { + flushStart = time.Now() + } + if p.w.Buffered() == 0 { + err = p.Error() + } else { + err = p.w.Flush() + } + if err == nil { + if atomic.LoadInt32(&p.state) == 1 { + ones[0], multi, ch = p.queue.WaitForWrite() + } else { + runtime.Gosched() + continue + } + if flushDelay != 0 && atomic.LoadInt32(&p.waits) > 1 { // do not delay for sequential usage + time.Sleep(flushDelay - time.Since(flushStart)) // ref: https://github.com/rueian/rueidis/issues/156 + } + } + } + if ch != nil && multi == nil { + multi = ones + } + for _, cmd := range multi { + err = writeCmd(p.w, cmd.Commands()) + } + if err != nil { + if err != ErrClosing { // ignore ErrClosing to allow final QUIT command to be sent + return + } + runtime.Gosched() + } + } + return +} + +func (p *pipe) _backgroundRead() (err error) { + var ( + msg RedisMessage + cond *sync.Cond + ones = make([]cmds.Completed, 1) + multi []cmds.Completed + ch chan RedisResult + ff int // fulfilled count + skip int // skip rest push messages + ver = p.version + prply bool // push reply + unsub bool // unsubscribe notification + r2ps = p.r2ps + ) + + defer func() { + if err != nil && ff < len(multi) { + for ; ff < len(multi); ff++ { + ch <- newErrResult(err) + } + cond.L.Unlock() + cond.Signal() + } + }() + + for { + if msg, err = readNextMessage(p.r); err != nil { + return + } + if 
msg.typ == '>' || (r2ps && len(msg.values) != 0 && msg.values[0].string != "pong") { + if prply, unsub = p.handlePush(msg.values); !prply { + continue + } + if skip > 0 { + skip-- + prply = false + unsub = false + continue + } + } else if ver == 6 && len(msg.values) != 0 { + // This is a workaround for Redis 6's broken invalidation protocol: https://github.com/redis/redis/issues/8935 + // When Redis 6 handles MULTI, MGET, or other multi-keys command, + // it will send invalidation message immediately if it finds the keys are expired, thus causing the multi-keys command response to be broken. + // We fix this by fetching the next message and patch it back to the response. + i := 0 + for j, v := range msg.values { + if v.typ == '>' { + p.handlePush(v.values) + } else { + if i != j { + msg.values[i] = v + } + i++ + } + } + for ; i < len(msg.values); i++ { + if msg.values[i], err = readNextMessage(p.r); err != nil { + return + } + } + } + // if unfulfilled multi commands are lead by opt-in and get success response + if ff == len(multi)-1 && multi[0].IsOptIn() && len(msg.values) >= 2 { + if cacheable := cmds.Cacheable(multi[len(multi)-2]); cacheable.IsMGet() { + cc := cacheable.MGetCacheCmd() + for i, cp := range msg.values[len(msg.values)-1].values { + cp.attrs = cacheMark + p.cache.Update(cacheable.MGetCacheKey(i), cc, cp, msg.values[i].integer) + } + } else { + for i := 1; i < len(msg.values); i += 2 { + cacheable = cmds.Cacheable(multi[i+2]) + ck, cc := cacheable.CacheKey() + cp := msg.values[i] + cp.attrs = cacheMark + p.cache.Update(ck, cc, cp, msg.values[i-1].integer) + } + } + } + if ff == len(multi) { + ff = 0 + ones[0], multi, ch, cond = p.queue.NextResultCh() // ch should not be nil, otherwise it must be a protocol bug + if ch == nil { + cond.L.Unlock() + // Redis will send sunsubscribe notification proactively in the event of slot migration. + // We should ignore them and go fetch next message. + // We also treat all the other unsubscribe notifications just like sunsubscribe, + // so that we don't need to track how many channels we have subscribed to deal with wildcard unsubscribe command + if unsub { + prply = false + unsub = false + continue + } + panic(protocolbug) + } + if multi == nil { + multi = ones + } + } + if prply { + // Redis will send sunsubscribe notification proactively in the event of slot migration. + // We should ignore them and go fetch next message. 
+ // We also treat all the other unsubscribe notifications just like sunsubscribe, + // so that we don't need to track how many channels we have subscribed to deal with wildcard unsubscribe command + if unsub && (!multi[ff].NoReply() || !strings.HasSuffix(multi[ff].Commands()[0], "UNSUBSCRIBE")) { + prply = false + unsub = false + continue + } + prply = false + unsub = false + if !multi[ff].NoReply() { + panic(protocolbug) + } + skip = len(multi[ff].Commands()) - 2 + msg = RedisMessage{} // override successful subscribe/unsubscribe response to empty + } + ch <- newResult(msg, err) + if ff++; ff == len(multi) { + cond.L.Unlock() + cond.Signal() + } + } +} + +func (p *pipe) backgroundPing() { + var err error + var prev, recv int32 + + ticker := time.NewTicker(p.pinggap) + defer ticker.Stop() + for ; err == nil; prev = recv { + select { + case <-ticker.C: + recv = atomic.LoadInt32(&p.recvs) + if recv != prev || atomic.LoadInt32(&p.blcksig) != 0 || (atomic.LoadInt32(&p.state) == 0 && atomic.LoadInt32(&p.waits) != 0) { + continue + } + ch := make(chan error, 1) + tm := time.NewTimer(p.timeout) + go func() { ch <- p.Do(context.Background(), cmds.PingCmd).NonRedisError() }() + select { + case <-tm.C: + err = context.DeadlineExceeded + case err = <-ch: + tm.Stop() + } + if err != nil && atomic.LoadInt32(&p.blcksig) != 0 { + err = nil + } + case <-p.close: + return + } + } + if err != ErrClosing { + p._exit(err) + } +} + +func (p *pipe) handlePush(values []RedisMessage) (reply bool, unsubscribe bool) { + if len(values) < 2 { + return + } + // TODO: handle other push data + // tracking-redir-broken + // server-cpu-usage + switch values[0].string { + case "invalidate": + if p.cache != nil { + if values[1].IsNil() { + p.cache.Delete(nil) + } else { + p.cache.Delete(values[1].values) + } + } + if p.onInvalidations != nil { + if values[1].IsNil() { + p.onInvalidations(nil) + } else { + p.onInvalidations(values[1].values) + } + } + case "message": + if len(values) >= 3 { + m := PubSubMessage{Channel: values[1].string, Message: values[2].string} + p.nsubs.Publish(values[1].string, m) + p.pshks.Load().(*pshks).hooks.OnMessage(m) + } + case "pmessage": + if len(values) >= 4 { + m := PubSubMessage{Pattern: values[1].string, Channel: values[2].string, Message: values[3].string} + p.psubs.Publish(values[1].string, m) + p.pshks.Load().(*pshks).hooks.OnMessage(m) + } + case "smessage": + if len(values) >= 3 { + m := PubSubMessage{Channel: values[1].string, Message: values[2].string} + p.ssubs.Publish(values[1].string, m) + p.pshks.Load().(*pshks).hooks.OnMessage(m) + } + case "unsubscribe": + p.nsubs.Unsubscribe(values[1].string) + if len(values) >= 3 { + p.pshks.Load().(*pshks).hooks.OnSubscription(PubSubSubscription{Kind: values[0].string, Channel: values[1].string, Count: values[2].integer}) + } + return true, true + case "punsubscribe": + p.psubs.Unsubscribe(values[1].string) + if len(values) >= 3 { + p.pshks.Load().(*pshks).hooks.OnSubscription(PubSubSubscription{Kind: values[0].string, Channel: values[1].string, Count: values[2].integer}) + } + return true, true + case "sunsubscribe": + p.ssubs.Unsubscribe(values[1].string) + if len(values) >= 3 { + p.pshks.Load().(*pshks).hooks.OnSubscription(PubSubSubscription{Kind: values[0].string, Channel: values[1].string, Count: values[2].integer}) + } + return true, true + case "subscribe", "psubscribe", "ssubscribe": + if len(values) >= 3 { + p.pshks.Load().(*pshks).hooks.OnSubscription(PubSubSubscription{Kind: values[0].string, Channel: values[1].string, Count: 
values[2].integer}) + } + return true, false + } + return false, false +} +func (p *pipe) _r2pipe() (r2p *pipe) { + p.r2mu.Lock() + if p.r2pipe != nil { + r2p = p.r2pipe + } else { + var err error + if r2p, err = p.r2psFn(); err != nil { + r2p = epipeFn(err) + } else { + p.r2pipe = r2p + } + } + p.r2mu.Unlock() + return r2p +} + +func (p *pipe) Receive(ctx context.Context, subscribe cmds.Completed, fn func(message PubSubMessage)) error { + if p.nsubs == nil || p.psubs == nil || p.ssubs == nil { + return p.Error() + } + + if p.version < 6 && p.r2psFn != nil { + return p._r2pipe().Receive(ctx, subscribe, fn) + } + + var sb *subs + cmd, args := subscribe.Commands()[0], subscribe.Commands()[1:] + + switch cmd { + case "SUBSCRIBE": + sb = p.nsubs + case "PSUBSCRIBE": + sb = p.psubs + case "SSUBSCRIBE": + sb = p.ssubs + default: + panic(wrongreceive) + } + + if ch, cancel := sb.Subscribe(args); ch != nil { + defer cancel() + if err := p.Do(ctx, subscribe).Error(); err != nil { + return err + } + if ctxCh := ctx.Done(); ctxCh == nil { + for msg := range ch { + fn(msg) + } + } else { + next: + select { + case msg, ok := <-ch: + if ok { + fn(msg) + goto next + } + case <-ctx.Done(): + return ctx.Err() + } + } + } + return p.Error() +} + +func (p *pipe) CleanSubscriptions() { + if atomic.LoadInt32(&p.state) == 1 { + if p.version >= 7 { + p.DoMulti(context.Background(), cmds.UnsubscribeCmd, cmds.PUnsubscribeCmd, cmds.SUnsubscribeCmd) + } else { + p.DoMulti(context.Background(), cmds.UnsubscribeCmd, cmds.PUnsubscribeCmd) + } + } +} + +func (p *pipe) SetPubSubHooks(hooks PubSubHooks) <-chan error { + if p.version < 6 && p.r2psFn != nil { + return p._r2pipe().SetPubSubHooks(hooks) + } + if hooks.isZero() { + if old := p.pshks.Swap(emptypshks).(*pshks); old.close != nil { + close(old.close) + } + return nil + } + if hooks.OnMessage == nil { + hooks.OnMessage = func(m PubSubMessage) {} + } + if hooks.OnSubscription == nil { + hooks.OnSubscription = func(s PubSubSubscription) {} + } + ch := make(chan error, 1) + if old := p.pshks.Swap(&pshks{hooks: hooks, close: ch}).(*pshks); old.close != nil { + close(old.close) + } + if err := p.Error(); err != nil { + if old := p.pshks.Swap(emptypshks).(*pshks); old.close != nil { + old.close <- err + close(old.close) + } + } + if atomic.AddInt32(&p.waits, 1) == 1 && atomic.LoadInt32(&p.state) == 0 { + p.background() + } + atomic.AddInt32(&p.waits, -1) + return ch +} + +func (p *pipe) SetOnCloseHook(fn func(error)) { + p.clhks.Store(fn) +} + +func (p *pipe) Info() map[string]RedisMessage { + return p.info +} + +func (p *pipe) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { + if err := ctx.Err(); err != nil { + return newErrResult(ctx.Err()) + } + + if cmd.IsBlock() { + atomic.AddInt32(&p.blcksig, 1) + defer func() { + if resp.err == nil { + atomic.AddInt32(&p.blcksig, -1) + } + }() + } + + if cmd.NoReply() { + if p.version < 6 && p.r2psFn != nil { + return p._r2pipe().Do(ctx, cmd) + } + } + + waits := atomic.AddInt32(&p.waits, 1) // if this is 1, and background worker is not started, no need to queue + state := atomic.LoadInt32(&p.state) + + if state == 1 { + goto queue + } + + if state == 0 { + if waits != 1 { + goto queue + } + if cmd.NoReply() { + p.background() + goto queue + } + dl, ok := ctx.Deadline() + if !ok && ctx.Done() != nil { + p.background() + goto queue + } + resp = p.syncDo(dl, ok, cmd) + } else { + resp = newErrResult(p.Error()) + } + if left := atomic.AddInt32(&p.waits, -1); state == 0 && waits == 1 && left != 0 { + p.background() + 
} + atomic.AddInt32(&p.recvs, 1) + return resp + +queue: + ch := p.queue.PutOne(cmd) + if ctxCh := ctx.Done(); ctxCh == nil { + resp = <-ch + atomic.AddInt32(&p.waits, -1) + atomic.AddInt32(&p.recvs, 1) + } else { + select { + case resp = <-ch: + atomic.AddInt32(&p.waits, -1) + atomic.AddInt32(&p.recvs, 1) + case <-ctxCh: + resp = newErrResult(ctx.Err()) + go func() { + <-ch + atomic.AddInt32(&p.waits, -1) + atomic.AddInt32(&p.recvs, 1) + }() + } + } + return resp +} + +func (p *pipe) DoMulti(ctx context.Context, multi ...cmds.Completed) []RedisResult { + resp := make([]RedisResult, len(multi)) + if err := ctx.Err(); err != nil { + for i := 0; i < len(resp); i++ { + resp[i] = newErrResult(err) + } + return resp + } + + isOptIn := multi[0].IsOptIn() // len(multi) > 0 should have already been checked by upper layer + noReply := 0 + isBlock := false + + for _, cmd := range multi { + if cmd.NoReply() { + noReply++ + } + } + + if p.version < 6 && noReply != 0 { + if noReply != len(multi) { + for i := 0; i < len(resp); i++ { + resp[i] = newErrResult(ErrRESP2PubSubMixed) + } + return resp + } else if p.r2psFn != nil { + return p._r2pipe().DoMulti(ctx, multi...) + } + } + + for _, cmd := range multi { + if cmd.IsBlock() { + isBlock = true + break + } + } + + if isBlock { + atomic.AddInt32(&p.blcksig, 1) + defer func() { + for _, r := range resp { + if r.err != nil { + return + } + } + atomic.AddInt32(&p.blcksig, -1) + }() + } + + waits := atomic.AddInt32(&p.waits, 1) // if this is 1, and background worker is not started, no need to queue + state := atomic.LoadInt32(&p.state) + + if state == 1 { + goto queue + } + + if state == 0 { + if waits != 1 { + goto queue + } + if isOptIn || noReply != 0 { + p.background() + goto queue + } + dl, ok := ctx.Deadline() + if !ok && ctx.Done() != nil { + p.background() + goto queue + } + resp = p.syncDoMulti(dl, ok, resp, multi) + } else { + err := newErrResult(p.Error()) + for i := 0; i < len(resp); i++ { + resp[i] = err + } + } + if left := atomic.AddInt32(&p.waits, -1); state == 0 && waits == 1 && left != 0 { + p.background() + } + atomic.AddInt32(&p.recvs, 1) + return resp + +queue: + ch := p.queue.PutMulti(multi) + var i int + if ctxCh := ctx.Done(); ctxCh == nil { + for ; i < len(resp); i++ { + resp[i] = <-ch + } + } else { + for ; i < len(resp); i++ { + select { + case resp[i] = <-ch: + case <-ctxCh: + goto abort + } + } + } + atomic.AddInt32(&p.waits, -1) + atomic.AddInt32(&p.recvs, 1) + return resp +abort: + go func(i int) { + for ; i < len(resp); i++ { + <-ch + } + atomic.AddInt32(&p.waits, -1) + atomic.AddInt32(&p.recvs, 1) + }(i) + err := newErrResult(ctx.Err()) + for ; i < len(resp); i++ { + resp[i] = err + } + return resp +} + +func (p *pipe) syncDo(dl time.Time, dlOk bool, cmd cmds.Completed) (resp RedisResult) { + if dlOk { + p.conn.SetDeadline(dl) + defer p.conn.SetDeadline(time.Time{}) + } else if p.timeout > 0 && !cmd.IsBlock() { + p.conn.SetDeadline(time.Now().Add(p.timeout)) + defer p.conn.SetDeadline(time.Time{}) + } + + var msg RedisMessage + err := writeCmd(p.w, cmd.Commands()) + if err == nil { + if err = p.w.Flush(); err == nil { + msg, err = syncRead(p.r) + } + } + if err != nil { + if errors.Is(err, os.ErrDeadlineExceeded) { + err = context.DeadlineExceeded + } + p.error.CompareAndSwap(nil, &errs{error: err}) + atomic.CompareAndSwapInt32(&p.state, 1, 3) // stopping the worker and let it do the cleaning + p.background() // start the background worker + } + return newResult(msg, err) +} + +func (p *pipe) syncDoMulti(dl time.Time, dlOk 
bool, resp []RedisResult, multi []cmds.Completed) []RedisResult { + if dlOk { + p.conn.SetDeadline(dl) + defer p.conn.SetDeadline(time.Time{}) + } else if p.timeout > 0 { + for _, cmd := range multi { + if cmd.IsBlock() { + goto process + } + } + p.conn.SetDeadline(time.Now().Add(p.timeout)) + defer p.conn.SetDeadline(time.Time{}) + } +process: + var err error + var msg RedisMessage + + for _, cmd := range multi { + _ = writeCmd(p.w, cmd.Commands()) + } + if err = p.w.Flush(); err != nil { + goto abort + } + for i := 0; i < len(resp); i++ { + if msg, err = syncRead(p.r); err != nil { + goto abort + } + resp[i] = newResult(msg, err) + } + return resp +abort: + if errors.Is(err, os.ErrDeadlineExceeded) { + err = context.DeadlineExceeded + } + p.error.CompareAndSwap(nil, &errs{error: err}) + atomic.CompareAndSwapInt32(&p.state, 1, 3) // stopping the worker and let it do the cleaning + p.background() // start the background worker + for i := 0; i < len(resp); i++ { + resp[i] = newErrResult(err) + } + return resp +} + +func syncRead(r *bufio.Reader) (m RedisMessage, err error) { +next: + if m, err = readNextMessage(r); err != nil { + return m, err + } + if m.typ == '>' { + goto next + } + return m, nil +} + +func remainTTL(now time.Time, ttl time.Duration, pttl int64) int64 { + if pttl >= 0 { + if sttl := time.Duration(pttl) * time.Millisecond; sttl < ttl { + ttl = sttl + } + } + return now.Add(ttl).Unix() +} + +func (p *pipe) DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) RedisResult { + if p.cache == nil { + return p.Do(ctx, cmds.Completed(cmd)) + } + if cmd.IsMGet() { + return p.doCacheMGet(ctx, cmd, ttl) + } + ck, cc := cmd.CacheKey() + now := time.Now() + if v, entry := p.cache.GetOrPrepare(ck, cc, now, ttl); v.typ != 0 { + return newResult(v, nil) + } else if entry != nil { + return newResult(entry.Wait(ctx)) + } + resp := p.DoMulti( + ctx, + cmds.OptInCmd, + cmds.MultiCmd, + cmds.NewCompleted([]string{"PTTL", ck}), + cmds.Completed(cmd), + cmds.ExecCmd, + ) + exec, err := resp[4].ToArray() + if err != nil { + if _, ok := err.(*RedisError); ok { + err = ErrDoCacheAborted + } + p.cache.Cancel(ck, cc, err) + return newErrResult(err) + } + exec[1].setTTL(remainTTL(now, ttl, exec[0].integer)) + return newResult(exec[1], nil) +} + +func (p *pipe) doCacheMGet(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) RedisResult { + commands := cmd.Commands() + entries := make(map[int]*entry) + builder := cmds.NewBuilder(cmds.InitSlot) + result := RedisResult{val: RedisMessage{typ: '*', values: nil}} + mgetcc := cmd.MGetCacheCmd() + keys := len(commands) - 1 + if mgetcc[0] == 'J' { + keys-- // the last one of JSON.MGET is a path, not a key + } + var now = time.Now() + var rewrite cmds.Arbitrary + for i, key := range commands[1 : keys+1] { + v, entry := p.cache.GetOrPrepare(key, mgetcc, now, ttl) + if v.typ != 0 { // cache hit for one key + if len(result.val.values) == 0 { + result.val.values = make([]RedisMessage, keys) + } + result.val.values[i] = v + continue + } + if entry != nil { + entries[i] = entry // store entries for later entry.Wait() to avoid MGET deadlock each others. 
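remainTTL above decides how long a DoCache entry may live on the client: the caller's TTL is capped by the server-side PTTL (milliseconds) fetched inside the same MULTI/EXEC block, and the result is stored as an absolute unix expiry. A standalone sketch of that arithmetic, with illustrative numbers only:

package main

import (
    "fmt"
    "time"
)

// remainTTLUnix mirrors the remainTTL helper above: cap the client-side TTL by
// the server-side PTTL (milliseconds) and return the absolute expiry in seconds.
func remainTTLUnix(now time.Time, clientTTL time.Duration, pttlMillis int64) int64 {
    if pttlMillis >= 0 {
        if serverTTL := time.Duration(pttlMillis) * time.Millisecond; serverTTL < clientTTL {
            clientTTL = serverTTL
        }
    }
    return now.Add(clientTTL).Unix()
}

func main() {
    now := time.Unix(1000000, 0)
    // The caller asks for 5 minutes, but the key only has 30s left on the server,
    // so the cached entry expires in 30s.
    fmt.Println(remainTTLUnix(now, 5*time.Minute, 30000)) // 1000030
    // PTTL -1 (no server-side expiry) keeps the full client-side TTL.
    fmt.Println(remainTTLUnix(now, 5*time.Minute, -1)) // 1000300
}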
+ continue + } + if rewrite.IsZero() { + rewrite = builder.Arbitrary(commands[0]) + } + rewrite = rewrite.Args(key) + } + + var partial []RedisMessage + if !rewrite.IsZero() { + var rewritten cmds.Completed + var keys int + if mgetcc[0] == 'J' { // rewrite JSON.MGET path + rewritten = rewrite.Args(commands[len(commands)-1]).MultiGet() + keys = len(rewritten.Commands()) - 2 + } else { + rewritten = rewrite.MultiGet() + keys = len(rewritten.Commands()) - 1 + } + + multi := make([]cmds.Completed, 0, keys+4) + multi = append(multi, cmds.OptInCmd, cmds.MultiCmd) + for _, key := range rewritten.Commands()[1 : keys+1] { + multi = append(multi, builder.Pttl().Key(key).Build()) + } + multi = append(multi, rewritten, cmds.ExecCmd) + + resp := p.DoMulti(ctx, multi...) + exec, err := resp[len(multi)-1].ToArray() + if err != nil { + if _, ok := err.(*RedisError); ok { + err = ErrDoCacheAborted + } + for _, key := range rewritten.Commands()[1 : keys+1] { + p.cache.Cancel(key, mgetcc, err) + } + return newErrResult(err) + } + defer func() { + for _, cmd := range multi[2 : len(multi)-1] { + cmds.Put(cmd.CommandSlice()) + } + }() + last := len(exec) - 1 + for i := range exec[last].values { + exec[last].values[i].setTTL(remainTTL(now, ttl, exec[i].integer)) + } + if len(rewritten.Commands()) == len(commands) { // all cache miss + return newResult(exec[last], nil) + } + partial = exec[last].values + } else { // all cache hit + result.val.attrs = cacheMark + } + + if len(result.val.values) == 0 { + result.val.values = make([]RedisMessage, keys) + } + for i, entry := range entries { + v, err := entry.Wait(ctx) + if err != nil { + return newErrResult(err) + } + result.val.values[i] = v + } + + j := 0 + for _, ret := range partial { + for ; j < len(result.val.values); j++ { + if result.val.values[j].typ == 0 { + result.val.values[j] = ret + break + } + } + } + return result +} + +func (p *pipe) DoMultiCache(ctx context.Context, multi ...CacheableTTL) []RedisResult { + if p.cache == nil { + commands := make([]cmds.Completed, len(multi)) + for i, ct := range multi { + commands[i] = cmds.Completed(ct.Cmd) + } + return p.DoMulti(ctx, commands...) + } + results := make([]RedisResult, len(multi)) + entries := make(map[int]*entry) + missing := []cmds.Completed{cmds.OptInCmd, cmds.MultiCmd} + now := time.Now() + for i, ct := range multi { + if ct.Cmd.IsMGet() { + panic(panicmgetcsc) + } + ck, cc := ct.Cmd.CacheKey() + v, entry := p.cache.GetOrPrepare(ck, cc, now, ct.TTL) + if v.typ != 0 { // cache hit for one key + results[i] = newResult(v, nil) + continue + } + if entry != nil { + entries[i] = entry // store entries for later entry.Wait() to avoid MGET deadlock each others. + continue + } + missing = append(missing, cmds.NewCompleted([]string{"PTTL", ck}), cmds.Completed(ct.Cmd)) + } + + var exec []RedisMessage + var err error + if len(missing) > 2 { + missing = append(missing, cmds.ExecCmd) + resp := p.DoMulti(ctx, missing...) 
+ exec, err = resp[len(missing)-1].ToArray() + if err != nil { + if _, ok := err.(*RedisError); ok { + err = ErrDoCacheAborted + } + for i := 3; i < len(missing); i += 2 { + cacheable := cmds.Cacheable(missing[i]) + ck, cc := cacheable.CacheKey() + p.cache.Cancel(ck, cc, err) + } + for i := range results { + results[i] = newErrResult(err) + } + return results + } + } + + for i, entry := range entries { + results[i] = newResult(entry.Wait(ctx)) + } + + j := 0 + for i := 1; i < len(exec); i += 2 { + for ; j < len(results); j++ { + if results[j].val.typ == 0 && results[j].err == nil { + exec[i].setTTL(remainTTL(now, multi[j].TTL, exec[i-1].integer)) + results[j] = newResult(exec[i], nil) + break + } + } + } + return results +} + +func (p *pipe) Error() error { + if err, ok := p.error.Load().(*errs); ok { + return err.error + } + return nil +} + +func (p *pipe) Close() { + p.error.CompareAndSwap(nil, errClosing) + block := atomic.AddInt32(&p.blcksig, 1) + waits := atomic.AddInt32(&p.waits, 1) + stopping1 := atomic.CompareAndSwapInt32(&p.state, 0, 2) + stopping2 := atomic.CompareAndSwapInt32(&p.state, 1, 2) + if p.queue != nil { + if stopping1 && waits == 1 { // make sure there is no sync read + p.background() + } + if block == 1 && (stopping1 || stopping2) { // make sure there is no block cmd + <-p.queue.PutOne(cmds.QuitCmd) + } + } + atomic.AddInt32(&p.waits, -1) + atomic.AddInt32(&p.blcksig, -1) + if p.conn != nil { + p.conn.Close() + } + p.r2mu.Lock() + if p.r2pipe != nil { + p.r2pipe.Close() + } + p.r2mu.Unlock() +} + +type pshks struct { + hooks PubSubHooks + close chan error +} + +var emptypshks = &pshks{ + hooks: PubSubHooks{ + OnMessage: func(m PubSubMessage) {}, + OnSubscription: func(s PubSubSubscription) {}, + }, + close: nil, +} + +var emptyclhks = func(error) {} + +func deadFn() *pipe { + dead := &pipe{state: 3} + dead.error.Store(errClosing) + dead.pshks.Store(emptypshks) + dead.clhks.Store(emptyclhks) + return dead +} + +func epipeFn(err error) *pipe { + dead := &pipe{state: 3} + dead.error.Store(&errs{error: err}) + dead.pshks.Store(emptypshks) + dead.clhks.Store(emptyclhks) + return dead +} + +const ( + protocolbug = "protocol bug, message handled out of order" + wrongreceive = "only SUBSCRIBE, SSUBSCRIBE, or PSUBSCRIBE command are allowed in Receive" + panicmgetcsc = "MGET and JSON.MGET in DoMultiCache are not implemented, use DoCache instead" +) + +var cacheMark = &(RedisMessage{}) +var errClosing = &errs{error: ErrClosing} + +type errs struct{ error } diff --git a/vendor/github.com/rueian/rueidis/pool.go b/vendor/github.com/rueian/rueidis/pool.go new file mode 100644 index 00000000000..d45ede84f73 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/pool.go @@ -0,0 +1,67 @@ +package rueidis + +import "sync" + +func newPool(cap int, dead wire, makeFn func() wire) *pool { + if cap <= 0 { + cap = DefaultPoolSize + } + + return &pool{ + size: 0, + dead: dead, + make: makeFn, + list: make([]wire, 0, cap), + cond: sync.NewCond(&sync.Mutex{}), + } +} + +type pool struct { + dead wire + cond *sync.Cond + make func() wire + list []wire + size int + down bool +} + +func (p *pool) Acquire() (v wire) { + p.cond.L.Lock() + for len(p.list) == 0 && p.size == cap(p.list) && !p.down { + p.cond.Wait() + } + if p.down { + v = p.dead + } else if len(p.list) == 0 { + p.size++ + v = p.make() + } else { + i := len(p.list) - 1 + v = p.list[i] + p.list = p.list[:i] + } + p.cond.L.Unlock() + return v +} + +func (p *pool) Store(v wire) { + p.cond.L.Lock() + if !p.down && v.Error() == nil { + p.list = 
append(p.list, v) + } else { + p.size-- + v.Close() + } + p.cond.L.Unlock() + p.cond.Signal() +} + +func (p *pool) Close() { + p.cond.L.Lock() + p.down = true + for _, w := range p.list { + w.Close() + } + p.cond.L.Unlock() + p.cond.Broadcast() +} diff --git a/vendor/github.com/rueian/rueidis/pubsub.go b/vendor/github.com/rueian/rueidis/pubsub.go new file mode 100644 index 00000000000..abbfb298962 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/pubsub.go @@ -0,0 +1,134 @@ +package rueidis + +import ( + "sync" + "sync/atomic" +) + +// PubSubMessage represent a pubsub message from redis +type PubSubMessage struct { + // Pattern is only available with pmessage. + Pattern string + // Channel is the channel the message belongs to + Channel string + // Message is the message content + Message string +} + +// PubSubSubscription represent a pubsub "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" event. +type PubSubSubscription struct { + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + // Channel is the event subject. + Channel string + // Count is the current number of subscriptions for connection. + Count int64 +} + +// PubSubHooks can be registered into DedicatedClient to process pubsub messages without using Client.Receive +type PubSubHooks struct { + // OnMessage will be called when receiving "message" and "pmessage" event. + OnMessage func(m PubSubMessage) + // OnSubscription will be called when receiving "subscribe", "unsubscribe", "psubscribe" and "punsubscribe" event. + OnSubscription func(s PubSubSubscription) +} + +func (h *PubSubHooks) isZero() bool { + return h.OnMessage == nil && h.OnSubscription == nil +} + +func newSubs() *subs { + return &subs{chs: make(map[string]chs), sub: make(map[uint64]*sub)} +} + +type subs struct { + chs map[string]chs + sub map[uint64]*sub + cnt uint64 + mu sync.RWMutex +} + +type chs struct { + sub map[uint64]*sub +} + +type sub struct { + ch chan PubSubMessage + cs []string +} + +func (s *subs) Publish(channel string, msg PubSubMessage) { + if atomic.LoadUint64(&s.cnt) != 0 { + s.mu.RLock() + for _, sb := range s.chs[channel].sub { + sb.ch <- msg + } + s.mu.RUnlock() + } +} + +func (s *subs) Subscribe(channels []string) (ch chan PubSubMessage, cancel func()) { + id := atomic.AddUint64(&s.cnt, 1) + s.mu.Lock() + if s.chs != nil { + ch = make(chan PubSubMessage, 16) + sb := &sub{cs: channels, ch: ch} + s.sub[id] = sb + for _, channel := range channels { + c := s.chs[channel].sub + if c == nil { + c = make(map[uint64]*sub, 1) + s.chs[channel] = chs{sub: c} + } + c[id] = sb + } + cancel = func() { + go func() { + for range ch { + } + }() + s.mu.Lock() + if s.chs != nil { + s.remove(id) + } + s.mu.Unlock() + } + } + s.mu.Unlock() + return ch, cancel +} + +func (s *subs) remove(id uint64) { + if sb := s.sub[id]; sb != nil { + for _, channel := range sb.cs { + if c := s.chs[channel].sub; c != nil { + delete(c, id) + } + } + close(sb.ch) + delete(s.sub, id) + } +} + +func (s *subs) Unsubscribe(channel string) { + if atomic.LoadUint64(&s.cnt) != 0 { + s.mu.Lock() + for id := range s.chs[channel].sub { + s.remove(id) + } + delete(s.chs, channel) + s.mu.Unlock() + } +} + +func (s *subs) Close() { + var sbs map[uint64]*sub + s.mu.Lock() + sbs = s.sub + s.chs = nil + s.sub = nil + s.mu.Unlock() + for _, sb := range sbs { + close(sb.ch) + } +} diff --git a/vendor/github.com/rueian/rueidis/resp.go b/vendor/github.com/rueian/rueidis/resp.go new file mode 100644 index 00000000000..5d251b49aed --- /dev/null +++ 
b/vendor/github.com/rueian/rueidis/resp.go @@ -0,0 +1,255 @@ +package rueidis + +import ( + "bufio" + "errors" + "io" + "strconv" + "strings" + "unsafe" +) + +var errChunked = errors.New("unbounded redis message") +var errOldNull = errors.New("RESP2 null") + +type reader func(i *bufio.Reader) (RedisMessage, error) + +var readers = [256]reader{} + +func init() { + readers['$'] = readBlobString + readers['+'] = readSimpleString + readers['-'] = readSimpleString + readers[':'] = readInteger + readers['_'] = readNull + readers[','] = readSimpleString + readers['#'] = readBoolean + readers['!'] = readBlobString + readers['='] = readBlobString + readers['('] = readSimpleString + readers['*'] = readArray + readers['%'] = readMap + readers['~'] = readArray + readers['|'] = readMap + readers['>'] = readArray + readers['.'] = readNull +} + +func readSimpleString(i *bufio.Reader) (m RedisMessage, err error) { + m.string, err = readS(i) + return +} + +func readBlobString(i *bufio.Reader) (m RedisMessage, err error) { + m.string, err = readB(i) + if err == errChunked { + sb := strings.Builder{} + for { + if _, err = i.Discard(1); err != nil { // discard the ';' + return RedisMessage{}, err + } + length, err := readI(i) + if err != nil { + return RedisMessage{}, err + } + if length == 0 { + return RedisMessage{string: sb.String()}, nil + } + sb.Grow(int(length)) + if _, err = io.CopyN(&sb, i, length); err != nil { + return RedisMessage{}, err + } + if _, err = i.Discard(2); err != nil { + return RedisMessage{}, err + } + } + } + return +} + +func readInteger(i *bufio.Reader) (m RedisMessage, err error) { + m.integer, err = readI(i) + return +} + +func readBoolean(i *bufio.Reader) (m RedisMessage, err error) { + b, err := i.ReadByte() + if err != nil { + return RedisMessage{}, err + } + if b == 't' { + m.integer = 1 + } + _, err = i.Discard(2) + return +} + +func readNull(i *bufio.Reader) (m RedisMessage, err error) { + _, err = i.Discard(2) + return +} + +func readArray(i *bufio.Reader) (m RedisMessage, err error) { + length, err := readI(i) + if err == nil { + if length == -1 { + return m, errOldNull + } + m.values, err = readA(i, length) + } else if err == errChunked { + m.values, err = readE(i) + } + return m, err +} + +func readMap(i *bufio.Reader) (m RedisMessage, err error) { + length, err := readI(i) + if err == nil { + m.values, err = readA(i, length*2) + } else if err == errChunked { + m.values, err = readE(i) + } + return m, err +} + +func readS(i *bufio.Reader) (string, error) { + bs, err := i.ReadBytes('\n') + if err != nil { + return "", err + } + if trim := len(bs) - 2; trim < 0 { + return "", errors.New(unexpectedNoCRLF) + } else { + bs = bs[:trim] + } + return *(*string)(unsafe.Pointer(&bs)), nil +} + +func readI(i *bufio.Reader) (int64, error) { + var v int64 + var neg bool + for { + c, err := i.ReadByte() + if err != nil { + return 0, err + } + switch { + case c >= '0' && c <= '9': + v = v*10 + int64(c-'0') + case c == '\r': + _, err = i.Discard(1) + if neg { + return v * -1, err + } + return v, err + case c == '-': + neg = true + case c == '?': + if _, err = i.Discard(2); err == nil { + err = errChunked + } + return 0, err + default: + return 0, errors.New(unexpectedNumByte + strconv.Itoa(int(c))) + } + } +} + +func readB(i *bufio.Reader) (string, error) { + length, err := readI(i) + if err != nil { + return "", err + } + if length == -1 { + return "", errOldNull + } + bs := make([]byte, length) + if _, err = io.ReadFull(i, bs); err != nil { + return "", err + } + if _, err = 
i.Discard(2); err != nil { + return "", err + } + return *(*string)(unsafe.Pointer(&bs)), nil +} + +func readE(i *bufio.Reader) ([]RedisMessage, error) { + v := make([]RedisMessage, 0) + for { + n, err := readNextMessage(i) + if err != nil { + return nil, err + } + if n.typ == '.' { + return v, err + } + v = append(v, n) + } +} + +func readA(i *bufio.Reader, length int64) (v []RedisMessage, err error) { + v = make([]RedisMessage, length) + for n := int64(0); n < length; n++ { + if v[n], err = readNextMessage(i); err != nil { + return nil, err + } + } + return v, nil +} + +func writeB(o *bufio.Writer, id byte, str string) (err error) { + _ = writeS(o, id, strconv.Itoa(len(str))) + _, _ = o.WriteString(str) + _, err = o.WriteString("\r\n") + return err +} + +func writeS(o *bufio.Writer, id byte, str string) (err error) { + _ = o.WriteByte(id) + _, _ = o.WriteString(str) + _, err = o.WriteString("\r\n") + return err +} + +func readNextMessage(i *bufio.Reader) (m RedisMessage, err error) { + var attrs *RedisMessage + var typ byte + for { + if typ, err = i.ReadByte(); err != nil { + return RedisMessage{}, err + } + fn := readers[typ] + if fn == nil { + return RedisMessage{}, errors.New(unknownMessageType + strconv.Itoa(int(typ))) + } + if m, err = fn(i); err != nil { + if err == errOldNull { + return RedisMessage{typ: '_'}, nil + } + return RedisMessage{}, err + } + m.typ = typ + if m.typ == '|' { // handle the attributes + a := m // clone the original m first, and then take address of the clone + attrs = &a // to avoid go compiler allocating the m on heap which causing worse performance. + m = RedisMessage{} + continue + } + m.attrs = attrs + return m, nil + } +} + +func writeCmd(o *bufio.Writer, cmd []string) (err error) { + err = writeS(o, '*', strconv.Itoa(len(cmd))) + for _, m := range cmd { + err = writeB(o, '$', m) + } + return err +} + +const ( + unexpectedNoCRLF = "received unexpected simple string message ending without CRLF" + unexpectedNumByte = "received unexpected number byte: " + unknownMessageType = "received unknown message type: " +) diff --git a/vendor/github.com/rueian/rueidis/ring.go b/vendor/github.com/rueian/rueidis/ring.go new file mode 100644 index 00000000000..0a5ced514a7 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/ring.go @@ -0,0 +1,136 @@ +package rueidis + +import ( + "sync" + "sync/atomic" + + "github.com/rueian/rueidis/internal/cmds" +) + +type queue interface { + PutOne(m cmds.Completed) chan RedisResult + PutMulti(m []cmds.Completed) chan RedisResult + NextWriteCmd() (cmds.Completed, []cmds.Completed, chan RedisResult) + WaitForWrite() (cmds.Completed, []cmds.Completed, chan RedisResult) + NextResultCh() (cmds.Completed, []cmds.Completed, chan RedisResult, *sync.Cond) +} + +var _ queue = (*ring)(nil) + +func newRing(factor int) *ring { + if factor <= 0 { + factor = DefaultRingScale + } + r := &ring{store: make([]node, 2<<(factor-1))} + r.mask = uint64(len(r.store) - 1) + for i := range r.store { + m := &sync.Mutex{} + r.store[i].c1 = sync.NewCond(m) + r.store[i].c2 = sync.NewCond(m) + r.store[i].ch = make(chan RedisResult, 0) // this channel can't be buffered + } + return r +} + +type ring struct { + store []node // store's size must be 2^N to work with the mask + _ [5]uint64 + write uint64 + _ [7]uint64 + read1 uint64 + read2 uint64 + mask uint64 +} + +type node struct { + c1 *sync.Cond + c2 *sync.Cond + ch chan RedisResult + one cmds.Completed + multi []cmds.Completed + mark uint32 + slept bool +} + +func (r *ring) PutOne(m cmds.Completed) chan 
RedisResult { + n := &r.store[atomic.AddUint64(&r.write, 1)&r.mask] + n.c1.L.Lock() + for n.mark != 0 { + n.c1.Wait() + } + n.one = m + n.multi = nil + n.mark = 1 + s := n.slept + n.c1.L.Unlock() + if s { + n.c2.Broadcast() + } + return n.ch +} + +func (r *ring) PutMulti(m []cmds.Completed) chan RedisResult { + n := &r.store[atomic.AddUint64(&r.write, 1)&r.mask] + n.c1.L.Lock() + for n.mark != 0 { + n.c1.Wait() + } + n.one = cmds.Completed{} + n.multi = m + n.mark = 1 + s := n.slept + n.c1.L.Unlock() + if s { + n.c2.Broadcast() + } + return n.ch +} + +// NextWriteCmd should be only called by one dedicated thread +func (r *ring) NextWriteCmd() (one cmds.Completed, multi []cmds.Completed, ch chan RedisResult) { + r.read1++ + p := r.read1 & r.mask + n := &r.store[p] + n.c1.L.Lock() + if n.mark == 1 { + one, multi, ch = n.one, n.multi, n.ch + n.mark = 2 + } else { + r.read1-- + } + n.c1.L.Unlock() + return +} + +// WaitForWrite should be only called by one dedicated thread +func (r *ring) WaitForWrite() (one cmds.Completed, multi []cmds.Completed, ch chan RedisResult) { + r.read1++ + p := r.read1 & r.mask + n := &r.store[p] + n.c1.L.Lock() + for n.mark != 1 { + n.slept = true + n.c2.Wait() // c1 and c2 share the same mutex + n.slept = false + } + one, multi, ch = n.one, n.multi, n.ch + n.mark = 2 + n.c1.L.Unlock() + return +} + +// NextResultCh should be only called by one dedicated thread +func (r *ring) NextResultCh() (one cmds.Completed, multi []cmds.Completed, ch chan RedisResult, cond *sync.Cond) { + r.read2++ + p := r.read2 & r.mask + n := &r.store[p] + cond = n.c1 + n.c1.L.Lock() + if n.mark == 2 { + one, multi, ch = n.one, n.multi, n.ch + n.mark = 0 + } else { + r.read2-- + } + return +} diff --git a/vendor/github.com/rueian/rueidis/rueidis.go b/vendor/github.com/rueian/rueidis/rueidis.go new file mode 100644 index 00000000000..8ac168eb7b7 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/rueidis.go @@ -0,0 +1,309 @@ +// Package rueidis is a fast Golang Redis RESP3 client that does auto pipelining and supports client side caching. 
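The ring above sizes its store to a power of two (2^factor slots), so a single atomic write counter combined with a bit mask picks the slot without a modulo or any lock around the index. A stripped-down sketch of that indexing, using a tiny factor so the wrap-around is visible:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    const factor = 3 // DefaultRingScale is 10 in the client above; 3 keeps the demo small
    store := make([]int, 2<<(factor-1)) // 2^3 = 8 slots
    mask := uint64(len(store) - 1)      // 0b111

    var write uint64
    for i := 0; i < 10; i++ {
        slot := atomic.AddUint64(&write, 1) & mask // same slot selection as ring.PutOne
        store[slot]++
    }
    fmt.Println(len(store), store) // 8 [1 2 2 1 1 1 1 1]
}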
+package rueidis + +import ( + "context" + "crypto/tls" + "errors" + "math/rand" + "net" + "strings" + "time" + + "github.com/rueian/rueidis/internal/cmds" +) + +const ( + // DefaultCacheBytes is the default value of ClientOption.CacheSizeEachConn, which is 128 MiB + DefaultCacheBytes = 128 * (1 << 20) + // DefaultRingScale is the default value of ClientOption.RingScaleEachConn, which results into having a ring of size 2^10 for each connection + DefaultRingScale = 10 + // DefaultPoolSize is the default value of ClientOption.BlockingPoolSize + DefaultPoolSize = 1000 + // DefaultDialTimeout is the default value of ClientOption.Dialer.Timeout + DefaultDialTimeout = 5 * time.Second + // DefaultTCPKeepAlive is the default value of ClientOption.Dialer.KeepAlive + DefaultTCPKeepAlive = 1 * time.Second + // DefaultReadBuffer is the default value of bufio.NewReaderSize for each connection, which is 0.5MiB + DefaultReadBuffer = 1 << 19 + // DefaultWriteBuffer is the default value of bufio.NewWriterSize for each connection, which is 0.5MiB + DefaultWriteBuffer = 1 << 19 +) + +var ( + // ErrClosing means the Client.Close had been called + ErrClosing = errors.New("rueidis client is closing or unable to connect redis") + // ErrNoAddr means the ClientOption.InitAddress is empty + ErrNoAddr = errors.New("no alive address in InitAddress") + // ErrNoCache means your redis does not support client-side caching and must set ClientOption.DisableCache to true + ErrNoCache = errors.New("ClientOption.DisableCache must be true for redis not supporting client-side caching or not supporting RESP3") + // ErrRESP2PubSubMixed means your redis does not support RESP3 and rueidis can't handle SUBSCRIBE/PSUBSCRIBE/SSUBSCRIBE in mixed case + ErrRESP2PubSubMixed = errors.New("rueidis does not support SUBSCRIBE/PSUBSCRIBE/SSUBSCRIBE mixed with other commands in RESP2") + // ErrDoCacheAborted means redis abort EXEC request or connection closed + ErrDoCacheAborted = errors.New("failed to fetch the cache because EXEC was aborted by redis or connection closed") +) + +// ClientOption should be passed to NewClient to construct a Client +type ClientOption struct { + // TCP & TLS + // Dialer can be used to customized how rueidis connect to a redis instance via TCP, including: + // - Timeout, the default is DefaultDialTimeout + // - KeepAlive, the default is DefaultTCPKeepAlive + // The Dialer.KeepAlive interval is used to detect an unresponsive idle tcp connection. + // OS takes at least (tcp_keepalive_probes+1)*Dialer.KeepAlive time to conclude an idle connection to be unresponsive. + // For example: DefaultTCPKeepAlive = 1s and the default of tcp_keepalive_probes on Linux is 9. + // Therefore, it takes at least 10s to kill an idle and unresponsive tcp connection on Linux by default. + Dialer net.Dialer + TLSConfig *tls.Config + + // OnInvalidations is a callback function in case of client-side caching invalidation received. + // Note that this function must be fast, otherwise other redis messages will be blocked. + OnInvalidations func([]RedisMessage) + + // Sentinel options, including MasterSet and Auth options + Sentinel SentinelOption + + // Redis AUTH parameters + Username string + Password string + ClientName string + + // InitAddress point to redis nodes. + // Rueidis will connect to them one by one and issue CLUSTER SLOT command to initialize the cluster client until success. + // If len(InitAddress) == 1 and the address is not running in cluster mode, rueidis will fall back to the single client mode. 
+ // If ClientOption.Sentinel.MasterSet is set, then InitAddress will be used to connect sentinels + InitAddress []string + + // ClientTrackingOptions will be appended to CLIENT TRACKING ON command when the connection is established. + // The default is []string{"OPTIN"} + ClientTrackingOptions []string + + SelectDB int + + // CacheSizeEachConn is redis client side cache size that bind to each TCP connection to a single redis instance. + // The default is DefaultCacheBytes. + CacheSizeEachConn int + + // RingScaleEachConn sets the size of the ring buffer in each connection to (2 ^ RingScaleEachConn). + // The default is RingScaleEachConn, which results into having a ring of size 2^10 for each connection. + // Reduce this value can reduce the memory consumption of each connection at the cost of potential throughput degradation. + // Values smaller than 8 is typically not recommended. + RingScaleEachConn int + + // ReadBufferEachConn is the size of the bufio.NewReaderSize for each connection, default to DefaultReadBuffer (0.5 MiB). + ReadBufferEachConn int + // WriteBufferEachConn is the size of the bufio.NewWriterSize for each connection, default to DefaultWriteBuffer (0.5 MiB). + WriteBufferEachConn int + + // BlockingPoolSize is the size of the connection pool shared by blocking commands (ex BLPOP, XREAD with BLOCK). + // The default is DefaultPoolSize. + BlockingPoolSize int + + // PipelineMultiplex determines how many tcp connections used to pipeline commands to one redis instance. + // The default for single and sentinel clients is 2, which means 4 connections (2^2). + // The default for cluster client is 0, which means 1 connection (2^0). + PipelineMultiplex int + + // ConnWriteTimeout is applied net.Conn.SetWriteDeadline and periodic PING to redis + // Since the Dialer.KeepAlive will not be triggered if there is data in the outgoing buffer, + // ConnWriteTimeout should be set in order to detect local congestion or unresponsive redis server. + // This default is ClientOption.Dialer.KeepAlive * (9+1), where 9 is the default of tcp_keepalive_probes on Linux. + ConnWriteTimeout time.Duration + + // MaxFlushDelay when greater than zero pauses pipeline write loop for some time (not larger than MaxFlushDelay) + // after each flushing of data to the connection. This gives pipeline a chance to collect more commands to send + // to Redis. Adding this delay increases latency, reduces throughput – but in most cases may significantly reduce + // application and Redis CPU utilization due to less executed system calls. By default, Rueidis flushes data to the + // connection without extra delays. Depending on network latency and application-specific conditions the value + // of MaxFlushDelay may vary, sth like 20 microseconds should not affect latency/throughput a lot but still + // produce notable CPU usage reduction under load. Ref: https://github.com/rueian/rueidis/issues/156 + MaxFlushDelay time.Duration + + // ShuffleInit is a handy flag that shuffles the InitAddress after passing to the NewClient() if it is true + ShuffleInit bool + // DisableRetry disables retrying read-only commands under network errors + DisableRetry bool + // DisableCache falls back Client.DoCache/Client.DoMultiCache to Client.Do/Client.DoMulti + DisableCache bool + // AlwaysPipelining makes rueidis.Client always pipeline redis commands even if they are not issued concurrently. 
+ AlwaysPipelining bool +} + +// SentinelOption contains MasterSet, +type SentinelOption struct { + // TCP & TLS, same as ClientOption but for connecting sentinel + Dialer net.Dialer + TLSConfig *tls.Config + + // MasterSet is the redis master set name monitored by sentinel cluster. + // If this field is set, then ClientOption.InitAddress will be used to connect to sentinel cluster. + MasterSet string + + // Redis AUTH parameters for sentinel + Username string + Password string + ClientName string +} + +// Client is the redis client interface for both single redis instance and redis cluster. It should be created from the NewClient() +type Client interface { + // B is the getter function to the command builder for the client + // If the client is a cluster client, the command builder also prohibits cross key slots in one command. + B() cmds.Builder + // Do is the method sending user's redis command building from the B() to a redis node. + // client.Do(ctx, client.B().Get().Key("k").Build()).ToString() + // All concurrent non-blocking commands will be pipelined automatically and have better throughput. + // Blocking commands will use another separated connection pool. + // The cmd parameter is recycled after passing into Do() and should not be reused. + Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) + // DoMulti takes multiple redis commands and sends them together, reducing RTT from the user code. + // The multi parameters are recycled after passing into DoMulti() and should not be reused. + DoMulti(ctx context.Context, multi ...cmds.Completed) (resp []RedisResult) + // DoCache is similar to Do, but it uses opt-in client side caching and requires a client side TTL. + // The explicit client side TTL specifies the maximum TTL on the client side. + // If the key's TTL on the server is smaller than the client side TTL, the client side TTL will be capped. + // client.Do(ctx, client.B().Get().Key("k").Cache(), time.Minute).ToString() + // The above example will send the following command to redis if cache miss: + // CLIENT CACHING YES + // PTTL k + // GET k + // The in-memory cache size is configured by ClientOption.CacheSizeEachConn. + // The cmd parameter is recycled after passing into DoCache() and should not be reused. + DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) (resp RedisResult) + // DoMultiCache is similar to DoCache, but works with multiple cacheable commands across different slots. + // It will first group commands by slots and will send only cache missed commands to redis. + DoMultiCache(ctx context.Context, multi ...CacheableTTL) (resp []RedisResult) + + // Receive accepts SUBSCRIBE, SSUBSCRIBE, PSUBSCRIBE command and a message handler. + // Receive will block and then return value only when the following cases: + // 1. return nil when received any unsubscribe/punsubscribe message related to the provided `subscribe` command. + // 2. return ErrClosing when the client is closed manually. + // 3. return ctx.Err() when the `ctx` is done. + // 4. return non-nil err when the provided `subscribe` command failed. + Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) error + + // Dedicated acquire a connection from the blocking connection pool, no one else can use the connection + // during Dedicated. The main usage of Dedicated is CAS operation, which is WATCH + MULTI + EXEC. + // However, one should try to avoid CAS operation but use Lua script instead, because occupying a connection + // is not good for performance. 
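Receive, as documented above, blocks until the subscription ends (unsubscribe, context cancellation, client close, or a failed subscribe). A minimal usage sketch; it assumes a client built elsewhere and the generated builder's Subscribe/Channel methods, and needs a reachable redis to actually run:

package example

import (
    "context"
    "fmt"

    "github.com/rueian/rueidis"
)

// subscribeOrders prints every message published to the "orders" channel and
// returns only when the subscription ends, per the Receive contract above.
func subscribeOrders(ctx context.Context, c rueidis.Client) error {
    return c.Receive(ctx, c.B().Subscribe().Channel("orders").Build(),
        func(msg rueidis.PubSubMessage) {
            fmt.Println(msg.Channel, msg.Message)
        })
}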
+ Dedicated(fn func(DedicatedClient) error) (err error) + + // Dedicate does the same as Dedicated, but it exposes DedicatedClient directly + // and requires user to invoke cancel() manually to put connection back to the pool. + Dedicate() (client DedicatedClient, cancel func()) + + // Nodes returns each redis node this client known as rueidis.Client. This is useful if you want to + // send commands to some specific redis nodes in the cluster. + Nodes() map[string]Client + + // Close will make further calls to the client be rejected with ErrClosing, + // and Close will wait until all pending calls finished. + Close() +} + +// DedicatedClient is obtained from Client.Dedicated() and it will be bound to single redis connection and +// no other commands can be pipelined in to this connection during Client.Dedicated(). +// If the DedicatedClient is obtained from cluster client, the first command to it must have a Key() to identify the redis node. +type DedicatedClient interface { + // B is inherited from the Client + B() cmds.Builder + // Do is the same as Client's + // The cmd parameter is recycled after passing into Do() and should not be reused. + Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) + // DoMulti takes multiple redis commands and sends them together, reducing RTT from the user code. + // The multi parameters are recycled after passing into DoMulti() and should not be reused. + DoMulti(ctx context.Context, multi ...cmds.Completed) (resp []RedisResult) + // Receive is the same as Client's + Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) error + // SetPubSubHooks is an alternative way to processing Pub/Sub messages instead of using Receive. + // SetPubSubHooks is non-blocking and allows users to subscribe/unsubscribe channels later. + // Note that the hooks will be called sequentially but in another goroutine. + // The return value will be either: + // 1. an error channel, if the hooks passed in is not zero, or + // 2. nil, if the hooks passed in is zero. (used for reset hooks) + // In the former case, the error channel is guaranteed to be close when the hooks will not be called anymore, + // and has at most one error describing the reason why the hooks will not be called anymore. + // Users can use the error channel to detect disconnection. + SetPubSubHooks(hooks PubSubHooks) <-chan error + // Close closes the dedicated connection and prevent the connection be put back into the pool. + Close() +} + +// CT is a shorthand constructor for CacheableTTL +func CT(cmd cmds.Cacheable, ttl time.Duration) CacheableTTL { + return CacheableTTL{Cmd: cmd, TTL: ttl} +} + +// CacheableTTL is parameter container of DoMultiCache +type CacheableTTL struct { + Cmd cmds.Cacheable + TTL time.Duration +} + +// NewClient uses ClientOption to initialize the Client for both cluster client and single client. +// It will first try to connect as cluster client. If the len(ClientOption.InitAddress) == 1 and +// the address does not enable cluster mode, the NewClient() will use single client instead. 
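A minimal construction sketch tying NewClient and ClientOption together; the address is a placeholder, unset fields fall back to the defaults at the top of this file, and a reachable redis is needed to run it:

package main

import (
    "context"
    "fmt"

    "github.com/rueian/rueidis"
)

func main() {
    client, err := rueidis.NewClient(rueidis.ClientOption{
        InitAddress: []string{"127.0.0.1:6379"}, // placeholder; cluster mode is auto-detected
        SelectDB:    0,
    })
    if err != nil {
        panic(err)
    }
    defer client.Close()

    // Auto-pipelined GET via the command builder, as documented on Client above.
    v, err := client.Do(context.Background(), client.B().Get().Key("k").Build()).ToString()
    fmt.Println(v, err)
}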
+func NewClient(option ClientOption) (client Client, err error) { + if option.ReadBufferEachConn <= 0 { + option.ReadBufferEachConn = DefaultReadBuffer + } + if option.WriteBufferEachConn <= 0 { + option.WriteBufferEachConn = DefaultWriteBuffer + } + if option.CacheSizeEachConn <= 0 { + option.CacheSizeEachConn = DefaultCacheBytes + } + if option.Dialer.Timeout == 0 { + option.Dialer.Timeout = DefaultDialTimeout + } + if option.Dialer.KeepAlive == 0 { + option.Dialer.KeepAlive = DefaultTCPKeepAlive + } + if option.ConnWriteTimeout == 0 { + option.ConnWriteTimeout = option.Dialer.KeepAlive * 10 + } + if option.ShuffleInit { + rand.Shuffle(len(option.InitAddress), func(i, j int) { + option.InitAddress[i], option.InitAddress[j] = option.InitAddress[j], option.InitAddress[i] + }) + } + if option.Sentinel.MasterSet != "" { + option.PipelineMultiplex = singleClientMultiplex(option.PipelineMultiplex) + return newSentinelClient(&option, makeConn) + } + if client, err = newClusterClient(&option, makeConn); err != nil { + if len(option.InitAddress) == 1 && (err.Error() == redisErrMsgCommandNotAllow || strings.Contains(strings.ToUpper(err.Error()), "CLUSTER")) { + option.PipelineMultiplex = singleClientMultiplex(option.PipelineMultiplex) + client, err = newSingleClient(&option, client.(*clusterClient).single(), makeConn) + } else if client != (*clusterClient)(nil) { + client.Close() + return nil, err + } + } + return client, err +} + +func singleClientMultiplex(multiplex int) int { + if multiplex == 0 { + multiplex = 2 + } + if multiplex < 0 { + multiplex = 0 + } + return multiplex +} + +func makeConn(dst string, opt *ClientOption) conn { + return makeMux(dst, opt, dial) +} + +func dial(dst string, opt *ClientOption) (conn net.Conn, err error) { + if opt.TLSConfig != nil { + conn, err = tls.DialWithDialer(&opt.Dialer, "tcp", dst, opt.TLSConfig) + } else { + conn, err = opt.Dialer.Dial("tcp", dst) + } + return conn, err +} + +const redisErrMsgCommandNotAllow = "command is not allowed" diff --git a/vendor/github.com/rueian/rueidis/sentinel.go b/vendor/github.com/rueian/rueidis/sentinel.go new file mode 100644 index 00000000000..f615426defa --- /dev/null +++ b/vendor/github.com/rueian/rueidis/sentinel.go @@ -0,0 +1,353 @@ +package rueidis + +import ( + "container/list" + "context" + "errors" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/rueian/rueidis/internal/cmds" +) + +func newSentinelClient(opt *ClientOption, connFn connFn) (client *sentinelClient, err error) { + client = &sentinelClient{ + cmd: cmds.NewBuilder(cmds.NoSlot), + mOpt: opt, + sOpt: newSentinelOpt(opt), + connFn: connFn, + sentinels: list.New(), + retry: !opt.DisableRetry, + } + + for _, sentinel := range opt.InitAddress { + client.sentinels.PushBack(sentinel) + } + + if err = client.refresh(); err != nil { + client.Close() + return nil, err + } + + return client, nil +} + +type sentinelClient struct { + mConn atomic.Value + sConn conn + mOpt *ClientOption + sOpt *ClientOption + connFn connFn + sentinels *list.List + mAddr string + sAddr string + sc call + mu sync.Mutex + stop uint32 + cmd cmds.Builder + retry bool +} + +func (c *sentinelClient) B() cmds.Builder { + return c.cmd +} + +func (c *sentinelClient) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) { +retry: + resp = c.mConn.Load().(conn).Do(ctx, cmd) + if c.retry && cmd.IsReadOnly() && c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + if resp.NonRedisError() == nil { // not recycle cmds if error, since cmds may be used later in 
pipe. consider recycle them by pipe + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *sentinelClient) DoMulti(ctx context.Context, multi ...cmds.Completed) (resps []RedisResult) { + if len(multi) == 0 { + return nil + } +retry: + resps = c.mConn.Load().(conn).DoMulti(ctx, multi...) + if c.retry && allReadOnly(multi) { + for _, resp := range resps { + if c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + } + } + for i, cmd := range multi { + if resps[i].NonRedisError() == nil { + cmds.Put(cmd.CommandSlice()) + } + } + return resps +} + +func (c *sentinelClient) DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) (resp RedisResult) { +retry: + resp = c.mConn.Load().(conn).DoCache(ctx, cmd, ttl) + if c.retry && c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + if err := resp.NonRedisError(); err == nil || err == ErrDoCacheAborted { + cmds.Put(cmd.CommandSlice()) + } + return resp +} + +func (c *sentinelClient) DoMultiCache(ctx context.Context, multi ...CacheableTTL) (resps []RedisResult) { + if len(multi) == 0 { + return nil + } +retry: + resps = c.mConn.Load().(conn).DoMultiCache(ctx, multi...) + if c.retry { + for _, resp := range resps { + if c.isRetryable(resp.NonRedisError(), ctx) { + goto retry + } + } + } + for i, cmd := range multi { + if err := resps[i].NonRedisError(); err == nil || err == ErrDoCacheAborted { + cmds.Put(cmd.Cmd.CommandSlice()) + } + } + return resps +} + +func (c *sentinelClient) Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) (err error) { +retry: + err = c.mConn.Load().(conn).Receive(ctx, subscribe, fn) + if c.retry { + if _, ok := err.(*RedisError); !ok && c.isRetryable(err, ctx) { + goto retry + } + } + if err == nil { + cmds.Put(subscribe.CommandSlice()) + } + return err +} + +func (c *sentinelClient) Dedicated(fn func(DedicatedClient) error) (err error) { + master := c.mConn.Load().(conn) + wire := master.Acquire() + dsc := &dedicatedSingleClient{cmd: c.cmd, conn: master, wire: wire, retry: c.retry} + err = fn(dsc) + dsc.release() + return err +} + +func (c *sentinelClient) Dedicate() (DedicatedClient, func()) { + master := c.mConn.Load().(conn) + wire := master.Acquire() + dsc := &dedicatedSingleClient{cmd: c.cmd, conn: master, wire: wire, retry: c.retry} + return dsc, dsc.release +} + +func (c *sentinelClient) Nodes() map[string]Client { + conn := c.mConn.Load().(conn) + return map[string]Client{conn.Addr(): newSingleClientWithConn(conn, c.cmd, c.retry)} +} + +func (c *sentinelClient) Close() { + atomic.StoreUint32(&c.stop, 1) + c.mu.Lock() + if c.sConn != nil { + c.sConn.Close() + } + if master := c.mConn.Load(); master != nil { + master.(conn).Close() + } + c.mu.Unlock() +} + +func (c *sentinelClient) isRetryable(err error, ctx context.Context) (should bool) { + return err != nil && atomic.LoadUint32(&c.stop) == 0 && ctx.Err() == nil +} + +func (c *sentinelClient) addSentinel(addr string) { + c.mu.Lock() + c._addSentinel(addr) + c.mu.Unlock() +} + +func (c *sentinelClient) _addSentinel(addr string) { + for e := c.sentinels.Front(); e != nil; e = e.Next() { + if e.Value.(string) == addr { + return + } + } + c.sentinels.PushFront(addr) +} + +func (c *sentinelClient) switchMasterRetry(addr string) { + c.mu.Lock() + err := c._switchMaster(addr) + c.mu.Unlock() + if err != nil { + go c.refreshRetry() + } +} + +func (c *sentinelClient) _switchMaster(addr string) (err error) { + var master conn + if atomic.LoadUint32(&c.stop) == 1 { + return nil + } + if c.mAddr == addr { + 
master = c.mConn.Load().(conn) + if master.Error() != nil { + master = nil + } + } + if master == nil { + master = c.connFn(addr, c.mOpt) + if err = master.Dial(); err != nil { + return err + } + } + if resp, err := master.Do(context.Background(), cmds.RoleCmd).ToArray(); err != nil { + master.Close() + return err + } else if resp[0].string != "master" { + master.Close() + return errNotMaster + } + c.mAddr = addr + if old := c.mConn.Swap(master); old != nil { + if prev := old.(conn); prev != master { + prev.Close() + } + } + return nil +} + +func (c *sentinelClient) refreshRetry() { +retry: + if err := c.refresh(); err != nil { + goto retry + } +} + +func (c *sentinelClient) refresh() (err error) { + return c.sc.Do(c._refresh) +} + +func (c *sentinelClient) _refresh() (err error) { + var master string + var sentinels []string + + c.mu.Lock() + head := c.sentinels.Front() + for e := head; e != nil; { + if atomic.LoadUint32(&c.stop) == 1 { + c.mu.Unlock() + return nil + } + addr := e.Value.(string) + + if c.sAddr != addr || c.sConn == nil || c.sConn.Error() != nil { + if c.sConn != nil { + c.sConn.Close() + } + c.sAddr = addr + c.sConn = c.connFn(addr, c.sOpt) + err = c.sConn.Dial() + } + if err == nil { + if master, sentinels, err = c.listWatch(c.sConn); err == nil { + for _, sentinel := range sentinels { + c._addSentinel(sentinel) + } + if err = c._switchMaster(master); err == nil { + break + } + } + c.sConn.Close() + } + c.sentinels.MoveToBack(e) + if e = c.sentinels.Front(); e == head { + break + } + } + c.mu.Unlock() + + if err == nil { + if master := c.mConn.Load(); master == nil { + err = ErrNoAddr + } else { + err = master.(conn).Error() + } + } + return err +} + +func (c *sentinelClient) listWatch(cc conn) (master string, sentinels []string, err error) { + ctx := context.Background() + sentinelsCMD := c.cmd.SentinelSentinels().Master(c.mOpt.Sentinel.MasterSet).Build() + getMasterCMD := c.cmd.SentinelGetMasterAddrByName().Master(c.mOpt.Sentinel.MasterSet).Build() + defer func() { + if err == nil { // not recycle cmds if error, since cmds may be used later in pipe. 
consider recycle them by pipe + cmds.Put(sentinelsCMD.CommandSlice()) + cmds.Put(getMasterCMD.CommandSlice()) + } + }() + + go func(cc conn) { + if err := cc.Receive(ctx, cmds.SentinelSubscribe, func(event PubSubMessage) { + switch event.Channel { + case "+sentinel": + m := strings.SplitN(event.Message, " ", 4) + c.addSentinel(fmt.Sprintf("%s:%s", m[2], m[3])) + case "+switch-master": + m := strings.SplitN(event.Message, " ", 5) + if m[0] == c.sOpt.Sentinel.MasterSet { + c.switchMasterRetry(fmt.Sprintf("%s:%s", m[3], m[4])) + } + case "+reboot": + m := strings.SplitN(event.Message, " ", 4) + if m[0] == "master" && m[1] == c.sOpt.Sentinel.MasterSet { + c.switchMasterRetry(fmt.Sprintf("%s:%s", m[2], m[3])) + } + } + }); err != nil && atomic.LoadUint32(&c.stop) == 0 { + c.refreshRetry() + } + }(cc) + + resp := cc.DoMulti(ctx, sentinelsCMD, getMasterCMD) + others, err := resp[0].ToArray() + if err != nil { + return "", nil, err + } + for _, other := range others { + if m, err := other.AsStrMap(); err == nil { + sentinels = append(sentinels, fmt.Sprintf("%s:%s", m["ip"], m["port"])) + } + } + m, err := resp[1].AsStrSlice() + if err != nil { + return "", nil, err + } + return fmt.Sprintf("%s:%s", m[0], m[1]), sentinels, nil +} + +func newSentinelOpt(opt *ClientOption) *ClientOption { + o := *opt + o.Username = o.Sentinel.Username + o.Password = o.Sentinel.Password + o.ClientName = o.Sentinel.ClientName + o.Dialer = o.Sentinel.Dialer + o.TLSConfig = o.Sentinel.TLSConfig + o.SelectDB = 0 // https://github.com/rueian/rueidis/issues/138 + return &o +} + +var errNotMaster = errors.New("the redis is not master") diff --git a/vendor/github.com/rueian/rueidis/singleflight.go b/vendor/github.com/rueian/rueidis/singleflight.go new file mode 100644 index 00000000000..657b6f174d3 --- /dev/null +++ b/vendor/github.com/rueian/rueidis/singleflight.go @@ -0,0 +1,39 @@ +package rueidis + +import "sync" + +type call struct { + wg *sync.WaitGroup + mu sync.Mutex + cn int +} + +func (c *call) Do(fn func() error) error { + var wg *sync.WaitGroup + c.mu.Lock() + c.cn++ + if c.wg != nil { + wg = c.wg + c.mu.Unlock() + wg.Wait() + return nil + } + wg = &sync.WaitGroup{} + wg.Add(1) + c.wg = wg + c.mu.Unlock() + + err := fn() + c.mu.Lock() + c.wg = nil + c.cn = 0 + c.mu.Unlock() + wg.Done() + return err +} + +func (c *call) suppressing() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.cn +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 5edf27f2423..41625dc17d7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -433,3 +433,22 @@ func MarkForNoDownsample(ctx context.Context, logger log.Logger, bkt objstore.Bu level.Info(logger).Log("msg", "block has been marked for no downsample", "block", id) return nil } + +// RemoveMark removes the file which marked the block for deletion, no-downsample or no-compact. 
+func RemoveMark(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, removeMark prometheus.Counter, markedFilename string) error { + markedFile := path.Join(id.String(), markedFilename) + markedFileExists, err := bkt.Exists(ctx, markedFile) + if err != nil { + return errors.Wrapf(err, "check if %s file exists in bucket", markedFile) + } + if !markedFileExists { + level.Warn(logger).Log("msg", "requested to remove the mark, but file does not exist", "err", errors.Errorf("file %s does not exist in bucket", markedFile)) + return nil + } + if err := bkt.Delete(ctx, markedFile); err != nil { + return errors.Wrapf(err, "delete file %s from bucket", markedFile) + } + removeMark.Inc() + level.Info(logger).Log("msg", "mark has been removed from the block", "block", id) + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go index cb5a7ee2aa7..77852073015 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/redis_client.go @@ -5,21 +5,23 @@ package cacheutil import ( "context" - "fmt" - "sync" + "crypto/tls" + "net" + "strings" "time" "unsafe" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/go-redis/redis/v8" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/rueian/rueidis" "gopkg.in/yaml.v3" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" + "github.com/thanos-io/thanos/pkg/model" thanos_tls "github.com/thanos-io/thanos/pkg/tls" ) @@ -117,6 +119,16 @@ type RedisClientConfig struct { // TLSConfig to use to connect to the redis server. TLSConfig TLSConfig `yaml:"tls_config"` + + // If not zero then client-side caching is enabled. + // Client-side caching is when data is stored in memory + // instead of fetching data each time. + // See https://redis.io/docs/manual/client-side-caching/ for info. + CacheSize model.Bytes `yaml:"cache_size"` + + // MasterName specifies the master's name. Must be not empty + // for Redis Sentinel. + MasterName string `yaml:"master_name"` } func (c *RedisClientConfig) validate() error { @@ -124,12 +136,18 @@ func (c *RedisClientConfig) validate() error { return errors.New("no redis addr provided") } + if c.TLSEnabled { + if (c.TLSConfig.CertFile != "") != (c.TLSConfig.KeyFile != "") { + return errors.New("both client key and certificate must be provided") + } + } + return nil } -// RedisClient is a wrap of redis.Client. type RedisClient struct { - *redis.Client + client rueidis.Client + config RedisClientConfig // getMultiGate used to enforce the max number of concurrent GetMulti() operations. 
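To make the two new cacheutil knobs concrete, here is an illustrative reduction of how the constructor in the next hunk maps them onto rueidis options: cache_size: 0 disables client-side caching entirely, and a non-empty master_name switches the client into sentinel mode. This is a sketch written as if it sat next to the config in pkg/cacheutil, not the actual constructor:

package cacheutil

import (
    "strings"

    "github.com/rueian/rueidis"
)

// redisOptionsSketch shows only the option mapping added by this change; auth,
// TLS and timeout handling from the real NewRedisClientWithConfig are omitted.
func redisOptionsSketch(config RedisClientConfig) rueidis.ClientOption {
    opts := rueidis.ClientOption{
        InitAddress:       strings.Split(config.Addr, ","),
        ShuffleInit:       true,
        Username:          config.Username,
        Password:          config.Password,
        SelectDB:          config.DB,
        CacheSizeEachConn: int(config.CacheSize),
        DisableCache:      config.CacheSize == 0,
    }
    if config.MasterName != "" {
        opts.Sentinel = rueidis.SentinelOption{MasterSet: config.MasterName}
    }
    return opts
}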
@@ -161,39 +179,55 @@ func NewRedisClientWithConfig(logger log.Logger, name string, config RedisClient return nil, err } - opts := &redis.Options{ - Addr: config.Addr, - Username: config.Username, - Password: config.Password, - DB: config.DB, - DialTimeout: config.DialTimeout, - ReadTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MinIdleConns: config.MinIdleConns, - MaxConnAge: config.MaxConnAge, - IdleTimeout: config.IdleTimeout, + if reg != nil { + reg = prometheus.WrapRegistererWith(prometheus.Labels{"name": name}, reg) } + var tlsConfig *tls.Config if config.TLSEnabled { - tlsConfig := config.TLSConfig + userTLSConfig := config.TLSConfig - tlsClientConfig, err := thanos_tls.NewClientConfig(logger, tlsConfig.CertFile, tlsConfig.KeyFile, - tlsConfig.CAFile, tlsConfig.ServerName, tlsConfig.InsecureSkipVerify) + tlsClientConfig, err := thanos_tls.NewClientConfig(logger, userTLSConfig.CertFile, userTLSConfig.KeyFile, + userTLSConfig.CAFile, userTLSConfig.ServerName, userTLSConfig.InsecureSkipVerify) if err != nil { return nil, err } - opts.TLSConfig = tlsClientConfig + tlsConfig = tlsClientConfig } - redisClient := redis.NewClient(opts) - if reg != nil { - reg = prometheus.WrapRegistererWith(prometheus.Labels{"name": name}, reg) + clientSideCacheDisabled := false + if config.CacheSize == 0 { + clientSideCacheDisabled = true + } + + clientOpts := rueidis.ClientOption{ + InitAddress: strings.Split(config.Addr, ","), + ShuffleInit: true, + Username: config.Username, + Password: config.Password, + SelectDB: config.DB, + CacheSizeEachConn: int(config.CacheSize), + Dialer: net.Dialer{Timeout: config.DialTimeout}, + ConnWriteTimeout: config.WriteTimeout, + DisableCache: clientSideCacheDisabled, + TLSConfig: tlsConfig, + } + + if config.MasterName != "" { + clientOpts.Sentinel = rueidis.SentinelOption{ + MasterSet: config.MasterName, + } + } + + client, err := rueidis.NewClient(clientOpts) + if err != nil { + return nil, err } c := &RedisClient{ - Client: redisClient, + client: client, config: config, logger: logger, getMultiGate: gate.New( @@ -219,9 +253,8 @@ func NewRedisClientWithConfig(logger log.Logger, name string, config RedisClient // SetAsync implement RemoteCacheClient. 
func (c *RedisClient) SetAsync(ctx context.Context, key string, value []byte, ttl time.Duration) error { start := time.Now() - if _, err := c.Set(ctx, key, value, ttl).Result(); err != nil { - level.Warn(c.logger).Log("msg", "failed to set item into redis", "err", err, "key", key, - "value_size", len(value)) + if err := c.client.Do(ctx, c.client.B().Set().Key(key).Value(rueidis.BinaryString(value)).ExSeconds(int64(ttl.Seconds())).Build()).Error(); err != nil { + level.Warn(c.logger).Log("msg", "failed to set item into redis", "err", err, "key", key, "value_size", len(value)) return nil } c.durationSet.Observe(time.Since(start).Seconds()) @@ -234,29 +267,16 @@ func (c *RedisClient) SetMulti(ctx context.Context, data map[string][]byte, ttl return } start := time.Now() - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) + sets := make(rueidis.Commands, 0, len(data)) + ittl := int64(ttl.Seconds()) + for k, v := range data { + sets = append(sets, c.client.B().Setex().Key(k).Seconds(ittl).Value(rueidis.BinaryString(v)).Build()) } - err := doWithBatch(ctx, len(data), c.config.SetMultiBatchSize, c.setMultiGate, func(startIndex, endIndex int) error { - currentKeys := keys[startIndex:endIndex] - _, err := c.Pipelined(ctx, func(p redis.Pipeliner) error { - for _, key := range currentKeys { - p.SetEX(ctx, key, data[key], ttl) - } - return nil - }) - if err != nil { - level.Warn(c.logger).Log("msg", "failed to set multi items from redis", - "err", err, "items", len(data)) - return nil + for _, resp := range c.client.DoMulti(ctx, sets...) { + if err := resp.Error(); err != nil { + level.Warn(c.logger).Log("msg", "failed to set multi items from redis", "err", err, "items", len(data)) + return } - return nil - }) - if err != nil { - level.Warn(c.logger).Log("msg", "failed to set multi items from redis", "err", err, - "items", len(data)) - return } c.durationSetMulti.Observe(time.Since(start).Seconds()) } @@ -268,32 +288,22 @@ func (c *RedisClient) GetMulti(ctx context.Context, keys []string) map[string][] } start := time.Now() results := make(map[string][]byte, len(keys)) - var mu sync.Mutex - err := doWithBatch(ctx, len(keys), c.config.GetMultiBatchSize, c.getMultiGate, func(startIndex, endIndex int) error { - currentKeys := keys[startIndex:endIndex] - resp, err := c.MGet(ctx, currentKeys...).Result() - if err != nil { - level.Warn(c.logger).Log("msg", "failed to mget items from redis", "err", err, "items", len(resp)) - return nil - } - mu.Lock() - defer mu.Unlock() - for i := 0; i < len(resp); i++ { - key := currentKeys[i] - switch val := resp[i].(type) { - case string: - results[key] = stringToBytes(val) - case nil: // miss - default: - level.Warn(c.logger).Log("msg", - fmt.Sprintf("unexpected redis mget result type:%T %v", resp[i], resp[i])) - } - } - return nil - }) + + if c.config.ReadTimeout > 0 { + timeoutCtx, cancel := context.WithTimeout(ctx, c.config.ReadTimeout) + defer cancel() + ctx = timeoutCtx + } + + // NOTE(GiedriusS): TTL is the default one in case PTTL fails. 8 hours should be good enough IMHO. 
+ resps, err := rueidis.MGetCache(c.client, ctx, 8*time.Hour, keys) if err != nil { - level.Warn(c.logger).Log("msg", "failed to mget items from redis", "err", err, "items", len(keys)) - return nil + level.Warn(c.logger).Log("msg", "failed to mget items from redis", "err", err, "items", len(resps)) + } + for key, resp := range resps { + if val, err := resp.ToString(); err == nil { + results[key] = stringToBytes(val) + } } c.durationGetMulti.Observe(time.Since(start).Seconds()) return results @@ -301,9 +311,7 @@ func (c *RedisClient) GetMulti(ctx context.Context, keys []string) map[string][] // Stop implement RemoteCacheClient. func (c *RedisClient) Stop() { - if err := c.Close(); err != nil { - level.Error(c.logger).Log("msg", "redis close err") - } + c.client.Close() } // stringToBytes converts string to byte slice (copied from vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go). diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 4aa551ae3e9..e698421dc01 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -17,6 +17,7 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/tsdb" @@ -455,7 +456,7 @@ func expandChunkIterator(it chunkenc.Iterator, buf *[]sample) error { // If it does, we skip it. lastT := int64(0) - for it.Next() { + for it.Next() != chunkenc.ValNone { t, v := it.At() if value.IsStaleNaN(v) { continue @@ -609,12 +610,13 @@ func NewApplyCounterResetsIterator(chks ...chunkenc.Iterator) *ApplyCounterReset return &ApplyCounterResetsSeriesIterator{chks: chks} } -func (it *ApplyCounterResetsSeriesIterator) Next() bool { +// TODO(rabenhorst): Native histogram support needs to be added, float type is hardcoded. +func (it *ApplyCounterResetsSeriesIterator) Next() chunkenc.ValueType { for { if it.i >= len(it.chks) { - return false + return chunkenc.ValNone } - if ok := it.chks[it.i].Next(); !ok { + if it.chks[it.i].Next() == chunkenc.ValNone { it.i++ // While iterators are ordered, they are not generally guaranteed to be // non-overlapping. Ensure that the series does not go back in time by seeking at least @@ -631,7 +633,7 @@ func (it *ApplyCounterResetsSeriesIterator) Next() bool { it.total++ it.lastT, it.lastV = t, v it.totalV = v - return true + return chunkenc.ValFloat } // If the timestamp increased, it is not the special last sample. if t > it.lastT { @@ -642,7 +644,7 @@ func (it *ApplyCounterResetsSeriesIterator) Next() bool { } it.lastT, it.lastV = t, v it.total++ - return true + return chunkenc.ValFloat } // We hit a sample that indicates what the true last value was. For the // next chunk we use it to determine whether there was a counter reset between them. @@ -657,16 +659,28 @@ func (it *ApplyCounterResetsSeriesIterator) At() (t int64, v float64) { return it.lastT, it.totalV } -func (it *ApplyCounterResetsSeriesIterator) Seek(x int64) bool { +// TODO(rabenhorst): Needs to be implemented for native histogram support. 
+func (it *ApplyCounterResetsSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + panic("not implemented") +} + +func (it *ApplyCounterResetsSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + panic("not implemented") +} + +func (it *ApplyCounterResetsSeriesIterator) AtT() int64 { + return it.lastT +} + +func (it *ApplyCounterResetsSeriesIterator) Seek(x int64) chunkenc.ValueType { // Don't use underlying Seek, but iterate over next to not miss counter resets. for { if t, _ := it.At(); t >= x { - return true + return chunkenc.ValFloat } - ok := it.Next() - if !ok { - return false + if it.Next() == chunkenc.ValNone { + return chunkenc.ValNone } } } @@ -692,35 +706,49 @@ func NewAverageChunkIterator(cnt, sum chunkenc.Iterator) *AverageChunkIterator { return &AverageChunkIterator{cntIt: cnt, sumIt: sum} } -func (it *AverageChunkIterator) Next() bool { +// TODO(rabenhorst): Native histogram support needs to be added, float type is hardcoded. +func (it *AverageChunkIterator) Next() chunkenc.ValueType { cok, sok := it.cntIt.Next(), it.sumIt.Next() if cok != sok { it.err = errors.New("sum and count iterator not aligned") - return false + return chunkenc.ValNone } - if !cok { - return false + if cok == chunkenc.ValNone { + return chunkenc.ValNone } cntT, cntV := it.cntIt.At() sumT, sumV := it.sumIt.At() if cntT != sumT { it.err = errors.New("sum and count timestamps not aligned") - return false + return chunkenc.ValNone } it.t, it.v = cntT, sumV/cntV - return true + return chunkenc.ValFloat } -func (it *AverageChunkIterator) Seek(t int64) bool { +func (it *AverageChunkIterator) Seek(t int64) chunkenc.ValueType { it.err = errors.New("seek used, but not implemented") - return false + return chunkenc.ValNone } func (it *AverageChunkIterator) At() (int64, float64) { return it.t, it.v } +// TODO(rabenhorst): Needs to be implemented for native histogram support. 
+func (it *AverageChunkIterator) AtHistogram() (int64, *histogram.Histogram) { + panic("not implemented") +} + +func (it *AverageChunkIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + panic("not implemented") +} + +func (it *AverageChunkIterator) AtT() int64 { + return it.t +} + func (it *AverageChunkIterator) Err() error { if it.cntIt.Err() != nil { return it.cntIt.Err() diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go index e053c7a6c48..2055ade87ba 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/chunk_iter.go @@ -312,7 +312,7 @@ func (a *aggrChunkIterator) toChunk(at downsample.AggrType, minTime, maxTime int lastT int64 lastV float64 ) - for it.Next() { + for it.Next() != chunkenc.ValNone { lastT, lastV = it.At() appender.Append(lastT, lastV) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go index 9737de279f7..0aa76651cc9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go @@ -6,6 +6,7 @@ package dedup import ( "math" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -375,7 +376,7 @@ func (it *counterErrAdjustSeriesIterator) At() (int64, float64) { type dedupSeriesIterator struct { a, b adjustableSeriesIterator - aok, bok bool + aval, bval chunkenc.ValueType // TODO(bwplotka): Don't base on LastT, but on detected scrape interval. This will allow us to be more // responsive to gaps: https://github.com/thanos-io/thanos/issues/981, let's do it in next PR. @@ -392,12 +393,12 @@ func newDedupSeriesIterator(a, b adjustableSeriesIterator) *dedupSeriesIterator b: b, lastT: math.MinInt64, lastV: float64(math.MinInt64), - aok: a.Next(), - bok: b.Next(), + aval: a.Next(), + bval: b.Next(), } } -func (it *dedupSeriesIterator) Next() bool { +func (it *dedupSeriesIterator) Next() chunkenc.ValueType { lastValue := it.lastV lastUseA := it.useA defer func() { @@ -409,27 +410,27 @@ func (it *dedupSeriesIterator) Next() bool { }() // Advance both iterators to at least the next highest timestamp plus the potential penalty. - if it.aok { - it.aok = it.a.Seek(it.lastT + 1 + it.penA) + if it.aval != chunkenc.ValNone { + it.aval = it.a.Seek(it.lastT + 1 + it.penA) } - if it.bok { - it.bok = it.b.Seek(it.lastT + 1 + it.penB) + if it.bval != chunkenc.ValNone { + it.bval = it.b.Seek(it.lastT + 1 + it.penB) } // Handle basic cases where one iterator is exhausted before the other. - if !it.aok { + if it.aval == chunkenc.ValNone { it.useA = false - if it.bok { + if it.bval != chunkenc.ValNone { it.lastT, it.lastV = it.b.At() it.penB = 0 } - return it.bok + return it.bval } - if !it.bok { + if it.bval == chunkenc.ValNone { it.useA = true it.lastT, it.lastV = it.a.At() it.penA = 0 - return true + return it.aval } // General case where both iterators still have data. We pick the one // with the smaller timestamp. 
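The mechanical change running through the downsample and dedup hunks above, and the ones that follow, is Prometheus' updated iterator contract: chunkenc.Iterator.Next and Seek now return a chunkenc.ValueType instead of a bool, with chunkenc.ValNone signalling exhaustion and the concrete type (float, histogram, float histogram) selecting which accessor to call. A minimal consumer loop under that contract, shown here only as an illustration, looks like this:

import "github.com/prometheus/prometheus/tsdb/chunkenc"

// consume drains an iterator under the ValueType-based contract.
// The histogram branches are included for completeness; the iterators in this
// patch still hard-code floats and panic in AtHistogram/AtFloatHistogram.
func consume(it chunkenc.Iterator) error {
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		switch vt {
		case chunkenc.ValFloat:
			_, _ = it.At()
		case chunkenc.ValHistogram:
			_, _ = it.AtHistogram()
		case chunkenc.ValFloatHistogram:
			_, _ = it.AtFloatHistogram()
		}
	}
	return it.Err()
}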
@@ -458,7 +459,7 @@ func (it *dedupSeriesIterator) Next() bool { it.penA = 0 it.lastT = ta it.lastV = va - return true + return it.aval } if it.lastT != math.MinInt64 { it.penA = 2 * (tb - it.lastT) @@ -468,27 +469,28 @@ func (it *dedupSeriesIterator) Next() bool { it.penB = 0 it.lastT = tb it.lastV = vb - return true + return it.bval } func (it *dedupSeriesIterator) adjustAtValue(lastValue float64) { - if it.aok { + if it.aval != chunkenc.ValNone { it.a.adjustAtValue(lastValue) } - if it.bok { + if it.bval != chunkenc.ValNone { it.b.adjustAtValue(lastValue) } } -func (it *dedupSeriesIterator) Seek(t int64) bool { +// TODO(rabenhorst): Native histogram support needs to be implemented, float type hardcoded. +func (it *dedupSeriesIterator) Seek(t int64) chunkenc.ValueType { // Don't use underlying Seek, but iterate over next to not miss gaps. for { ts, _ := it.At() if ts >= t { - return true + return chunkenc.ValFloat } - if !it.Next() { - return false + if it.Next() == chunkenc.ValNone { + return chunkenc.ValNone } } } @@ -500,6 +502,25 @@ func (it *dedupSeriesIterator) At() (int64, float64) { return it.b.At() } +// TODO(rabenhorst): Needs to be implemented for native histogram support. +func (it *dedupSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + panic("not implemented") +} + +func (it *dedupSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + panic("not implemented") +} + +func (it *dedupSeriesIterator) AtT() int64 { + var t int64 + if it.useA { + t, _ = it.a.At() + } else { + t, _ = it.b.At() + } + return t +} + func (it *dedupSeriesIterator) Err() error { if it.a.Err() != nil { return it.a.Err() @@ -518,9 +539,9 @@ func NewBoundedSeriesIterator(it chunkenc.Iterator, mint, maxt int64) *boundedSe return &boundedSeriesIterator{it: it, mint: mint, maxt: maxt} } -func (it *boundedSeriesIterator) Seek(t int64) (ok bool) { +func (it *boundedSeriesIterator) Seek(t int64) chunkenc.ValueType { if t > it.maxt { - return false + return chunkenc.ValNone } if t < it.mint { t = it.mint @@ -532,21 +553,40 @@ func (it *boundedSeriesIterator) At() (t int64, v float64) { return it.it.At() } -func (it *boundedSeriesIterator) Next() bool { - if !it.it.Next() { - return false +// TODO(rabenhorst): Needs to be implemented for native histogram support. +func (it *boundedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + panic("not implemented") +} + +func (it *boundedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + panic("not implemented") +} + +func (it *boundedSeriesIterator) AtT() int64 { + t, _ := it.it.At() + return t +} + +func (it *boundedSeriesIterator) Next() chunkenc.ValueType { + valueType := it.it.Next() + if valueType == chunkenc.ValNone { + return chunkenc.ValNone } t, _ := it.it.At() // Advance the iterator if we are before the valid interval. if t < it.mint { - if !it.Seek(it.mint) { - return false + if it.Seek(it.mint) == chunkenc.ValNone { + return chunkenc.ValNone } t, _ = it.it.At() } // Once we passed the valid interval, there is no going back. 
- return t <= it.maxt + if t <= it.maxt { + return valueType + } + + return chunkenc.ValNone } func (it *boundedSeriesIterator) Err() error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go index 890a98b0025..00c970b9296 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -17,7 +18,7 @@ var PushdownMarker = labels.Label{Name: "__thanos_pushed_down", Value: "true"} type pushdownSeriesIterator struct { a, b chunkenc.Iterator - aok, bok bool + aval, bval chunkenc.ValueType aused, bused bool function func(float64, float64) float64 @@ -44,24 +45,32 @@ func newPushdownSeriesIterator(a, b chunkenc.Iterator, function string) *pushdow } } -func (it *pushdownSeriesIterator) Next() bool { +func (it *pushdownSeriesIterator) Next() chunkenc.ValueType { // Push A if we've used A before. Push B if we've used B before. // Push both if we've used both before. switch { case !it.aused && !it.bused: - return false + return chunkenc.ValNone case it.aused && !it.bused: - it.aok = it.a.Next() + it.aval = it.a.Next() case !it.aused && it.bused: - it.bok = it.b.Next() + it.bval = it.b.Next() case it.aused && it.bused: - it.aok = it.a.Next() - it.bok = it.b.Next() + it.aval = it.a.Next() + it.bval = it.b.Next() } it.aused = false it.bused = false - return it.aok || it.bok + if it.aval != chunkenc.ValNone { + return it.aval + } + + if it.bval != chunkenc.ValNone { + return it.bval + } + + return chunkenc.ValNone } func (it *pushdownSeriesIterator) At() (int64, float64) { @@ -69,7 +78,7 @@ func (it *pushdownSeriesIterator) At() (int64, float64) { var timestamp int64 var val float64 - if it.aok && it.bok { + if it.aval != chunkenc.ValNone && it.bval != chunkenc.ValNone { ta, va := it.a.At() tb, vb := it.b.At() if ta == tb { @@ -88,7 +97,7 @@ func (it *pushdownSeriesIterator) At() (int64, float64) { it.bused = true } } - } else if it.aok { + } else if it.aval != chunkenc.ValNone { ta, va := it.a.At() val = va timestamp = ta @@ -103,14 +112,29 @@ func (it *pushdownSeriesIterator) At() (int64, float64) { return timestamp, val } -func (it *pushdownSeriesIterator) Seek(t int64) bool { +// TODO(rabenhorst): Needs to be implemented for native histogram support. +func (it *pushdownSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { + panic("not implemented") +} + +func (it *pushdownSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { + panic("not implemented") +} + +func (it *pushdownSeriesIterator) AtT() int64 { + t, _ := it.a.At() + return t +} + +// TODO(rabenhorst): Native histogram support needs to be implemented, currently float type is hardcoded. 
+func (it *pushdownSeriesIterator) Seek(t int64) chunkenc.ValueType { for { ts, _ := it.At() if ts >= t { - return true + return chunkenc.ValFloat } - if !it.Next() { - return false + if it.Next() == chunkenc.ValNone { + return chunkenc.ValNone } } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go index e1e3ab05955..4931f2dd766 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go @@ -8,8 +8,8 @@ import ( "strings" "testing" + "github.com/efficientgo/core/testutil" "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/testutil" ) // CurrentGaugeValuesFor returns gauge values for given metric names. Useful for testing based on registry, diff --git a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go index ae6ecf9872f..36a16a3112a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go +++ b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go @@ -21,6 +21,10 @@ var ( Name: "gate_queries_in_flight", Help: "Number of queries that are currently in flight.", } + TotalCounterOpts = prometheus.CounterOpts{ + Name: "gate_queries_total", + Help: "Total number of queries.", + } DurationHistogramOpts = prometheus.HistogramOpts{ Name: "gate_duration_seconds", Help: "How many seconds it took for queries to wait at the gate.", @@ -87,9 +91,12 @@ func New(reg prometheus.Registerer, maxConcurrent int) Gate { return InstrumentGateDuration( promauto.With(reg).NewHistogram(DurationHistogramOpts), - InstrumentGateInFlight( - promauto.With(reg).NewGauge(InFlightGaugeOpts), - promgate.New(maxConcurrent), + InstrumentGateTotal( + promauto.With(reg).NewCounter(TotalCounterOpts), + InstrumentGateInFlight( + promauto.With(reg).NewGauge(InFlightGaugeOpts), + promgate.New(maxConcurrent), + ), ), ) } @@ -159,3 +166,31 @@ func (g *instrumentedInFlightGate) Done() { g.inflight.Dec() g.g.Done() } + +type instrumentedTotalGate struct { + g Gate + total prometheus.Counter +} + +// InstrumentGateTotal instruments the provided Gate to track total requests. +func InstrumentGateTotal(total prometheus.Counter, g Gate) Gate { + return &instrumentedTotalGate{ + g: g, + total: total, + } +} + +// Start implements the Gate interface. +func (g *instrumentedTotalGate) Start(ctx context.Context) error { + g.total.Inc() + if err := g.g.Start(ctx); err != nil { + return err + } + + return nil +} + +// Done implements the Gate interface. +func (g *instrumentedTotalGate) Done() { + g.g.Done() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go index de5dc6b3b1d..404600f6214 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go +++ b/vendor/github.com/thanos-io/thanos/pkg/httpconfig/http.go @@ -141,7 +141,7 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig return nil, err } - // If a authorization_credentials is provided, create a round tripper that will set the + // If an authorization_credentials is provided, create a round tripper that will set the // Authorization header correctly on each request. 
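Returning briefly to the gate.New change above (the HTTP client hunk continues below): the constructor now layers three decorators around the concurrency-limiting gate, a duration histogram, the new total counter, and an in-flight gauge, so every Start call is counted even when it later blocks or fails. A rough usage sketch with a hypothetical registry and limit; in real code the gate would be constructed once and reused:

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/thanos-io/thanos/pkg/gate"
)

func gatedWork(ctx context.Context, reg *prometheus.Registry) error {
	// Allow at most 4 concurrent callers; gate_queries_total now counts every Start.
	g := gate.New(reg, 4)

	if err := g.Start(ctx); err != nil {
		return err // context cancelled or deadline exceeded while waiting at the gate
	}
	defer g.Done()

	// ... perform the gated work ...
	return nil
}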
if cfg.Authorization != nil && len(cfg.Authorization.Credentials) > 0 { rt = config_util.NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt) @@ -173,7 +173,7 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig return newRT(tlsConfig) } - return config_util.NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) + return config_util.NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT) } // NewHTTPClient returns a new HTTP client. diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 508b4c62cab..d9245035f45 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -279,7 +279,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. if len(stores) == 0 { err := errors.New("No StoreAPIs matched for this query") - level.Warn(reqLogger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) + level.Debug(reqLogger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) if sendErr := srv.Send(storepb.NewWarnSeriesResponse(err)); sendErr != nil { level.Error(reqLogger).Log("err", sendErr) return status.Error(codes.Unknown, errors.Wrap(sendErr, "send series response").Error()) diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/copy.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/copy.go new file mode 100644 index 00000000000..6464cd02ba8 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/copy.go @@ -0,0 +1,55 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package e2eutil + +import ( + "io" + "os" + "path/filepath" + "testing" + + "github.com/efficientgo/core/testutil" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/runutil" +) + +func Copy(t testing.TB, src, dst string) { + testutil.Ok(t, copyRecursive(src, dst)) +} + +func copyRecursive(src, dst string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + + if info.IsDir() { + return os.MkdirAll(filepath.Join(dst, relPath), os.ModePerm) + } + + if !info.Mode().IsRegular() { + return errors.Errorf("%s is not a regular file", path) + } + + source, err := os.Open(filepath.Clean(path)) + if err != nil { + return err + } + defer runutil.CloseWithErrCapture(&err, source, "close file") + + destination, err := os.Create(filepath.Join(dst, relPath)) + if err != nil { + return err + } + defer runutil.CloseWithErrCapture(&err, destination, "close file") + + _, err = io.Copy(destination, source) + return err + }) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/port.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/port.go new file mode 100644 index 00000000000..986f1c7d7fd --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/port.go @@ -0,0 +1,20 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package e2eutil + +import "net" + +// FreePort returns port that is free now. 
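As an aside on the new Copy helper above (the FreePort body follows): it recursively copies a fixture directory, for example a pre-built TSDB block, into a test's working directory, and it fails the test on any error, including non-regular files in the source tree. A hypothetical use, where the fixture path is only an example:

import (
	"testing"

	"github.com/thanos-io/thanos/pkg/testutil/e2eutil"
)

func TestWithFixtureBlock(t *testing.T) {
	dst := t.TempDir()
	// "testdata/block" is a hypothetical fixture directory checked into the repo.
	e2eutil.Copy(t, "testdata/block", dst)
	// ... run the code under test against dst ...
}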
+func FreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", ":0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + return l.Addr().(*net.TCPAddr).Port, l.Close() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go new file mode 100644 index 00000000000..8dd4405acc5 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go @@ -0,0 +1,645 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package e2eutil + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math" + "math/rand" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "sort" + "strings" + "syscall" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/index" + "golang.org/x/sync/errgroup" + + "github.com/efficientgo/core/testutil" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/runutil" +) + +const ( + defaultPrometheusVersion = "v0.37.0" + defaultAlertmanagerVersion = "v0.20.0" + defaultMinioVersion = "RELEASE.2022-07-30T05-21-40Z" + + // Space delimited list of versions. + promPathsEnvVar = "THANOS_TEST_PROMETHEUS_PATHS" + alertmanagerBinEnvVar = "THANOS_TEST_ALERTMANAGER_PATH" + minioBinEnvVar = "THANOS_TEST_MINIO_PATH" + + // A placeholder for actual Prometheus instance address in the scrape config. + PromAddrPlaceHolder = "PROMETHEUS_ADDRESS" +) + +func PrometheusBinary() string { + return "prometheus-" + defaultPrometheusVersion +} + +func AlertmanagerBinary() string { + b := os.Getenv(alertmanagerBinEnvVar) + if b == "" { + return fmt.Sprintf("alertmanager-%s", defaultAlertmanagerVersion) + } + return b +} + +func MinioBinary() string { + b := os.Getenv(minioBinEnvVar) + if b == "" { + return fmt.Sprintf("minio-%s", defaultMinioVersion) + } + return b +} + +// Prometheus represents a test instance for integration testing. +// It can be populated with data before being started. +type Prometheus struct { + dir string + db *tsdb.DB + prefix string + binPath string + + running bool + cmd *exec.Cmd + disabledCompaction bool + addr string + + config string +} + +func NewTSDB() (*tsdb.DB, error) { + dir, err := os.MkdirTemp("", "prometheus-test") + if err != nil { + return nil, err + } + opts := tsdb.DefaultOptions() + opts.RetentionDuration = math.MaxInt64 + return tsdb.Open(dir, nil, nil, opts, nil) +} + +func ForeachPrometheus(t *testing.T, testFn func(t testing.TB, p *Prometheus)) { + paths := os.Getenv(promPathsEnvVar) + if paths == "" { + paths = PrometheusBinary() + } + + for _, path := range strings.Split(paths, " ") { + if ok := t.Run(path, func(t *testing.T) { + p, err := newPrometheus(path, "") + testutil.Ok(t, err) + + testFn(t, p) + testutil.Ok(t, p.Stop()) + }); !ok { + return + } + } +} + +// NewPrometheus creates a new test Prometheus instance that will listen on local address. +// Use ForeachPrometheus if you want to test against set of Prometheus versions. 
+// TODO(bwplotka): Improve it with https://github.com/thanos-io/thanos/issues/758. +func NewPrometheus() (*Prometheus, error) { + return newPrometheus("", "") +} + +// NewPrometheusOnPath creates a new test Prometheus instance that will listen on local address and given prefix path. +func NewPrometheusOnPath(prefix string) (*Prometheus, error) { + return newPrometheus("", prefix) +} + +func newPrometheus(binPath, prefix string) (*Prometheus, error) { + if binPath == "" { + binPath = PrometheusBinary() + } + + db, err := NewTSDB() + if err != nil { + return nil, err + } + + // Just touch an empty config file. We don't need to actually scrape anything. + _, err = os.Create(filepath.Join(db.Dir(), "prometheus.yml")) + if err != nil { + return nil, err + } + + return &Prometheus{ + dir: db.Dir(), + db: db, + prefix: prefix, + binPath: binPath, + addr: "", + }, nil +} + +// Start running the Prometheus instance and return. +func (p *Prometheus) Start() error { + if p.running { + return errors.New("Already started") + } + + if err := p.db.Close(); err != nil { + return err + } + return p.start() +} + +func (p *Prometheus) start() error { + p.running = true + + port, err := FreePort() + if err != nil { + return err + } + + var extra []string + if p.disabledCompaction { + extra = append(extra, + "--storage.tsdb.min-block-duration=2h", + "--storage.tsdb.max-block-duration=2h", + ) + } + p.addr = fmt.Sprintf("localhost:%d", port) + // Write the final config to the config file. + // The address placeholder will be replaced with the actual address. + if err := p.writeConfig(strings.ReplaceAll(p.config, PromAddrPlaceHolder, p.addr)); err != nil { + return err + } + args := append([]string{ + "--storage.tsdb.retention=2d", // Pass retention cause prometheus since 2.8.0 don't show default value for that flags in web/api: https://github.com/prometheus/prometheus/pull/5433. + "--storage.tsdb.path=" + p.db.Dir(), + "--web.listen-address=" + p.addr, + "--web.route-prefix=" + p.prefix, + "--web.enable-admin-api", + "--config.file=" + filepath.Join(p.db.Dir(), "prometheus.yml"), + }, extra...) + + p.cmd = exec.Command(p.binPath, args...) + p.cmd.SysProcAttr = SysProcAttr() + + go func() { + if b, err := p.cmd.CombinedOutput(); err != nil { + fmt.Fprintln(os.Stderr, "running Prometheus failed", err) + fmt.Fprintln(os.Stderr, string(b)) + } + }() + time.Sleep(2 * time.Second) + + return nil +} + +func (p *Prometheus) WaitPrometheusUp(ctx context.Context, logger log.Logger) error { + if !p.running { + return errors.New("method Start was not invoked.") + } + return runutil.Retry(time.Second, ctx.Done(), func() error { + r, err := http.Get(fmt.Sprintf("http://%s/-/ready", p.addr)) + if err != nil { + return err + } + defer runutil.ExhaustCloseWithLogOnErr(logger, r.Body, "failed to exhaust and close body") + + if r.StatusCode != 200 { + return errors.Errorf("Got non 200 response: %v", r.StatusCode) + } + return nil + }) +} + +func (p *Prometheus) Restart() error { + if err := p.cmd.Process.Signal(syscall.SIGTERM); err != nil { + return errors.Wrap(err, "failed to kill Prometheus. Kill it manually") + } + _ = p.cmd.Wait() + return p.start() +} + +// Dir returns TSDB dir. +func (p *Prometheus) Dir() string { + return p.dir +} + +// Addr returns correct address after Start method. +func (p *Prometheus) Addr() string { + return p.addr + p.prefix +} + +func (p *Prometheus) DisableCompaction() { + p.disabledCompaction = true +} + +// SetConfig updates the contents of the config. 
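Together with SetConfig just below, the intended lifecycle of this harness is: create the instance, set a config (the PROMETHEUS_ADDRESS placeholder is substituted with the real listen address during Start), start the binary, wait for readiness, and stop it. A hedged sketch of that flow in a test, assuming the Prometheus binary is discoverable as described at the top of this file:

import (
	"context"
	"testing"
	"time"

	"github.com/efficientgo/core/testutil"
	"github.com/go-kit/log"

	"github.com/thanos-io/thanos/pkg/testutil/e2eutil"
)

func TestPrometheusLifecycle(t *testing.T) {
	p, err := e2eutil.NewPrometheus()
	testutil.Ok(t, err)
	t.Cleanup(func() { testutil.Ok(t, p.Stop()) })

	// PromAddrPlaceHolder is replaced with the actual listen address on Start.
	p.SetConfig(`
global:
  external_labels:
    prometheus: test
scrape_configs:
- job_name: self
  static_configs:
  - targets: ['` + e2eutil.PromAddrPlaceHolder + `']
`)

	testutil.Ok(t, p.Start())

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	testutil.Ok(t, p.WaitPrometheusUp(ctx, log.NewNopLogger()))

	_ = p.Addr() // host:port (plus optional prefix) the instance listens on
}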
+func (p *Prometheus) SetConfig(s string) { + p.config = s +} + +// writeConfig writes the Prometheus config to the config file. +func (p *Prometheus) writeConfig(config string) (err error) { + f, err := os.Create(filepath.Join(p.dir, "prometheus.yml")) + if err != nil { + return err + } + defer runutil.CloseWithErrCapture(&err, f, "prometheus config") + _, err = f.Write([]byte(config)) + return err +} + +// Stop terminates Prometheus and clean up its data directory. +func (p *Prometheus) Stop() error { + if !p.running { + return nil + } + + if p.cmd.Process != nil { + if err := p.cmd.Process.Signal(syscall.SIGTERM); err != nil { + return errors.Wrapf(err, "failed to Prometheus. Kill it manually and clean %s dir", p.db.Dir()) + } + } + time.Sleep(time.Second / 2) + return p.cleanup() +} + +func (p *Prometheus) cleanup() error { + p.running = false + return os.RemoveAll(p.db.Dir()) +} + +// Appender returns a new appender to populate the Prometheus instance with data. +// All appenders must be closed before Start is called and no new ones must be opened +// afterwards. +func (p *Prometheus) Appender() storage.Appender { + if p.running { + panic("Appender must not be called after start") + } + return p.db.Appender(context.Background()) +} + +// CreateEmptyBlock produces empty block like it was the case before fix: https://github.com/prometheus/tsdb/pull/374. +// (Prometheus pre v2.7.0). +func CreateEmptyBlock(dir string, mint, maxt int64, extLset labels.Labels, resolution int64) (ulid.ULID, error) { + entropy := rand.New(rand.NewSource(time.Now().UnixNano())) + uid := ulid.MustNew(ulid.Now(), entropy) + + if err := os.Mkdir(path.Join(dir, uid.String()), os.ModePerm); err != nil { + return ulid.ULID{}, errors.Wrap(err, "close index") + } + + if err := os.Mkdir(path.Join(dir, uid.String(), "chunks"), os.ModePerm); err != nil { + return ulid.ULID{}, errors.Wrap(err, "close index") + } + + w, err := index.NewWriter(context.Background(), path.Join(dir, uid.String(), "index")) + if err != nil { + return ulid.ULID{}, errors.Wrap(err, "new index") + } + + if err := w.Close(); err != nil { + return ulid.ULID{}, errors.Wrap(err, "close index") + } + + m := tsdb.BlockMeta{ + Version: 1, + ULID: uid, + MinTime: mint, + MaxTime: maxt, + Compaction: tsdb.BlockMetaCompaction{ + Level: 1, + Sources: []ulid.ULID{uid}, + }, + } + b, err := json.Marshal(&m) + if err != nil { + return ulid.ULID{}, err + } + + if err := os.WriteFile(path.Join(dir, uid.String(), "meta.json"), b, os.ModePerm); err != nil { + return ulid.ULID{}, errors.Wrap(err, "saving meta.json") + } + + if _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(dir, uid.String()), metadata.Thanos{ + Labels: extLset.Map(), + Downsample: metadata.ThanosDownsample{Resolution: resolution}, + Source: metadata.TestSource, + }, nil); err != nil { + return ulid.ULID{}, errors.Wrap(err, "finalize block") + } + + return uid, nil +} + +// CreateBlock writes a block with the given series and numSamples samples each. +// Samples will be in the time range [mint, maxt). +func CreateBlock( + ctx context.Context, + dir string, + series []labels.Labels, + numSamples int, + mint, maxt int64, + extLset labels.Labels, + resolution int64, + hashFunc metadata.HashFunc, +) (id ulid.ULID, err error) { + return createBlock(ctx, dir, series, numSamples, mint, maxt, extLset, resolution, false, hashFunc) +} + +// CreateBlockWithTombstone is same as CreateBlock but leaves tombstones which mimics the Prometheus local block. 
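CreateBlock, together with the delayed and tombstone variants that follow, is the fixture generator the new compactor tests lean on: it appends numSamples random samples per series into a temporary head, compacts the head into a block under dir, and injects Thanos metadata (external labels, resolution, optional file hashes). A hypothetical call producing a small two-series raw block:

import (
	"context"
	"testing"
	"time"

	"github.com/efficientgo/core/testutil"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/testutil/e2eutil"
)

func TestCreateFixtureBlock(t *testing.T) {
	dir := t.TempDir()
	maxt := time.Now().UnixMilli()
	mint := maxt - (2 * time.Hour).Milliseconds()

	id, err := e2eutil.CreateBlock(
		context.Background(),
		dir,
		[]labels.Labels{
			labels.FromStrings("__name__", "up", "job", "a"),
			labels.FromStrings("__name__", "up", "job", "b"),
		},
		120,        // samples per series
		mint, maxt, // samples land in [mint, maxt)
		labels.FromStrings("cluster", "test"), // external labels
		0,                                     // raw resolution
		metadata.NoneFunc,                     // no per-file hashes
	)
	testutil.Ok(t, err)
	_ = id // ULID of the block written under dir
}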
+func CreateBlockWithTombstone( + ctx context.Context, + dir string, + series []labels.Labels, + numSamples int, + mint, maxt int64, + extLset labels.Labels, + resolution int64, + hashFunc metadata.HashFunc, +) (id ulid.ULID, err error) { + return createBlock(ctx, dir, series, numSamples, mint, maxt, extLset, resolution, true, hashFunc) +} + +// CreateBlockWithBlockDelay writes a block with the given series and numSamples samples each. +// Samples will be in the time range [mint, maxt) +// Block ID will be created with a delay of time duration blockDelay. +func CreateBlockWithBlockDelay( + ctx context.Context, + dir string, + series []labels.Labels, + numSamples int, + mint, maxt int64, + blockDelay time.Duration, + extLset labels.Labels, + resolution int64, + hashFunc metadata.HashFunc, +) (ulid.ULID, error) { + blockID, err := createBlock(ctx, dir, series, numSamples, mint, maxt, extLset, resolution, false, hashFunc) + if err != nil { + return ulid.ULID{}, errors.Wrap(err, "block creation") + } + + id, err := ulid.New(uint64(timestamp.FromTime(timestamp.Time(int64(blockID.Time())).Add(-blockDelay))), bytes.NewReader(blockID.Entropy())) + if err != nil { + return ulid.ULID{}, errors.Wrap(err, "create block id") + } + + m, err := metadata.ReadFromDir(path.Join(dir, blockID.String())) + if err != nil { + return ulid.ULID{}, errors.Wrap(err, "open meta file") + } + + m.ULID = id + m.Compaction.Sources = []ulid.ULID{id} + + if err := m.WriteToDir(log.NewNopLogger(), path.Join(dir, blockID.String())); err != nil { + return ulid.ULID{}, errors.Wrap(err, "write meta.json file") + } + + return id, os.Rename(path.Join(dir, blockID.String()), path.Join(dir, id.String())) +} + +func createBlock( + ctx context.Context, + dir string, + series []labels.Labels, + numSamples int, + mint, maxt int64, + extLset labels.Labels, + resolution int64, + tombstones bool, + hashFunc metadata.HashFunc, +) (id ulid.ULID, err error) { + headOpts := tsdb.DefaultHeadOptions() + headOpts.ChunkDirRoot = filepath.Join(dir, "chunks") + headOpts.ChunkRange = 10000000000 + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) + if err != nil { + return id, errors.Wrap(err, "create head block") + } + defer func() { + runutil.CloseWithErrCapture(&err, h, "TSDB Head") + if e := os.RemoveAll(headOpts.ChunkDirRoot); e != nil { + err = errors.Wrap(e, "delete chunks dir") + } + }() + + var g errgroup.Group + var timeStepSize = (maxt - mint) / int64(numSamples+1) + var batchSize = len(series) / runtime.GOMAXPROCS(0) + + for len(series) > 0 { + l := batchSize + if len(series) < 1000 { + l = len(series) + } + batch := series[:l] + series = series[l:] + + g.Go(func() error { + t := mint + + for i := 0; i < numSamples; i++ { + app := h.Appender(ctx) + + for _, lset := range batch { + _, err := app.Append(0, lset, t, rand.Float64()) + if err != nil { + if rerr := app.Rollback(); rerr != nil { + err = errors.Wrapf(err, "rollback failed: %v", rerr) + } + + return errors.Wrap(err, "add sample") + } + } + if err := app.Commit(); err != nil { + return errors.Wrap(err, "commit") + } + t += timeStepSize + } + return nil + }) + } + if err := g.Wait(); err != nil { + return id, err + } + c, err := tsdb.NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{maxt - mint}, nil, nil) + if err != nil { + return id, errors.Wrap(err, "create compactor") + } + + id, err = c.Write(dir, h, mint, maxt, nil) + if err != nil { + return id, errors.Wrap(err, "write block") + } + + if id.Compare(ulid.ULID{}) == 0 { + return id, errors.Errorf("nothing to 
write, asked for %d samples", numSamples) + } + + blockDir := filepath.Join(dir, id.String()) + + files := []metadata.File{} + if hashFunc != metadata.NoneFunc { + paths := []string{} + if err := filepath.Walk(blockDir, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + return nil + } + paths = append(paths, path) + return nil + }); err != nil { + return id, errors.Wrapf(err, "walking %s", dir) + } + + for _, p := range paths { + pHash, err := metadata.CalculateHash(p, metadata.SHA256Func, log.NewNopLogger()) + if err != nil { + return id, errors.Wrapf(err, "calculating hash of %s", blockDir+p) + } + files = append(files, metadata.File{ + RelPath: strings.TrimPrefix(p, blockDir+"/"), + Hash: &pHash, + }) + } + } + + if _, err = metadata.InjectThanos(log.NewNopLogger(), blockDir, metadata.Thanos{ + Labels: extLset.Map(), + Downsample: metadata.ThanosDownsample{Resolution: resolution}, + Source: metadata.TestSource, + Files: files, + }, nil); err != nil { + return id, errors.Wrap(err, "finalize block") + } + + if !tombstones { + if err = os.Remove(filepath.Join(dir, id.String(), "tombstones")); err != nil { + return id, errors.Wrap(err, "remove tombstones") + } + } + + return id, nil +} + +var indexFilename = "index" + +type indexWriterSeries struct { + labels labels.Labels + chunks []chunks.Meta // series file offset of chunks +} + +type indexWriterSeriesSlice []*indexWriterSeries + +// PutOutOfOrderIndex updates the index in blockDir with an index containing an out-of-order chunk +// copied from https://github.com/prometheus/prometheus/blob/b1ed4a0a663d0c62526312311c7529471abbc565/tsdb/index/index_test.go#L346 +func PutOutOfOrderIndex(blockDir string, minTime int64, maxTime int64) error { + + if minTime >= maxTime || minTime+4 >= maxTime { + return fmt.Errorf("minTime must be at least 4 less than maxTime to not create overlapping chunks") + } + + lbls := []labels.Labels{ + []labels.Label{ + {Name: "lbl1", Value: "1"}, + }, + } + + // Sort labels as the index writer expects series in sorted order. + sort.Sort(labels.Slice(lbls)) + + symbols := map[string]struct{}{} + for _, lset := range lbls { + for _, l := range lset { + symbols[l.Name] = struct{}{} + symbols[l.Value] = struct{}{} + } + } + + var input indexWriterSeriesSlice + + // Generate ChunkMetas for every label set. + // Ignoring gosec as it is only used for tests. + for _, lset := range lbls { + var metas []chunks.Meta + // only need two chunks that are out-of-order + chk1 := chunks.Meta{ + MinTime: maxTime - 2, + MaxTime: maxTime - 1, + Ref: chunks.ChunkRef(rand.Uint64()), // nolint:gosec + Chunk: chunkenc.NewXORChunk(), + } + metas = append(metas, chk1) + chk2 := chunks.Meta{ + MinTime: minTime + 1, + MaxTime: minTime + 2, + Ref: chunks.ChunkRef(rand.Uint64()), // nolint:gosec + Chunk: chunkenc.NewXORChunk(), + } + metas = append(metas, chk2) + + input = append(input, &indexWriterSeries{ + labels: lset, + chunks: metas, + }) + } + + iw, err := index.NewWriter(context.Background(), filepath.Join(blockDir, indexFilename)) + if err != nil { + return err + } + + syms := []string{} + for s := range symbols { + syms = append(syms, s) + } + sort.Strings(syms) + for _, s := range syms { + if err := iw.AddSymbol(s); err != nil { + return err + } + } + + // Population procedure as done by compaction. 
+ var ( + postings = index.NewMemPostings() + values = map[string]map[string]struct{}{} + ) + + for i, s := range input { + if err := iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...); err != nil { + return err + } + + for _, l := range s.labels { + valset, ok := values[l.Name] + if !ok { + valset = map[string]struct{}{} + values[l.Name] = valset + } + valset[l.Value] = struct{}{} + } + postings.Add(storage.SeriesRef(i), s.labels) + } + + return iw.Close() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr.go new file mode 100644 index 00000000000..53aaa7039f9 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr.go @@ -0,0 +1,13 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +//go:build !linux +// +build !linux + +package e2eutil + +import "syscall" + +func SysProcAttr() *syscall.SysProcAttr { + return &syscall.SysProcAttr{} +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr_linux.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr_linux.go new file mode 100644 index 00000000000..dd77ed32a18 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/sysprocattr_linux.go @@ -0,0 +1,13 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package e2eutil + +import "syscall" + +func SysProcAttr() *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + // For linux only, kill this if the go test process dies before the cleanup. + Pdeathsig: syscall.SIGKILL, + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go deleted file mode 100644 index 5f7dc15ea71..00000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package testutil - -import ( - "testing" -) - -// TB represents union of test and benchmark. -// This allows the same test suite to be run by both benchmark and test, helping to reuse more code. -// The reason is that usually benchmarks are not being run on CI, especially for short tests, so you need to recreate -// usually similar tests for `Test(t *testing.T)` methods. Example of usage is presented here: -// -// func TestTestOrBench(t *testing.T) { -// tb := NewTB(t) -// tb.Run("1", func(tb TB) { testorbenchComplexTest(tb) }) -// tb.Run("2", func(tb TB) { testorbenchComplexTest(tb) }) -// } -// -// func BenchmarkTestOrBench(b *testing.B) { -// tb := NewTB(t) -// tb.Run("1", func(tb TB) { testorbenchComplexTest(tb) }) -// tb.Run("2", func(tb TB) { testorbenchComplexTest(tb) }) -// } -type TB interface { - testing.TB - IsBenchmark() bool - Run(name string, f func(t TB)) bool - - SetBytes(n int64) - N() int - ResetTimer() -} - -// tb implements TB as well as testing.TB interfaces. -type tb struct { - testing.TB -} - -// NewTB creates tb from testing.TB. -func NewTB(t testing.TB) TB { return &tb{TB: t} } - -// Run benchmarks/tests f as a subbenchmark/subtest with the given name. It reports -// whether there were any failures. -// -// A subbenchmark/subtest is like any other benchmark/test. 
-func (t *tb) Run(name string, f func(t TB)) bool { - if b, ok := t.TB.(*testing.B); ok { - return b.Run(name, func(nested *testing.B) { f(&tb{TB: nested}) }) - } - if t, ok := t.TB.(*testing.T); ok { - return t.Run(name, func(nested *testing.T) { f(&tb{TB: nested}) }) - } - panic("not a benchmark and not a test") -} - -// N returns number of iterations to do for benchmark, 1 in case of test. -func (t *tb) N() int { - if b, ok := t.TB.(*testing.B); ok { - return b.N - } - return 1 -} - -// SetBytes records the number of bytes processed in a single operation for benchmark, noop otherwise. -// If this is called, the benchmark will report ns/op and MB/s. -func (t *tb) SetBytes(n int64) { - if b, ok := t.TB.(*testing.B); ok { - b.SetBytes(n) - } -} - -// ResetTimer resets a timer, if it's a benchmark, noop otherwise. -func (t *tb) ResetTimer() { - if b, ok := t.TB.(*testing.B); ok { - b.ResetTimer() - } -} - -// IsBenchmark returns true if it's a benchmark. -func (t *tb) IsBenchmark() bool { - _, ok := t.TB.(*testing.B) - return ok -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go deleted file mode 100644 index 4de113feddd..00000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package testutil - -import ( - "context" - "fmt" - "math/rand" - "path/filepath" - "reflect" - "runtime" - "runtime/debug" - "sort" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" - "github.com/pmezard/go-difflib/difflib" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/index" - "go.uber.org/goleak" -) - -// Assert fails the test if the condition is false. -func Assert(tb testing.TB, condition bool, v ...interface{}) { - tb.Helper() - if condition { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d: "+msg+"\033[39m\n\n", filepath.Base(file), line) -} - -// Ok fails the test if an err is not nil. -func Ok(tb testing.TB, err error, v ...interface{}) { - tb.Helper() - if err == nil { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) -} - -// NotOk fails the test if an err is nil. -func NotOk(tb testing.TB, err error, v ...interface{}) { - tb.Helper() - if err != nil { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line) -} - -// Equals fails the test if exp is not equal to act. -func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { - tb.Helper() - if reflect.DeepEqual(exp, act) { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) 
- } - tb.Fatal(sprintfWithLimit("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))) -} - -// Contains fails the test if needle is not contained within haystack, if haystack or needle is -// an empty slice, or if needle is longer than haystack. -func Contains(tb testing.TB, haystack, needle []string) { - _, file, line, _ := runtime.Caller(1) - - if !contains(haystack, needle) { - tb.Fatalf(sprintfWithLimit("\033[31m%s:%d: %#v does not contain %#v\033[39m\n\n", filepath.Base(file), line, haystack, needle)) - } -} - -func contains(haystack, needle []string) bool { - if len(haystack) == 0 || len(needle) == 0 { - return false - } - - if len(haystack) < len(needle) { - return false - } - - for i := 0; i < len(haystack); i++ { - outer := i - - for j := 0; j < len(needle); j++ { - // End of the haystack but not the end of the needle, end - if outer == len(haystack) { - return false - } - - // No match, try the next index of the haystack - if haystack[outer] != needle[j] { - break - } - - // End of the needle and it still matches, end - if j == len(needle)-1 { - return true - } - - // This element matches between the two slices, try the next one - outer++ - } - } - - return false -} - -func sprintfWithLimit(act string, v ...interface{}) string { - s := fmt.Sprintf(act, v...) - if len(s) > 10000 { - return s[:10000] + "...(output trimmed)" - } - return s -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - return "\n\nDiff:\n" + diff -} - -// GatherAndCompare compares the metrics of a Gatherers pair. -func GatherAndCompare(t *testing.T, g1, g2 prometheus.Gatherer, filter string) { - g1m, err := g1.Gather() - Ok(t, err) - g2m, err := g2.Gather() - Ok(t, err) - - var m1 *dto.MetricFamily - for _, m := range g1m { - if *m.Name == filter { - m1 = m - } - } - var m2 *dto.MetricFamily - for _, m := range g2m { - if *m.Name == filter { - m2 = m - } - } - Equals(t, m1.String(), m2.String()) -} - -// TolerantVerifyLeakMain verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. 
-func TolerantVerifyLeakMain(m *testing.M) { - goleak.VerifyTestMain(m, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - // https://github.com/baidubce/bce-sdk-go/blob/9a8c1139e6a3ad23080b9b8c51dec88df8ce3cda/util/log/logger.go#L359 - goleak.IgnoreTopFunction("github.com/baidubce/bce-sdk-go/util/log.NewLogger.func1"), - ) -} - -// TolerantVerifyLeak verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeak(t *testing.T) { - goleak.VerifyNone(t, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - // https://github.com/baidubce/bce-sdk-go/blob/9a8c1139e6a3ad23080b9b8c51dec88df8ce3cda/util/log/logger.go#L359 - goleak.IgnoreTopFunction("github.com/baidubce/bce-sdk-go/util/log.NewLogger.func1"), - ) -} - -// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. -func FaultOrPanicToErr(f func()) (err error) { - // Set this go routine to panic on segfault to allow asserting on those. - debug.SetPanicOnFault(true) - defer func() { - if r := recover(); r != nil { - err = errors.Errorf("invoked function panicked or caused segmentation fault: %v", r) - } - debug.SetPanicOnFault(false) - }() - - f() - - return err -} - -var indexFilename = "index" - -type indexWriterSeries struct { - labels labels.Labels - chunks []chunks.Meta // series file offset of chunks -} - -type indexWriterSeriesSlice []*indexWriterSeries - -// PutOutOfOrderIndex updates the index in blockDir with an index containing an out-of-order chunk -// copied from https://github.com/prometheus/prometheus/blob/b1ed4a0a663d0c62526312311c7529471abbc565/tsdb/index/index_test.go#L346 -func PutOutOfOrderIndex(blockDir string, minTime int64, maxTime int64) error { - - if minTime >= maxTime || minTime+4 >= maxTime { - return fmt.Errorf("minTime must be at least 4 less than maxTime to not create overlapping chunks") - } - - lbls := []labels.Labels{ - []labels.Label{ - {Name: "lbl1", Value: "1"}, - }, - } - - // Sort labels as the index writer expects series in sorted order. - sort.Sort(labels.Slice(lbls)) - - symbols := map[string]struct{}{} - for _, lset := range lbls { - for _, l := range lset { - symbols[l.Name] = struct{}{} - symbols[l.Value] = struct{}{} - } - } - - var input indexWriterSeriesSlice - - // Generate ChunkMetas for every label set. 
- for _, lset := range lbls { - var metas []chunks.Meta - // only need two chunks that are out-of-order - chk1 := chunks.Meta{ - MinTime: maxTime - 2, - MaxTime: maxTime - 1, - Ref: chunks.ChunkRef(rand.Uint64()), - Chunk: chunkenc.NewXORChunk(), - } - metas = append(metas, chk1) - chk2 := chunks.Meta{ - MinTime: minTime + 1, - MaxTime: minTime + 2, - Ref: chunks.ChunkRef(rand.Uint64()), - Chunk: chunkenc.NewXORChunk(), - } - metas = append(metas, chk2) - - input = append(input, &indexWriterSeries{ - labels: lset, - chunks: metas, - }) - } - - iw, err := index.NewWriter(context.Background(), filepath.Join(blockDir, indexFilename)) - if err != nil { - return err - } - - syms := []string{} - for s := range symbols { - syms = append(syms, s) - } - sort.Strings(syms) - for _, s := range syms { - if err := iw.AddSymbol(s); err != nil { - return err - } - } - - // Population procedure as done by compaction. - var ( - postings = index.NewMemPostings() - values = map[string]map[string]struct{}{} - ) - - for i, s := range input { - if err := iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...); err != nil { - return err - } - - for _, l := range s.labels { - valset, ok := values[l.Name] - if !ok { - valset = map[string]struct{}{} - values[l.Name] = valset - } - valset[l.Value] = struct{}{} - } - postings.Add(storage.SeriesRef(i), s.labels) - } - - return iw.Close() -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/tls/options.go b/vendor/github.com/thanos-io/thanos/pkg/tls/options.go index ba032c859c2..362f73740bb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tls/options.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tls/options.go @@ -17,8 +17,8 @@ import ( ) // NewServerConfig provides new server TLS configuration. -func NewServerConfig(logger log.Logger, cert, key, clientCA string) (*tls.Config, error) { - if key == "" && cert == "" { +func NewServerConfig(logger log.Logger, certPath, keyPath, clientCA string) (*tls.Config, error) { + if keyPath == "" && certPath == "" { if clientCA != "" { return nil, errors.New("when a client CA is used a server key and certificate must also be provided") } @@ -29,17 +29,23 @@ func NewServerConfig(logger log.Logger, cert, key, clientCA string) (*tls.Config level.Info(logger).Log("msg", "enabling server side TLS") - if key == "" || cert == "" { + if keyPath == "" || certPath == "" { return nil, errors.New("both server key and certificate must be provided") } tlsCfg := &tls.Config{ MinVersion: tls.VersionTLS13, } + // Certificate is loaded during server startup to check for any errors. 
+ certificate, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, errors.Wrap(err, "server credentials") + } mngr := &serverTLSManager{ - srvCertPath: cert, - srvKeyPath: key, + srvCertPath: certPath, + srvKeyPath: keyPath, + srvCert: &certificate, } tlsCfg.GetCertificate = mngr.getCertificate diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go index 5f4c081b54e..aa15553fc09 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go @@ -6,7 +6,7 @@ package tracing import ( "testing" - "github.com/thanos-io/thanos/pkg/testutil" + "github.com/efficientgo/core/testutil" opentracing "github.com/opentracing/opentracing-go" "go.opentelemetry.io/otel/sdk/trace/tracetest" diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go index 6c498f3d482..4b3f6ad9f72 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go +++ b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go @@ -7,6 +7,7 @@ import ( bytes "bytes" context "context" fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -215,26 +216,28 @@ func init() { func init() { proto.RegisterFile("httpgrpc/httpgrpc.proto", fileDescriptor_6670c8e151665986) } var fileDescriptor_6670c8e151665986 = []byte{ - // 295 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0x28, 0x29, 0x29, - 0x48, 0x2f, 0x2a, 0x48, 0xd6, 0x87, 0x31, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x38, 0x60, - 0x7c, 0xa5, 0x72, 0x2e, 0x6e, 0x8f, 0x90, 0x90, 0x80, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, - 0x21, 0x31, 0x2e, 0xb6, 0xdc, 0xd4, 0x92, 0x8c, 0xfc, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, - 0x20, 0x28, 0x4f, 0x48, 0x80, 0x8b, 0xb9, 0xb4, 0x28, 0x47, 0x82, 0x09, 0x2c, 0x08, 0x62, 0x0a, - 0x69, 0x71, 0xb1, 0x67, 0xa4, 0x26, 0xa6, 0xa4, 0x16, 0x15, 0x4b, 0x30, 0x2b, 0x30, 0x6b, 0x70, - 0x1b, 0x09, 0xe8, 0xc1, 0x2d, 0xf1, 0x00, 0x4b, 0x04, 0xc1, 0x14, 0x08, 0x09, 0x71, 0xb1, 0x24, - 0xe5, 0xa7, 0x54, 0x4a, 0xb0, 0x28, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x4a, 0x49, 0x5c, 0x3c, - 0x10, 0x8b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x41, 0x6a, 0x9c, 0xf3, 0x53, 0x52, 0xc1, 0xf6, - 0xb2, 0x06, 0x81, 0xd9, 0xc8, 0x76, 0x30, 0x11, 0x6b, 0x07, 0x33, 0x92, 0x1d, 0x46, 0x5c, 0x6c, - 0x10, 0x65, 0x20, 0xf7, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x05, 0x62, 0x82, 0x7c, 0x5a, 0x96, 0x98, - 0x53, 0x9a, 0x0a, 0x31, 0x9a, 0x33, 0x08, 0xca, 0x33, 0x72, 0xe4, 0x62, 0x01, 0xb9, 0x4b, 0xc8, - 0x92, 0x8b, 0xcd, 0x23, 0x31, 0x2f, 0x25, 0x27, 0x55, 0x48, 0x14, 0xc9, 0x52, 0x44, 0x50, 0x49, - 0x89, 0xa1, 0x0b, 0x43, 0x3c, 0xa2, 0xc4, 0xe0, 0x64, 0x77, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, - 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0x36, 0x3c, 0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, - 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, - 0xc3, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, - 0x21, 0x0a, 0x1e, 0x27, 0x49, 0x6c, 0xe0, 0x48, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xd9, - 0x2e, 0xd7, 0x22, 0xbf, 0x01, 0x00, 0x00, + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x3f, 0x4f, 0xc2, 
0x40, + 0x14, 0xc0, 0x7b, 0x14, 0xab, 0x1c, 0x0c, 0xe4, 0xa2, 0xd8, 0x30, 0x5c, 0x08, 0x89, 0x09, 0x71, + 0xa0, 0x09, 0x4e, 0x8e, 0xea, 0xc2, 0xa6, 0xb9, 0x30, 0xb9, 0xb5, 0xf4, 0xa5, 0x35, 0x50, 0x5e, + 0x6d, 0xaf, 0x10, 0x36, 0x3f, 0x82, 0x1f, 0xc3, 0x8f, 0xe2, 0xc8, 0xc8, 0x28, 0xe5, 0x0b, 0xf8, + 0x11, 0xcc, 0x5d, 0x29, 0x36, 0x4e, 0x6e, 0xbf, 0xf7, 0x27, 0xf7, 0x7b, 0xef, 0x1e, 0xbd, 0x0c, + 0xa5, 0x8c, 0x83, 0x24, 0x9e, 0x3a, 0x25, 0x0c, 0xe3, 0x04, 0x25, 0xb2, 0xb3, 0x32, 0xee, 0x9e, + 0x07, 0x18, 0xa0, 0x4e, 0x3a, 0x8a, 0x8a, 0x7a, 0x7f, 0x45, 0x9b, 0xe3, 0xc9, 0xe4, 0x49, 0xc0, + 0x6b, 0x06, 0xa9, 0x64, 0x1d, 0x6a, 0x45, 0x20, 0x43, 0xf4, 0x6d, 0xd2, 0x23, 0x83, 0x86, 0x38, + 0x44, 0xac, 0x4d, 0xcd, 0x2c, 0x99, 0xdb, 0x35, 0x9d, 0x54, 0xc8, 0xae, 0xe9, 0x69, 0x08, 0xae, + 0x0f, 0x49, 0x6a, 0x9b, 0x3d, 0x73, 0xd0, 0x1c, 0xb5, 0x87, 0x47, 0xf5, 0x58, 0x17, 0x44, 0xd9, + 0xc0, 0x18, 0xad, 0x7b, 0xe8, 0xaf, 0xed, 0x7a, 0x8f, 0x0c, 0x5a, 0x42, 0x73, 0xdf, 0xa3, 0xad, + 0x42, 0x9c, 0xc6, 0xb8, 0x48, 0x41, 0xf5, 0x3c, 0xa0, 0x0f, 0xda, 0x7b, 0x22, 0x34, 0x57, 0x1d, + 0xb5, 0xff, 0x3a, 0xcc, 0x8a, 0x63, 0x44, 0xad, 0xa2, 0x4d, 0xcd, 0x3f, 0x83, 0xf5, 0x61, 0x29, + 0x85, 0x6a, 0xd3, 0xa5, 0x3b, 0xcf, 0xa0, 0x78, 0xba, 0x21, 0x0e, 0xd1, 0xe8, 0x8e, 0xd6, 0xd5, + 0x5c, 0xec, 0x96, 0x5a, 0x63, 0x77, 0xe1, 0xcf, 0x81, 0x5d, 0x54, 0xa4, 0xbf, 0x5f, 0xd5, 0xed, + 0xfc, 0x4d, 0x17, 0x8b, 0xf4, 0x8d, 0xfb, 0xc7, 0xed, 0x8e, 0x1b, 0xdf, 0x3b, 0x4e, 0xde, 0x72, + 0x4e, 0x3e, 0x72, 0x4e, 0x3e, 0x73, 0x4e, 0x36, 0x39, 0x27, 0x5f, 0x39, 0x27, 0xef, 0x7b, 0x6e, + 0x6c, 0xf6, 0xdc, 0xd8, 0xee, 0xb9, 0xf1, 0x7c, 0x15, 0xbc, 0xc8, 0x30, 0xf3, 0x86, 0x53, 0x8c, + 0x9c, 0x15, 0xb8, 0x4b, 0x58, 0x61, 0x32, 0x4b, 0x9d, 0x29, 0x46, 0x11, 0x2e, 0x8e, 0xa7, 0xf4, + 0x2c, 0x7d, 0xab, 0x9b, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xb5, 0xfe, 0x5d, 0xe6, 0x01, + 0x00, 0x00, } func (this *HTTPRequest) Equal(that interface{}) bool { diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto index c9c267486d1..0313dbb625c 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto +++ b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto @@ -2,7 +2,16 @@ syntax = "proto3"; package httpgrpc; -option go_package = "httpgrpc"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.gostring_all) = true; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.goproto_unkeyed_all) = false; +option (gogoproto.goproto_unrecognized_all) = false; +option (gogoproto.goproto_sizecache_all) = false; +option go_package = "github.com/weaveworks/common/httpgrpc"; service HTTP { rpc Handle(HTTPRequest) returns (HTTPResponse) {}; diff --git a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go index d478e18a439..f84521f0190 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go +++ b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go @@ -17,7 +17,6 @@ import ( "github.com/sercand/kuberesolver" "golang.org/x/net/context" "google.golang.org/grpc" - "google.golang.org/grpc/balancer/roundrobin" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/logging" @@ -132,9 +131,10 @@ func NewClient(address string) (*Client, error) { if err != nil { return nil, err } + const grpcServiceConfig = `{"loadBalancingPolicy":"round_robin"}` dialOptions 
:= []grpc.DialOption{ - grpc.WithBalancerName(roundrobin.Name), + grpc.WithDefaultServiceConfig(grpcServiceConfig), grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient( otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), diff --git a/vendor/github.com/weaveworks/common/httpgrpc/tools.go b/vendor/github.com/weaveworks/common/httpgrpc/tools.go new file mode 100644 index 00000000000..866af4b6975 --- /dev/null +++ b/vendor/github.com/weaveworks/common/httpgrpc/tools.go @@ -0,0 +1,6 @@ +package httpgrpc + +import ( + // This is a workaround for go mod which fails to download gogoproto otherwise + _ "github.com/gogo/protobuf/gogoproto" +) diff --git a/vendor/github.com/weaveworks/common/instrument/instrument.go b/vendor/github.com/weaveworks/common/instrument/instrument.go index 1a1352c842c..07aa033c0c9 100644 --- a/vendor/github.com/weaveworks/common/instrument/instrument.go +++ b/vendor/github.com/weaveworks/common/instrument/instrument.go @@ -68,16 +68,9 @@ func (c *HistogramCollector) After(ctx context.Context, method, statusCode strin // (this will always work for a HistogramVec). func ObserveWithExemplar(ctx context.Context, histogram prometheus.Observer, seconds float64) { if traceID, ok := tracing.ExtractSampledTraceID(ctx); ok { - lbls := prometheus.Labels{"traceID": traceID} - if userID, err := user.ExtractUserID(ctx); err == nil { - lbls["user"] = userID - } - if orgID, err := user.ExtractOrgID(ctx); err == nil { - lbls["organization"] = orgID - } histogram.(prometheus.ExemplarObserver).ObserveWithExemplar( seconds, - lbls, + prometheus.Labels{"traceID": traceID}, ) return } diff --git a/vendor/github.com/weaveworks/common/middleware/logging.go b/vendor/github.com/weaveworks/common/middleware/logging.go index 015cc3b585f..780c72be56d 100644 --- a/vendor/github.com/weaveworks/common/middleware/logging.go +++ b/vendor/github.com/weaveworks/common/middleware/logging.go @@ -54,7 +54,7 @@ func (l Log) Wrap(next http.Handler) http.Handler { wrapped := newBadResponseLoggingWriter(w, &buf) next.ServeHTTP(wrapped, r) - statusCode, writeErr := wrapped.statusCode, wrapped.writeError + statusCode, writeErr := wrapped.getStatusCode(), wrapped.getWriteError() if writeErr != nil { if errors.Is(writeErr, context.Canceled) { diff --git a/vendor/github.com/weaveworks/common/middleware/response.go b/vendor/github.com/weaveworks/common/middleware/response.go index 0192c182fb2..61fc4a722dd 100644 --- a/vendor/github.com/weaveworks/common/middleware/response.go +++ b/vendor/github.com/weaveworks/common/middleware/response.go @@ -12,9 +12,15 @@ const ( maxResponseBodyInLogs = 4096 // At most 4k bytes from response bodies in our logs. ) -// badResponseLoggingWriter writes the body of "bad" responses (i.e. 5xx +type badResponseLoggingWriter interface { + http.ResponseWriter + getStatusCode() int + getWriteError() error +} + +// nonFlushingBadResponseLoggingWriter writes the body of "bad" responses (i.e. 5xx // responses) to a buffer. -type badResponseLoggingWriter struct { +type nonFlushingBadResponseLoggingWriter struct { rw http.ResponseWriter buffer io.Writer logBody bool @@ -23,27 +29,39 @@ type badResponseLoggingWriter struct { writeError error // The error returned when downstream Write() fails. } -// newBadResponseLoggingWriter makes a new badResponseLoggingWriter. 
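For illustration, a minimal standalone sketch of the same load-balancing change: the round_robin policy is requested through a default service config rather than the removed grpc.WithBalancerName option. The target address and the use of WithInsecure are placeholders, not taken from the patch.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Ask for round_robin via the service config instead of a named balancer.
	const grpcServiceConfig = `{"loadBalancingPolicy":"round_robin"}`
	conn, err := grpc.Dial(
		"dns:///httpgrpc-backend:9095", // placeholder target
		grpc.WithDefaultServiceConfig(grpcServiceConfig),
		grpc.WithInsecure(), // placeholder; use real transport credentials where needed
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}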
-func newBadResponseLoggingWriter(rw http.ResponseWriter, buffer io.Writer) *badResponseLoggingWriter { - return &badResponseLoggingWriter{ +// flushingBadResponseLoggingWriter is a badResponseLoggingWriter that +// implements http.Flusher. +type flushingBadResponseLoggingWriter struct { + nonFlushingBadResponseLoggingWriter + f http.Flusher +} + +func newBadResponseLoggingWriter(rw http.ResponseWriter, buffer io.Writer) badResponseLoggingWriter { + b := nonFlushingBadResponseLoggingWriter{ rw: rw, buffer: buffer, logBody: false, bodyBytesLeft: maxResponseBodyInLogs, statusCode: http.StatusOK, } + + if f, ok := rw.(http.Flusher); ok { + return &flushingBadResponseLoggingWriter{b, f} + } + + return &b } // Header returns the header map that will be sent by WriteHeader. // Implements ResponseWriter. -func (b *badResponseLoggingWriter) Header() http.Header { +func (b *nonFlushingBadResponseLoggingWriter) Header() http.Header { return b.rw.Header() } // Write writes HTTP response data. -func (b *badResponseLoggingWriter) Write(data []byte) (int, error) { +func (b *nonFlushingBadResponseLoggingWriter) Write(data []byte) (int, error) { if b.statusCode == 0 { - // WriteHeader has (probably) not been called, so we need to call it with StatusOK to fuflil the interface contract. + // WriteHeader has (probably) not been called, so we need to call it with StatusOK to fulfill the interface contract. // https://godoc.org/net/http#ResponseWriter b.WriteHeader(http.StatusOK) } @@ -58,7 +76,7 @@ func (b *badResponseLoggingWriter) Write(data []byte) (int, error) { } // WriteHeader writes the HTTP response header. -func (b *badResponseLoggingWriter) WriteHeader(statusCode int) { +func (b *nonFlushingBadResponseLoggingWriter) WriteHeader(statusCode int) { b.statusCode = statusCode if statusCode >= 500 { b.logBody = true @@ -67,7 +85,7 @@ func (b *badResponseLoggingWriter) WriteHeader(statusCode int) { } // Hijack hijacks the first response writer that is a Hijacker. 
-func (b *badResponseLoggingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { +func (b *nonFlushingBadResponseLoggingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { hj, ok := b.rw.(http.Hijacker) if ok { return hj.Hijack() @@ -75,7 +93,15 @@ func (b *badResponseLoggingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) return nil, nil, fmt.Errorf("badResponseLoggingWriter: can't cast underlying response writer to Hijacker") } -func (b *badResponseLoggingWriter) captureResponseBody(data []byte) { +func (b *nonFlushingBadResponseLoggingWriter) getStatusCode() int { + return b.statusCode +} + +func (b *nonFlushingBadResponseLoggingWriter) getWriteError() error { + return b.writeError +} + +func (b *nonFlushingBadResponseLoggingWriter) captureResponseBody(data []byte) { if len(data) > b.bodyBytesLeft { b.buffer.Write(data[:b.bodyBytesLeft]) io.WriteString(b.buffer, "...") @@ -86,3 +112,7 @@ func (b *badResponseLoggingWriter) captureResponseBody(data []byte) { b.bodyBytesLeft -= len(data) } } + +func (b *flushingBadResponseLoggingWriter) Flush() { + b.f.Flush() +} diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go index a956665b0ea..86a2b7ce1a3 100644 --- a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/weaveworks/common/server/server.go @@ -70,6 +70,8 @@ type Config struct { GRPCListenPort int `yaml:"grpc_listen_port"` GRPCConnLimit int `yaml:"grpc_listen_conn_limit"` + CipherSuites string `yaml:"tls_cipher_suites"` + MinVersion string `yaml:"tls_min_version"` HTTPTLSConfig TLSConfig `yaml:"http_tls_config"` GRPCTLSConfig TLSConfig `yaml:"grpc_tls_config"` @@ -124,6 +126,8 @@ var infinty = time.Duration(math.MaxInt64) func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.HTTPListenAddress, "server.http-listen-address", "", "HTTP server listen address.") f.StringVar(&cfg.HTTPListenNetwork, "server.http-listen-network", DefaultNetwork, "HTTP server listen network, default tcp") + f.StringVar(&cfg.CipherSuites, "server.tls-cipher-suites", "", "Comma-separated list of cipher suites to use. If blank, the default Go cipher suites is used.") + f.StringVar(&cfg.MinVersion, "server.tls-min-version", "", "Minimum TLS version to use. Allowed values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. If blank, the Go TLS minimum version is used.") f.StringVar(&cfg.HTTPTLSConfig.TLSCertPath, "server.http-tls-cert-path", "", "HTTP server cert path.") f.StringVar(&cfg.HTTPTLSConfig.TLSKeyPath, "server.http-tls-key-path", "", "HTTP server key path.") f.StringVar(&cfg.HTTPTLSConfig.ClientAuth, "server.http-tls-client-auth", "", "HTTP TLS Client Auth type.") @@ -181,19 +185,36 @@ type Server struct { // New makes a new Server func New(cfg Config) (*Server, error) { + // If user doesn't supply a logging implementation, by default instantiate + // logrus. + log := cfg.Log + if log == nil { + log = logging.NewLogrus(cfg.LogLevel) + } + + // If user doesn't supply a registerer/gatherer, use Prometheus' by default. 
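The refactor above splits the response writer into non-flushing and flushing variants so that wrapping does not hide the optional http.Flusher interface of the underlying writer. A condensed sketch of that pattern, with illustrative type names rather than the vendored ones:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// plainWriter wraps an http.ResponseWriter without exposing http.Flusher.
type plainWriter struct {
	http.ResponseWriter
}

// flushingWriter additionally forwards Flush to the underlying writer.
type flushingWriter struct {
	plainWriter
	f http.Flusher
}

func (w *flushingWriter) Flush() { w.f.Flush() }

// wrap returns the flushing variant only when the wrapped writer can flush,
// so downstream assertions like w.(http.Flusher) keep working.
func wrap(rw http.ResponseWriter) http.ResponseWriter {
	p := plainWriter{rw}
	if f, ok := rw.(http.Flusher); ok {
		return &flushingWriter{p, f}
	}
	return &p
}

func main() {
	rec := httptest.NewRecorder() // ResponseRecorder implements http.Flusher
	_, ok := wrap(rec).(http.Flusher)
	fmt.Println("flusher preserved:", ok) // true
}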
+ reg := cfg.Registerer + if reg == nil { + reg = prometheus.DefaultRegisterer + } + gatherer := cfg.Gatherer + if gatherer == nil { + gatherer = prometheus.DefaultGatherer + } + tcpConnections := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: cfg.MetricsNamespace, Name: "tcp_connections", Help: "Current number of accepted TCP connections.", }, []string{"protocol"}) - prometheus.MustRegister(tcpConnections) + reg.MustRegister(tcpConnections) tcpConnectionsLimit := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: cfg.MetricsNamespace, Name: "tcp_connections_limit", Help: "The max number of TCP connections that can be accepted (0 means no limit).", }, []string{"protocol"}) - prometheus.MustRegister(tcpConnectionsLimit) + reg.MustRegister(tcpConnectionsLimit) network := cfg.HTTPListenNetwork if network == "" { @@ -226,32 +247,26 @@ func New(cfg Config) (*Server, error) { grpcListener = netutil.LimitListener(grpcListener, cfg.GRPCConnLimit) } - // If user doesn't supply a logging implementation, by default instantiate - // logrus. - log := cfg.Log - if log == nil { - log = logging.NewLogrus(cfg.LogLevel) - } - - // If user doesn't supply a registerer/gatherer, use Prometheus' by default. - reg := cfg.Registerer - if reg == nil { - reg = prometheus.DefaultRegisterer + cipherSuites, err := stringToCipherSuites(cfg.CipherSuites) + if err != nil { + return nil, err } - gatherer := cfg.Gatherer - if gatherer == nil { - gatherer = prometheus.DefaultGatherer + minVersion, err := stringToTLSVersion(cfg.MinVersion) + if err != nil { + return nil, err } // Setup TLS var httpTLSConfig *tls.Config if len(cfg.HTTPTLSConfig.TLSCertPath) > 0 && len(cfg.HTTPTLSConfig.TLSKeyPath) > 0 { // Note: ConfigToTLSConfig from prometheus/exporter-toolkit is awaiting security review. - httpTLSConfig, err = web.ConfigToTLSConfig(&web.TLSStruct{ - TLSCertPath: cfg.HTTPTLSConfig.TLSCertPath, - TLSKeyPath: cfg.HTTPTLSConfig.TLSKeyPath, - ClientAuth: cfg.HTTPTLSConfig.ClientAuth, - ClientCAs: cfg.HTTPTLSConfig.ClientCAs, + httpTLSConfig, err = web.ConfigToTLSConfig(&web.TLSConfig{ + TLSCertPath: cfg.HTTPTLSConfig.TLSCertPath, + TLSKeyPath: cfg.HTTPTLSConfig.TLSKeyPath, + ClientAuth: cfg.HTTPTLSConfig.ClientAuth, + ClientCAs: cfg.HTTPTLSConfig.ClientCAs, + CipherSuites: cipherSuites, + MinVersion: minVersion, }) if err != nil { return nil, fmt.Errorf("error generating http tls config: %v", err) @@ -260,11 +275,13 @@ func New(cfg Config) (*Server, error) { var grpcTLSConfig *tls.Config if len(cfg.GRPCTLSConfig.TLSCertPath) > 0 && len(cfg.GRPCTLSConfig.TLSKeyPath) > 0 { // Note: ConfigToTLSConfig from prometheus/exporter-toolkit is awaiting security review. - grpcTLSConfig, err = web.ConfigToTLSConfig(&web.TLSStruct{ - TLSCertPath: cfg.GRPCTLSConfig.TLSCertPath, - TLSKeyPath: cfg.GRPCTLSConfig.TLSKeyPath, - ClientAuth: cfg.GRPCTLSConfig.ClientAuth, - ClientCAs: cfg.GRPCTLSConfig.ClientCAs, + grpcTLSConfig, err = web.ConfigToTLSConfig(&web.TLSConfig{ + TLSCertPath: cfg.GRPCTLSConfig.TLSCertPath, + TLSKeyPath: cfg.GRPCTLSConfig.TLSKeyPath, + ClientAuth: cfg.GRPCTLSConfig.ClientAuth, + ClientCAs: cfg.GRPCTLSConfig.ClientCAs, + CipherSuites: cipherSuites, + MinVersion: minVersion, }) if err != nil { return nil, fmt.Errorf("error generating grpc tls config: %v", err) @@ -500,6 +517,17 @@ func (s *Server) Run() error { return <-errChan } +// HTTPListenAddr exposes `net.Addr` that `Server` is listening to for HTTP connections. 
+func (s *Server) HTTPListenAddr() net.Addr { + return s.httpListener.Addr() + +} + +// GRPCListenAddr exposes `net.Addr` that `Server` is listening to for GRPC connections. +func (s *Server) GRPCListenAddr() net.Addr { + return s.grpcListener.Addr() +} + // Stop unblocks Run(). func (s *Server) Stop() { s.handler.Stop() diff --git a/vendor/github.com/weaveworks/common/server/tls_config.go b/vendor/github.com/weaveworks/common/server/tls_config.go new file mode 100644 index 00000000000..b374a67c41b --- /dev/null +++ b/vendor/github.com/weaveworks/common/server/tls_config.go @@ -0,0 +1,55 @@ +package server + +import ( + "crypto/tls" + fmt "fmt" + "strings" + + "github.com/prometheus/exporter-toolkit/web" +) + +// Collect all cipher suite names and IDs recognized by Go, including insecure ones. +func allCiphers() map[string]web.Cipher { + acceptedCiphers := make(map[string]web.Cipher) + for _, suite := range tls.CipherSuites() { + acceptedCiphers[suite.Name] = web.Cipher(suite.ID) + } + for _, suite := range tls.InsecureCipherSuites() { + acceptedCiphers[suite.Name] = web.Cipher(suite.ID) + } + return acceptedCiphers +} + +func stringToCipherSuites(s string) ([]web.Cipher, error) { + if s == "" { + return nil, nil + } + ciphersSlice := []web.Cipher{} + possibleCiphers := allCiphers() + for _, cipher := range strings.Split(s, ",") { + intValue, ok := possibleCiphers[cipher] + if !ok { + return nil, fmt.Errorf("cipher suite %q not recognized", cipher) + } + ciphersSlice = append(ciphersSlice, intValue) + } + return ciphersSlice, nil +} + +// Using the same names that Kubernetes does +var tlsVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +func stringToTLSVersion(s string) (web.TLSVersion, error) { + if s == "" { + return 0, nil + } + if version, ok := tlsVersions[s]; ok { + return web.TLSVersion(version), nil + } + return 0, fmt.Errorf("TLS version %q not recognized", s) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go index 95ffc1078da..a0d81858261 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -27,7 +27,7 @@ type Zeroer interface { // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D = primitive.D // E represents a BSON element for a D. It is usually used inside a D. @@ -39,12 +39,12 @@ type E = primitive.E // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M = primitive.M // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index b0ae0e23ff2..5f903ebea6c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -17,7 +17,7 @@ // 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for // retrieving them. 
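A hedged configuration sketch of the new TLS options added above, combined with the existing per-listener TLS settings; the ports, certificate paths, and cipher-suite list are placeholders, and unrecognized names cause server.New to return an error.

package main

import (
	"log"

	"github.com/weaveworks/common/server"
)

func main() {
	cfg := server.Config{
		HTTPListenPort: 8080,
		GRPCListenPort: 9095,
		// Version and suite names follow crypto/tls.
		MinVersion:   "VersionTLS12",
		CipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
		HTTPTLSConfig: server.TLSConfig{
			TLSCertPath: "/etc/tls/server.crt", // placeholder path
			TLSKeyPath:  "/etc/tls/server.key", // placeholder path
		},
	}
	srv, err := server.New(cfg)
	if err != nil {
		log.Fatalln("starting server:", err)
	}
	defer srv.Shutdown()
	log.Println("HTTP listening on", srv.HTTPListenAddr())
}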
// -// ValueEncoders and ValueDecoders +// # ValueEncoders and ValueDecoders // // The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. // The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the @@ -31,7 +31,7 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// Registry and RegistryBuilder +// # Registry and RegistryBuilder // // A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type // documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a @@ -53,15 +53,15 @@ // values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would // change the behavior so these values decode as Go int instances instead: // -// intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// intType := reflect.TypeOf(int(0)) +// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // // 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder // methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the // registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. // These methods should be used to change the behavior for all values for a specific kind. // -// Registry Lookup Procedure +// # Registry Lookup Procedure // // When looking up an encoder in a Registry, the precedence rules are as follows: // @@ -79,7 +79,7 @@ // rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is // found. // -// DefaultValueEncoders and DefaultValueDecoders +// # DefaultValueEncoders and DefaultValueDecoders // // The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and // ValueDecoders for handling a wide range of Go types, including all of the types within the diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index f6f3800d404..80644023c24 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -254,6 +254,7 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON // documents, a type map entry for bsontype.EmbeddedDocument should be registered. 
For example, to force BSON documents // to decode to bson.Raw, use the following code: +// // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { rb.typeMap[bt] = rt diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 6f406c16232..62708c5c745 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -34,21 +34,21 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // // The properties are defined below: // -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. // -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. // -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. // -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. // -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. // // TODO(skriptble): Add tags for undefined as nil and for null as nil. type StructTags struct { @@ -67,20 +67,20 @@ type StructTags struct { // If there is no name in the struct tag fields, the struct field name is lowercased. // The tag formats accepted are: // -// "[][,[,]]" +// "[][,[,]]" // -// `(...) bson:"[][,[,]]" (...)` +// `(...) bson:"[][,[,]]" (...)` // // An example: // -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } // // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 5e3825a2312..0134006d8ea 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -9,21 +9,22 @@ // The BSON library handles marshalling and unmarshalling of values through a configurable codec system. 
For a description // of the codec system and examples of registering custom codecs, see the bsoncodec package. // -// Raw BSON +// # Raw BSON // // The Raw family of types is used to validate and retrieve elements from a slice of bytes. This // type is most useful when you want do lookups on BSON bytes without unmarshaling it into another // type. // // Example: -// var raw bson.Raw = ... // bytes from somewhere -// err := raw.Validate() -// if err != nil { return err } -// val := raw.Lookup("foo") -// i32, ok := val.Int32OK() -// // do something with i32... // -// Native Go Types +// var raw bson.Raw = ... // bytes from somewhere +// err := raw.Validate() +// if err != nil { return err } +// val := raw.Lookup("foo") +// i32, ok := val.Int32OK() +// // do something with i32... +// +// # Native Go Types // // The D and M types defined in this package can be used to build representations of BSON using native Go types. D is a // slice and M is a map. For more information about the use cases for these types, see the documentation on the type @@ -32,63 +33,64 @@ // Note that a D should not be constructed with duplicate key names, as that can cause undefined server behavior. // // Example: -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // // When decoding BSON to a D or M, the following type mappings apply when unmarshalling: // -// 1. BSON int32 unmarshals to an int32. -// 2. BSON int64 unmarshals to an int64. -// 3. BSON double unmarshals to a float64. -// 4. BSON string unmarshals to a string. -// 5. BSON boolean unmarshals to a bool. -// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). -// 7. BSON array unmarshals to a bson.A. -// 8. BSON ObjectId unmarshals to a primitive.ObjectID. -// 9. BSON datetime unmarshals to a primitive.DateTime. -// 10. BSON binary unmarshals to a primitive.Binary. -// 11. BSON regular expression unmarshals to a primitive.Regex. -// 12. BSON JavaScript unmarshals to a primitive.JavaScript. -// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. -// 14. BSON timestamp unmarshals to an primitive.Timestamp. -// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. -// 16. BSON min key unmarshals to an primitive.MinKey. -// 17. BSON max key unmarshals to an primitive.MaxKey. -// 18. BSON undefined unmarshals to a primitive.Undefined. -// 19. BSON null unmarshals to nil. -// 20. BSON DBPointer unmarshals to a primitive.DBPointer. -// 21. BSON symbol unmarshals to a primitive.Symbol. +// 1. BSON int32 unmarshals to an int32. +// 2. BSON int64 unmarshals to an int64. +// 3. BSON double unmarshals to a float64. +// 4. BSON string unmarshals to a string. +// 5. BSON boolean unmarshals to a bool. +// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). +// 7. BSON array unmarshals to a bson.A. +// 8. BSON ObjectId unmarshals to a primitive.ObjectID. +// 9. BSON datetime unmarshals to a primitive.DateTime. +// 10. BSON binary unmarshals to a primitive.Binary. +// 11. BSON regular expression unmarshals to a primitive.Regex. +// 12. BSON JavaScript unmarshals to a primitive.JavaScript. +// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. +// 14. BSON timestamp unmarshals to an primitive.Timestamp. +// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. +// 16. 
BSON min key unmarshals to an primitive.MinKey. +// 17. BSON max key unmarshals to an primitive.MaxKey. +// 18. BSON undefined unmarshals to a primitive.Undefined. +// 19. BSON null unmarshals to nil. +// 20. BSON DBPointer unmarshals to a primitive.DBPointer. +// 21. BSON symbol unmarshals to a primitive.Symbol. // // The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: // -// 1. time.Time marshals to a BSON datetime. -// 2. int8, int16, and int32 marshal to a BSON int32. -// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 -// otherwise. -// 4. int64 marshals to BSON int64. -// 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or -// undefined value into a string will yield the empty string.). +// 1. time.Time marshals to a BSON datetime. +// 2. int8, int16, and int32 marshal to a BSON int32. +// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 +// otherwise. +// 4. int64 marshals to BSON int64. +// 5. uint8 and uint16 marshal to a BSON int32. +// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, +// inclusive, and BSON int64 otherwise. +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// undefined value into a string will yield the empty string.). // -// Structs +// # Structs // // Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshalled or unmarshalled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is // marshalled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents // unmarshalled into an interface{} field will be unmarshalled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. 
@@ -98,13 +100,14 @@ // are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: -// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) +// +// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. // The name may be empty in order to specify options without overriding the default field name. The following options can be used // to configure behavior: // -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to // the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if // their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). // Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered @@ -113,16 +116,16 @@ // never considered empty and will be marshalled as embedded documents. // NOTE: It is recommended that this tag be used for all slice and map fields. // -// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of +// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of // the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other // types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled // into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, // it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be // decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // -// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when +// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when // marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be // pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a // map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be @@ -132,7 +135,7 @@ // This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be // marshalled. For fields that are not maps or structs, this tag is ignored. // -// Marshalling and Unmarshalling +// # Marshalling and Unmarshalling // // Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. 
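To make the struct-tag rules above concrete, a small sketch (the Order type and its fields are invented for illustration):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type Order struct {
	ID       string            `bson:"_id"`
	Customer string            `bson:"customer,omitempty"` // dropped when empty
	Labels   map[string]string `bson:",inline"`            // keys flattened into the parent document
	Internal string            `bson:"-"`                  // never marshalled
}

func main() {
	doc, err := bson.Marshal(Order{ID: "o-123", Labels: map[string]string{"region": "eu"}})
	if err != nil {
		panic(err)
	}
	var m bson.M
	if err := bson.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // contains _id:o-123 and the inlined region:eu; no customer or Internal keys
}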
package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index ffe4eed07ae..ba7c9112e9b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -191,10 +191,9 @@ func (d Decimal128) IsNaN() bool { // IsInf returns: // -// +1 d == Infinity -// 0 other case -// -1 d == -Infinity -// +// +1 d == Infinity +// 0 other case +// -1 d == -Infinity func (d Decimal128) IsInf() int { if d.h>>58&(1<<5-1) != 0x1E { return 0 diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 652898fea75..ded3673165d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -61,7 +61,9 @@ func (id ObjectID) Timestamp() time.Time { // Hex returns the hex encoding of the ObjectID as a string. func (id ObjectID) Hex() string { - return hex.EncodeToString(id[:]) + var buf [24]byte + hex.Encode(buf[:], id[:]) + return string(buf[:]) } func (id ObjectID) String() string { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index b3cba1bf9df..c72ccc1c4d4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -182,7 +182,7 @@ type MaxKey struct{} // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D []E // Map creates a map from the elements of the D. @@ -206,12 +206,12 @@ type E struct { // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M map[string]interface{} // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A []interface{} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go index 6c858a01099..e35bd0cd9ad 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go @@ -96,8 +96,8 @@ func (ds *DocumentSequence) Empty() bool { } } -//ResetIterator resets the iteration point for the Next method to the beginning of the document -//sequence. +// ResetIterator resets the iteration point for the Next method to the beginning of the document +// sequence. func (ds *DocumentSequence) ResetIterator() { if ds == nil { return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 05c1e08f161..6ef983af4cb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.36.0" + return "0.36.4" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go index 370715f694c..5c8260ceb6f 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go @@ -35,7 +35,8 @@ type InstrumentProvider interface { // Counter is an instrument that records increasing values. type Counter interface { - // Observe records the state of the instrument. + // Observe records the state of the instrument to be x. The value of x is + // assumed to be the exact Counter value to record. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -47,7 +48,8 @@ type Counter interface { // UpDownCounter is an instrument that records increasing or decreasing values. type UpDownCounter interface { - // Observe records the state of the instrument. + // Observe records the state of the instrument to be x. The value of x is + // assumed to be the exact UpDownCounter value to record. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -59,7 +61,7 @@ type UpDownCounter interface { // Gauge is an instrument that records independent readings. type Gauge interface { - // Observe records the state of the instrument. + // Observe records the state of the instrument to be x. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go index 41a561bc4a2..b07409c7931 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go @@ -35,7 +35,8 @@ type InstrumentProvider interface { // Counter is an instrument that records increasing values. type Counter interface { - // Observe records the state of the instrument. + // Observe records the state of the instrument to be x. The value of x is + // assumed to be the exact Counter value to record. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -47,7 +48,8 @@ type Counter interface { // UpDownCounter is an instrument that records increasing or decreasing values. type UpDownCounter interface { - // Observe records the state of the instrument. + // Observe records the state of the instrument to be x. The value of x is + // assumed to be the exact UpDownCounter value to record. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -59,7 +61,7 @@ type UpDownCounter interface { // Gauge is an instrument that records independent readings. type Gauge interface { - // Observe records the state of the instrument. + // Observe records the state of the instrument to be x. // // It is only valid to call this within a callback. 
If called outside of the // registered callback it should have no effect on the instrument, and an diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go index 484ca51b715..96f4a1a56ec 100644 --- a/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -117,7 +117,7 @@ func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) } ps := decrypted[len(decrypted)-psLen:] decrypted = decrypted[:len(decrypted)-psLen] - if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { return nil, ErrDecryption } diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 00000000000..2c033dff47e --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 00000000000..0c756c46cc8 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,248 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. 
+package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 00000000000..c22e74bd102 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,127 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStable sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // search returns the leftmost position where f returns true, or len(x) if f + // returns false for all x. This is the insertion position for target in x, + // and could point to an element that's either == target or not. + pos := search(len(x), func(i int) bool { return x[i] >= target }) + if pos >= len(x) || x[pos] != target { + return pos, false + } else { + return pos, true + } +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { + pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) + if pos >= len(x) || cmp(x[pos], target) != 0 { + return pos, false + } else { + return pos, true + } +} + +func search(n int, f func(int) bool) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !f(h) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. 
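A short usage sketch of BinarySearch as documented above: it reports the insertion position together with whether the target was actually found, and the input slice must already be sorted. The values are arbitrary.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{1, 3, 5, 5, 8}

	pos, found := slices.BinarySearch(xs, 5)
	fmt.Println(pos, found) // 2 true  (leftmost 5)

	pos, found = slices.BinarySearch(xs, 4)
	fmt.Println(pos, found) // 2 false (index where 4 would be inserted)
}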
+ return i +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 00000000000..2a632476c50 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. 
+ // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
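The pivot strategy described above (a static pivot for tiny ranges, median-of-three for small ones, then the Tukey ninther) is implemented by choosePivotLessFunc just below and only affects how the pdqsort driver behind the exported SortFunc partitions; callers never see it. A minimal usage sketch of that exported API, assuming the vendored golang.org/x/exp/slices import path and a hypothetical block type:

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    // block is a hypothetical element type; only the less function below matters.
    type block struct{ minTime, maxTime int64 }

    func main() {
        bs := []block{{30, 40}, {0, 10}, {10, 20}}
        less := func(a, b block) bool { return a.minTime < b.minTime }
        slices.SortFunc(bs, less)                  // dispatches to pdqsortLessFunc
        fmt.Println(slices.IsSortedFunc(bs, less)) // true
    }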
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 00000000000..efaa1c8b714 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
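The ...Ordered variants in this generated file are the same machinery specialized for constraints.Ordered element types, comparing with < directly instead of a less callback; they back the exported Sort shown earlier. A minimal sketch of Sort together with BinarySearch from the same package, assuming the vendored golang.org/x/exp/slices import path:

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        xs := []int{30, 0, 20, 10}
        slices.Sort(xs) // pdqsortOrdered under the hood
        pos, found := slices.BinarySearch(xs, 20)
        fmt.Println(xs, pos, found) // [0 10 20 30] 2 true
    }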
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(data[a] < data[i]) { + i++ + } + for i <= j && (data[a] < data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(data[i] < data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. +func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. 
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index dddf6514458..b3e7bc85ce5 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -40,9 +40,10 @@ // Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure // OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc // -// For OIDC providers, the library can retrieve OIDC tokens either from a -// local file location (file-sourced credentials) or from a local server -// (URL-sourced credentials). +// For OIDC and SAML providers, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). // For file-sourced credentials, a background process needs to be continuously // refreshing the file location with a new OIDC token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file @@ -50,6 +51,11 @@ // For URL-sourced credentials, a local server needs to host a GET endpoint to // return the OIDC token. The response can be in plain text or JSON. // Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC token and other information in a JSON format. 
+// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/using-workload-identity-federation#oidc // // # Credentials // diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 2bf5391a1de..9fc35535e7f 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -74,12 +74,14 @@ var ( regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), + regexp.MustCompile(`(?i)^sts-[^\.\s\/\\]+\.p\.googleapis\.com$`), } validImpersonateURLPatterns = []*regexp.Regexp{ regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), + regexp.MustCompile(`^iamcredentials-[^\.\s\/\\]+\.p\.googleapis\.com$`), } validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) diff --git a/vendor/golang.org/x/text/cases/cases.go b/vendor/golang.org/x/text/cases/cases.go new file mode 100644 index 00000000000..752cdf03167 --- /dev/null +++ b/vendor/golang.org/x/text/cases/cases.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go + +// Package cases provides general and language-specific case mappers. +package cases // import "golang.org/x/text/cases" + +import ( + "golang.org/x/text/language" + "golang.org/x/text/transform" +) + +// References: +// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18. +// - https://www.unicode.org/reports/tr29/ +// - https://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt +// - https://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt +// - https://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt +// - https://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt +// - https://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt +// - http://userguide.icu-project.org/transforms/casemappings + +// TODO: +// - Case folding +// - Wide and Narrow? +// - Segmenter option for title casing. +// - ASCII fast paths +// - Encode Soft-Dotted property within trie somehow. + +// A Caser transforms given input to a certain case. It implements +// transform.Transformer. +// +// A Caser may be stateful and should therefore not be shared between +// goroutines. +type Caser struct { + t transform.SpanningTransformer +} + +// Bytes returns a new byte slice with the result of converting b to the case +// form implemented by c. +func (c Caser) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(c.t, b) + return b +} + +// String returns a string with the result of transforming s to the case form +// implemented by c. +func (c Caser) String(s string) string { + s, _, _ = transform.String(c.t, s) + return s +} + +// Reset resets the Caser to be reused for new input after a previous call to +// Transform. 
+func (c Caser) Reset() { c.t.Reset() } + +// Transform implements the transform.Transformer interface and transforms the +// given input to the case form implemented by c. +func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return c.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (c Caser) Span(src []byte, atEOF bool) (n int, err error) { + return c.t.Span(src, atEOF) +} + +// Upper returns a Caser for language-specific uppercasing. +func Upper(t language.Tag, opts ...Option) Caser { + return Caser{makeUpper(t, getOpts(opts...))} +} + +// Lower returns a Caser for language-specific lowercasing. +func Lower(t language.Tag, opts ...Option) Caser { + return Caser{makeLower(t, getOpts(opts...))} +} + +// Title returns a Caser for language-specific title casing. It uses an +// approximation of the default Unicode Word Break algorithm. +func Title(t language.Tag, opts ...Option) Caser { + return Caser{makeTitle(t, getOpts(opts...))} +} + +// Fold returns a Caser that implements Unicode case folding. The returned Caser +// is stateless and safe to use concurrently by multiple goroutines. +// +// Case folding does not normalize the input and may not preserve a normal form. +// Use the collate or search package for more convenient and linguistically +// sound comparisons. Use golang.org/x/text/secure/precis for string comparisons +// where security aspects are a concern. +func Fold(opts ...Option) Caser { + return Caser{makeFold(getOpts(opts...))} +} + +// An Option is used to modify the behavior of a Caser. +type Option func(o options) options + +// TODO: consider these options to take a boolean as well, like FinalSigma. +// The advantage of using this approach is that other providers of a lower-case +// algorithm could set different defaults by prefixing a user-provided slice +// of options with their own. This is handy, for instance, for the precis +// package which would override the default to not handle the Greek final sigma. + +var ( + // NoLower disables the lowercasing of non-leading letters for a title + // caser. + NoLower Option = noLower + + // Compact omits mappings in case folding for characters that would grow the + // input. (Unimplemented.) + Compact Option = compact +) + +// TODO: option to preserve a normal form, if applicable? + +type options struct { + noLower bool + simple bool + + // TODO: segmenter, max ignorable, alternative versions, etc. + + ignoreFinalSigma bool +} + +func getOpts(o ...Option) (res options) { + for _, f := range o { + res = f(res) + } + return +} + +func noLower(o options) options { + o.noLower = true + return o +} + +func compact(o options) options { + o.simple = true + return o +} + +// HandleFinalSigma specifies whether the special handling of Greek final sigma +// should be enabled. Unicode prescribes handling the Greek final sigma for all +// locales, but standards like IDNA and PRECIS override this default. 
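The HandleFinalSigma option declared just below toggles the Greek final-sigma rule that standards such as IDNA and PRECIS opt out of. A small sketch of the difference, assuming the vendored golang.org/x/text/cases and golang.org/x/text/language packages:

    package main

    import (
        "fmt"

        "golang.org/x/text/cases"
        "golang.org/x/text/language"
    )

    func main() {
        word := "ΟΔΟΣ"
        fmt.Println(cases.Lower(language.Und).String(word))                                // trailing Σ becomes final sigma ς
        fmt.Println(cases.Lower(language.Und, cases.HandleFinalSigma(false)).String(word)) // trailing Σ stays σ
    }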
+func HandleFinalSigma(enable bool) Option { + if enable { + return handleFinalSigma + } + return ignoreFinalSigma +} + +func ignoreFinalSigma(o options) options { + o.ignoreFinalSigma = true + return o +} + +func handleFinalSigma(o options) options { + o.ignoreFinalSigma = false + return o +} diff --git a/vendor/golang.org/x/text/cases/context.go b/vendor/golang.org/x/text/cases/context.go new file mode 100644 index 00000000000..e9aa9e1936f --- /dev/null +++ b/vendor/golang.org/x/text/cases/context.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +// A context is used for iterating over source bytes, fetching case info and +// writing to a destination buffer. +// +// Casing operations may need more than one rune of context to decide how a rune +// should be cased. Casing implementations should call checkpoint on context +// whenever it is known to be safe to return the runes processed so far. +// +// It is recommended for implementations to not allow for more than 30 case +// ignorables as lookahead (analogous to the limit in norm) and to use state if +// unbounded lookahead is needed for cased runes. +type context struct { + dst, src []byte + atEOF bool + + pDst int // pDst points past the last written rune in dst. + pSrc int // pSrc points to the start of the currently scanned rune. + + // checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc. + nDst, nSrc int + err error + + sz int // size of current rune + info info // case information of currently scanned rune + + // State preserved across calls to Transform. + isMidWord bool // false if next cased letter needs to be title-cased. +} + +func (c *context) Reset() { + c.isMidWord = false +} + +// ret returns the return values for the Transform method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) ret() (nDst, nSrc int, err error) { + if c.err != nil || c.nSrc == len(c.src) { + return c.nDst, c.nSrc, c.err + } + // This point is only reached by mappers if there was no short destination + // buffer. This means that the source buffer was exhausted and that c.sz was + // set to 0 by next. + if c.atEOF && c.pSrc == len(c.src) { + return c.pDst, c.pSrc, nil + } + return c.nDst, c.nSrc, transform.ErrShortSrc +} + +// retSpan returns the return values for the Span method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) retSpan() (n int, err error) { + _, nSrc, err := c.ret() + return nSrc, err +} + +// checkpoint sets the return value buffer points for Transform to the current +// positions. +func (c *context) checkpoint() { + if c.err == nil { + c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz + } +} + +// unreadRune causes the last rune read by next to be reread on the next +// invocation of next. Only one unreadRune may be called after a call to next. +func (c *context) unreadRune() { + c.sz = 0 +} + +func (c *context) next() bool { + c.pSrc += c.sz + if c.pSrc == len(c.src) || c.err != nil { + c.info, c.sz = 0, 0 + return false + } + v, sz := trie.lookup(c.src[c.pSrc:]) + c.info, c.sz = info(v), sz + if c.sz == 0 { + if c.atEOF { + // A zero size means we have an incomplete rune. 
If we are atEOF, + // this means it is an illegal rune, which we will consume one + // byte at a time. + c.sz = 1 + } else { + c.err = transform.ErrShortSrc + return false + } + } + return true +} + +// writeBytes adds bytes to dst. +func (c *context) writeBytes(b []byte) bool { + if len(c.dst)-c.pDst < len(b) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for _, ch := range b { + c.dst[c.pDst] = ch + c.pDst++ + } + return true +} + +// writeString writes the given string to dst. +func (c *context) writeString(s string) bool { + if len(c.dst)-c.pDst < len(s) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for i := 0; i < len(s); i++ { + c.dst[c.pDst] = s[i] + c.pDst++ + } + return true +} + +// copy writes the current rune to dst. +func (c *context) copy() bool { + return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz]) +} + +// copyXOR copies the current rune to dst and modifies it by applying the XOR +// pattern of the case info. It is the responsibility of the caller to ensure +// that this is a rune with a XOR pattern defined. +func (c *context) copyXOR() bool { + if !c.copy() { + return false + } + if c.info&xorIndexBit == 0 { + // Fast path for 6-bit XOR pattern, which covers most cases. + c.dst[c.pDst-1] ^= byte(c.info >> xorShift) + } else { + // Interpret XOR bits as an index. + // TODO: test performance for unrolling this loop. Verify that we have + // at least two bytes and at most three. + idx := c.info >> xorShift + for p := c.pDst - 1; ; p-- { + c.dst[p] ^= xorData[idx] + idx-- + if xorData[idx] == 0 { + break + } + } + } + return true +} + +// hasPrefix returns true if src[pSrc:] starts with the given string. +func (c *context) hasPrefix(s string) bool { + b := c.src[c.pSrc:] + if len(b) < len(s) { + return false + } + for i, c := range b[:len(s)] { + if c != s[i] { + return false + } + } + return true +} + +// caseType returns an info with only the case bits, normalized to either +// cLower, cUpper, cTitle or cUncased. +func (c *context) caseType() info { + cm := c.info & 0x7 + if cm < 4 { + return cm + } + if cm >= cXORCase { + // xor the last bit of the rune with the case type bits. + b := c.src[c.pSrc+c.sz-1] + return info(b&1) ^ cm&0x3 + } + if cm == cIgnorableCased { + return cLower + } + return cUncased +} + +// lower writes the lowercase version of the current rune to dst. +func lower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + return c.writeString(e[offset : offset+nLower]) + } + return c.copy() +} + +func isLower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// upper writes the uppercase version of the current rune to dst. 
+func upper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + // The first special case mapping is for lower. Set n to the second. + if n == noChange { + n = 0 + } + n, e = e[1]&lengthMask, e[n:] + } + if n != noChange { + return c.writeString(e[offset : offset+n]) + } + return c.copy() +} + +// isUpper writes the isUppercase version of the current rune to dst. +func isUpper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + n = e[1] & lengthMask + } + if n != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// title writes the title case version of the current rune to dst. +func title(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return c.copy() + } + if c.info&exceptionBit == 0 { + if ct == cLower { + return c.copyXOR() + } + return c.copy() + } + // Get the exception data. + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + + nFirst := (e[1] >> lengthBits) & lengthMask + if nTitle := e[1] & lengthMask; nTitle != noChange { + if nFirst != noChange { + e = e[nFirst:] + } + return c.writeString(e[offset : offset+nTitle]) + } + if ct == cLower && nFirst != noChange { + // Use the uppercase version instead. + return c.writeString(e[offset : offset+nFirst]) + } + // Already in correct case. + return c.copy() +} + +// isTitle reports whether the current rune is in title case. +func isTitle(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return true + } + if c.info&exceptionBit == 0 { + if ct == cLower { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + // Get the exception data. + e := exceptions[c.info>>exceptionShift:] + if nTitle := e[1] & lengthMask; nTitle != noChange { + c.err = transform.ErrEndOfSpan + return false + } + nFirst := (e[1] >> lengthBits) & lengthMask + if ct == cLower && nFirst != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// foldFull writes the foldFull version of the current rune to dst. 
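foldFull below is the per-rune worker used by the Fold caser; full case folding is what lets differently-cased spellings compare equal. A minimal sketch, assuming the vendored golang.org/x/text/cases package:

    package main

    import (
        "fmt"

        "golang.org/x/text/cases"
    )

    func main() {
        fold := cases.Fold()
        // Full case folding maps ß to "ss", so both spellings fold to the same key.
        fmt.Println(fold.String("Straße") == fold.String("STRASSE")) // true
    }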
+func foldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return c.copy() + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + return c.copyXOR() + } + return c.copy() + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 { + if ct == cLower { + return c.copy() + } + n = (e[1] >> lengthBits) & lengthMask + } + return c.writeString(e[2 : 2+n]) +} + +// isFoldFull reports whether the current run is mapped to foldFull +func isFoldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return true + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 && ct == cLower { + return true + } + c.err = transform.ErrEndOfSpan + return false +} diff --git a/vendor/golang.org/x/text/cases/fold.go b/vendor/golang.org/x/text/cases/fold.go new file mode 100644 index 00000000000..85cc434fac0 --- /dev/null +++ b/vendor/golang.org/x/text/cases/fold.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +type caseFolder struct{ transform.NopResetter } + +// caseFolder implements the Transformer interface for doing case folding. +func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + foldFull(&c) + c.checkpoint() + } + return c.ret() +} + +func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isFoldFull(&c) { + c.checkpoint() + } + return c.retSpan() +} + +func makeFold(o options) transform.SpanningTransformer { + // TODO: Special case folding, through option Language, Special/Turkic, or + // both. + // TODO: Implement Compact options. + return &caseFolder{} +} diff --git a/vendor/golang.org/x/text/cases/icu.go b/vendor/golang.org/x/text/cases/icu.go new file mode 100644 index 00000000000..2dc84b39efa --- /dev/null +++ b/vendor/golang.org/x/text/cases/icu.go @@ -0,0 +1,62 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build icu +// +build icu + +package cases + +// Ideally these functions would be defined in a test file, but go test doesn't +// allow CGO in tests. The build tag should ensure either way that these +// functions will not end up in the package. + +// TODO: Ensure that the correct ICU version is set. 
+ +/* +#cgo LDFLAGS: -licui18n.57 -licuuc.57 +#include +#include +#include +#include +#include +*/ +import "C" + +import "unsafe" + +func doICU(tag, caser, input string) string { + err := C.UErrorCode(0) + loc := C.CString(tag) + cm := C.ucasemap_open(loc, C.uint32_t(0), &err) + + buf := make([]byte, len(input)*4) + dst := (*C.char)(unsafe.Pointer(&buf[0])) + src := C.CString(input) + + cn := C.int32_t(0) + + switch caser { + case "fold": + cn = C.ucasemap_utf8FoldCase(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "lower": + cn = C.ucasemap_utf8ToLower(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "upper": + cn = C.ucasemap_utf8ToUpper(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "title": + cn = C.ucasemap_utf8ToTitle(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + } + return string(buf[:cn]) +} diff --git a/vendor/golang.org/x/text/cases/info.go b/vendor/golang.org/x/text/cases/info.go new file mode 100644 index 00000000000..87a7c3e9557 --- /dev/null +++ b/vendor/golang.org/x/text/cases/info.go @@ -0,0 +1,82 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +func (c info) cccVal() info { + if c&exceptionBit != 0 { + return info(exceptions[c>>exceptionShift]) & cccMask + } + return c & cccMask +} + +func (c info) cccType() info { + ccc := c.cccVal() + if ccc <= cccZero { + return cccZero + } + return ccc +} + +// TODO: Implement full Unicode breaking algorithm: +// 1) Implement breaking in separate package. +// 2) Use the breaker here. +// 3) Compare table size and performance of using the more generic breaker. +// +// Note that we can extend the current algorithm to be much more accurate. This +// only makes sense, though, if the performance and/or space penalty of using +// the generic breaker is big. Extra data will only be needed for non-cased +// runes, which means there are sufficient bits left in the caseType. +// ICU prohibits breaking in such cases as well. + +// For the purpose of title casing we use an approximation of the Unicode Word +// Breaking algorithm defined in Annex #29: +// https://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table. +// +// For our approximation, we group the Word Break types into the following +// categories, with associated rules: +// +// 1) Letter: +// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ. +// Rule: Never break between consecutive runes of this category. +// +// 2) Mid: +// MidLetter, MidNumLet, Single_Quote. +// (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn, +// Me, Cf, Lm or Sk). +// Rule: Don't break between Letter and Mid, but break between two Mids. +// +// 3) Break: +// Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and +// Other. +// These categories should always result in a break between two cased letters. +// Rule: Always break. +// +// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in +// preventing a break between two cased letters. For now we will ignore this +// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and +// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].) +// +// Note 2: the rule for Mid is very approximate, but works in most cases. To +// improve, we could store the categories in the trie value and use a FA to +// manage breaks. 
See TODO comment above. +// +// Note 3: according to the spec, it is possible for the Extend category to +// introduce breaks between other categories grouped in Letter. However, this +// is undesirable for our purposes. ICU prevents breaks in such cases as well. + +// isBreak returns whether this rune should introduce a break. +func (c info) isBreak() bool { + return c.cccVal() == cccBreak +} + +// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter, +// Numeric, ExtendNumLet, or Extend. +func (c info) isLetter() bool { + ccc := c.cccVal() + if ccc == cccZero { + return !c.isCaseIgnorable() + } + return ccc != cccBreak +} diff --git a/vendor/golang.org/x/text/cases/map.go b/vendor/golang.org/x/text/cases/map.go new file mode 100644 index 00000000000..0f7c6a14bb7 --- /dev/null +++ b/vendor/golang.org/x/text/cases/map.go @@ -0,0 +1,816 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +// This file contains the definitions of case mappings for all supported +// languages. The rules for the language-specific tailorings were taken and +// modified from the CLDR transform definitions in common/transforms. + +import ( + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/text/internal" + "golang.org/x/text/language" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// A mapFunc takes a context set to the current rune and writes the mapped +// version to the same context. It may advance the context to the next rune. It +// returns whether a checkpoint is possible: whether the pDst bytes written to +// dst so far won't need changing as we see more source bytes. +type mapFunc func(*context) bool + +// A spanFunc takes a context set to the current rune and returns whether this +// rune would be altered when written to the output. It may advance the context +// to the next rune. It returns whether a checkpoint is possible. +type spanFunc func(*context) bool + +// maxIgnorable defines the maximum number of ignorables to consider for +// lookahead operations. +const maxIgnorable = 30 + +// supported lists the language tags for which we have tailorings. +const supported = "und af az el lt nl tr" + +func init() { + tags := []language.Tag{} + for _, s := range strings.Split(supported, " ") { + tags = append(tags, language.MustParse(s)) + } + matcher = internal.NewInheritanceMatcher(tags) + Supported = language.NewCoverage(tags) +} + +var ( + matcher *internal.InheritanceMatcher + + Supported language.Coverage + + // We keep the following lists separate, instead of having a single per- + // language struct, to give the compiler a chance to remove unused code. + + // Some uppercase mappers are stateless, so we can precompute the + // Transformers and save a bit on runtime allocations. 
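The supported list above ("und af az el lt nl tr") is the full set of locale tailorings this package ships; any other tag falls back to one of these through the inheritance matcher, and the tables that follow wire each tag to its mapper. Picking a tailored tag changes behaviour such as the Turkish dotted/dotless i, as in this small sketch (assuming the vendored cases and language packages):

    package main

    import (
        "fmt"

        "golang.org/x/text/cases"
        "golang.org/x/text/language"
    )

    func main() {
        fmt.Println(cases.Upper(language.Turkish).String("istanbul")) // İSTANBUL (dotted capital İ)
        fmt.Println(cases.Upper(language.Und).String("istanbul"))     // ISTANBUL (root locale)
    }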
+ upperFunc = []struct { + upper mapFunc + span spanFunc + }{ + {nil, nil}, // und + {nil, nil}, // af + {aztrUpper(upper), isUpper}, // az + {elUpper, noSpan}, // el + {ltUpper(upper), noSpan}, // lt + {nil, nil}, // nl + {aztrUpper(upper), isUpper}, // tr + } + + undUpper transform.SpanningTransformer = &undUpperCaser{} + undLower transform.SpanningTransformer = &undLowerCaser{} + undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{} + + lowerFunc = []mapFunc{ + nil, // und + nil, // af + aztrLower, // az + nil, // el + ltLower, // lt + nil, // nl + aztrLower, // tr + } + + titleInfos = []struct { + title mapFunc + lower mapFunc + titleSpan spanFunc + rewrite func(*context) + }{ + {title, lower, isTitle, nil}, // und + {title, lower, isTitle, afnlRewrite}, // af + {aztrUpper(title), aztrLower, isTitle, nil}, // az + {title, lower, isTitle, nil}, // el + {ltUpper(title), ltLower, noSpan, nil}, // lt + {nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl + {aztrUpper(title), aztrLower, isTitle, nil}, // tr + } +) + +func makeUpper(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := upperFunc[i].upper + if f == nil { + return undUpper + } + return &simpleCaser{f: f, span: upperFunc[i].span} +} + +func makeLower(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := lowerFunc[i] + if f == nil { + if o.ignoreFinalSigma { + return undLowerIgnoreSigma + } + return undLower + } + if o.ignoreFinalSigma { + return &simpleCaser{f: f, span: isLower} + } + return &lowerCaser{ + first: f, + midWord: finalSigma(f), + } +} + +func makeTitle(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + x := &titleInfos[i] + lower := x.lower + if o.noLower { + lower = (*context).copy + } else if !o.ignoreFinalSigma { + lower = finalSigma(lower) + } + return &titleCaser{ + title: x.title, + lower: lower, + titleSpan: x.titleSpan, + rewrite: x.rewrite, + } +} + +func noSpan(c *context) bool { + c.err = transform.ErrEndOfSpan + return false +} + +// TODO: consider a similar special case for the fast majority lower case. This +// is a bit more involved so will require some more precise benchmarking to +// justify it. + +type undUpperCaser struct{ transform.NopResetter } + +// undUpperCaser implements the Transformer interface for doing an upper case +// mapping for the root locale (und). It eliminates the need for an allocation +// as it prevents escaping by not using function pointers. +func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + upper(&c) + c.checkpoint() + } + return c.ret() +} + +func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isUpper(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// undLowerIgnoreSigmaCaser implements the Transformer interface for doing +// a lower case mapping for the root locale (und) ignoring final sigma +// handling. This casing algorithm is used in some performance-critical packages +// like secure/precis and x/net/http/idna, which warrants its special-casing. 
+type undLowerIgnoreSigmaCaser struct{ transform.NopResetter } + +func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() && lower(&c) { + c.checkpoint() + } + return c.ret() + +} + +// Span implements a generic lower-casing. This is possible as isLower works +// for all lowercasing variants. All lowercase variants only vary in how they +// transform a non-lowercase letter. They will never change an already lowercase +// letter. In addition, there is no state. +func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isLower(&c) { + c.checkpoint() + } + return c.retSpan() +} + +type simpleCaser struct { + context + f mapFunc + span spanFunc +} + +// simpleCaser implements the Transformer interface for doing a case operation +// on a rune-by-rune basis. +func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() && t.f(&c) { + c.checkpoint() + } + return c.ret() +} + +func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && t.span(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// undLowerCaser implements the Transformer interface for doing a lower case +// mapping for the root locale (und) ignoring final sigma handling. This casing +// algorithm is used in some performance-critical packages like secure/precis +// and x/net/http/idna, which warrants its special-casing. +type undLowerCaser struct{ transform.NopResetter } + +func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !lower(&c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !c.hasPrefix("Σ") { + if !lower(&c) { + break + } + } else if !finalSigmaBody(&c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isLower(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// lowerCaser implements the Transformer interface. The default Unicode lower +// casing requires different treatment for the first and subsequent characters +// of a word, most notably to handle the Greek final Sigma. +type lowerCaser struct { + undLowerIgnoreSigmaCaser + + context + + first, midWord mapFunc +} + +func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF} + c := &t.context + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !t.first(c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !t.midWord(c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +// titleCaser implements the Transformer interface. Title casing algorithms +// distinguish between the first letter of a word and subsequent letters of the +// same word. 
It uses state to avoid requiring a potentially infinite lookahead. +type titleCaser struct { + context + + // rune mappings used by the actual casing algorithms. + title mapFunc + lower mapFunc + titleSpan spanFunc + + rewrite func(*context) +} + +// Transform implements the standard Unicode title case algorithm as defined in +// Chapter 3 of The Unicode Standard: +// toTitlecase(X): Find the word boundaries in X according to Unicode Standard +// Annex #29, "Unicode Text Segmentation." For each word boundary, find the +// first cased character F following the word boundary. If F exists, map F to +// Titlecase_Mapping(F); then map all characters C between F and the following +// word boundary to Lowercase_Mapping(C). +func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.ret() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.title(c) { + break + } + c.isMidWord = true + } else if !t.lower(c) { + break + } + } else if !c.copy() { + break + } else if p.isBreak() { + c.isMidWord = false + } + + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.ret() +} + +func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) { + t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.retSpan() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.titleSpan(c) { + break + } + c.isMidWord = true + } else if !isLower(c) { + break + } + } else if p.isBreak() { + c.isMidWord = false + } + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.retSpan() +} + +// finalSigma adds Greek final Sigma handing to another casing function. It +// determines whether a lowercased sigma should be σ or ς, by looking ahead for +// case-ignorables and a cased letters. +func finalSigma(f mapFunc) mapFunc { + return func(c *context) bool { + if !c.hasPrefix("Σ") { + return f(c) + } + return finalSigmaBody(c) + } +} + +func finalSigmaBody(c *context) bool { + // Current rune must be ∑. + + // ::NFD(); + // # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + // Σ } [:case-ignorable:]* [:cased:] → σ; + // [:cased:] [:case-ignorable:]* { Σ → ς; + // ::Any-Lower; + // ::NFC(); + + p := c.pDst + c.writeString("ς") + + // TODO: we should do this here, but right now this will never have an + // effect as this is called when the prefix is Sigma, whereas Dutch and + // Afrikaans only test for an apostrophe. + // + // if t.rewrite != nil { + // t.rewrite(c) + // } + + // We need to do one more iteration after maxIgnorable, as a cased + // letter is not an ignorable and may modify the result. 
+ wasMid := false + for i := 0; i < maxIgnorable+1; i++ { + if !c.next() { + return false + } + if !c.info.isCaseIgnorable() { + // All Midword runes are also case ignorable, so we are + // guaranteed to have a letter or word break here. As we are + // unreading the run, there is no need to unset c.isMidWord; + // the title caser will handle this. + if c.info.isCased() { + // p+1 is guaranteed to be in bounds: if writing ς was + // successful, p+1 will contain the second byte of ς. If not, + // this function will have returned after c.next returned false. + c.dst[p+1]++ // ς → σ + } + c.unreadRune() + return true + } + // A case ignorable may also introduce a word break, so we may need + // to continue searching even after detecting a break. + isMid := c.info.isMid() + if (wasMid && isMid) || c.info.isBreak() { + c.isMidWord = false + } + wasMid = isMid + c.copy() + } + return true +} + +// finalSigmaSpan would be the same as isLower. + +// elUpper implements Greek upper casing, which entails removing a predefined +// set of non-blocked modifiers. Note that these accents should not be removed +// for title casing! +// Example: "Οδός" -> "ΟΔΟΣ". +func elUpper(c *context) bool { + // From CLDR: + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ; + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? { \u0345 → ; + + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !upper(c) { + return false + } + if !unicode.Is(unicode.Greek, r) { + return true + } + i := 0 + // Take the properties of the uppercased rune that is already written to the + // destination. This saves us the trouble of having to uppercase the + // decomposed rune again. + if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil { + // Restore the destination position and process the decomposed rune. + r, sz := utf8.DecodeRune(b) + if r <= 0xFF { // See A.6.1 + return true + } + c.pDst = oldPDst + // Insert the first rune and ignore the modifiers. See A.6.2. + c.writeBytes(b[:sz]) + i = len(b[sz:]) / 2 // Greek modifiers are always of length 2. + } + + for ; i < maxIgnorable && c.next(); i++ { + switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r { + // Above and Iota Subscript + case 0x0300, // U+0300 COMBINING GRAVE ACCENT + 0x0301, // U+0301 COMBINING ACUTE ACCENT + 0x0304, // U+0304 COMBINING MACRON + 0x0306, // U+0306 COMBINING BREVE + 0x0308, // U+0308 COMBINING DIAERESIS + 0x0313, // U+0313 COMBINING COMMA ABOVE + 0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE + 0x0342, // U+0342 COMBINING GREEK PERISPOMENI + 0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI + // No-op. Gobble the modifier. + + default: + switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() { + case cccZero: + c.unreadRune() + return true + + // We don't need to test for IotaSubscript as the only rune that + // qualifies (U+0345) was already excluded in the switch statement + // above. See A.4. + + case cccAbove: + return c.copy() + default: + // Some other modifier. We're still allowed to gobble Greek + // modifiers after this. + c.copy() + } + } + } + return i == maxIgnorable +} + +// TODO: implement elUpperSpan (low-priority: complex and infrequent). + +func ltLower(c *context) bool { + // From CLDR: + // # Introduce an explicit dot above when lowercasing capital I's and J's + // # whenever there are more accents above. 
+ // # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + // # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I + // # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J + // # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK + // # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE + // # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE + // # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + // ::NFD(); + // I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307; + // J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307; + // I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307; + // I \u0300 (Ì) → i \u0307 \u0300; + // I \u0301 (Í) → i \u0307 \u0301; + // I \u0303 (Ĩ) → i \u0307 \u0303; + // ::Any-Lower(); + // ::NFC(); + + i := 0 + if r := c.src[c.pSrc]; r < utf8.RuneSelf { + lower(c) + if r != 'I' && r != 'J' { + return true + } + } else { + p := norm.NFD.Properties(c.src[c.pSrc:]) + if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') { + // UTF-8 optimization: the decomposition will only have an above + // modifier if the last rune of the decomposition is in [U+300-U+311]. + // In all other cases, a decomposition starting with I is always + // an I followed by modifiers that are not cased themselves. See A.2. + if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4. + if !c.writeBytes(d[:1]) { + return false + } + c.dst[c.pDst-1] += 'a' - 'A' // lower + + // Assumption: modifier never changes on lowercase. See A.1. + // Assumption: all modifiers added have CCC = Above. See A.2.3. + return c.writeString("\u0307") && c.writeBytes(d[1:]) + } + // In all other cases the additional modifiers will have a CCC + // that is less than 230 (Above). We will insert the U+0307, if + // needed, after these modifiers so that a string in FCD form + // will remain so. See A.2.2. + lower(c) + i = 1 + } else { + return lower(c) + } + } + + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + return c.writeString("\u0307") && c.copy() // See A.1. + default: + c.copy() // See A.1. + } + } + return i == maxIgnorable +} + +// ltLowerSpan would be the same as isLower. + +func ltUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // Unicode: + // 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + // + // From CLDR: + // # Remove \u0307 following soft-dotteds (i, j, and the like), with possible + // # intervening non-230 marks. + // ::NFD(); + // [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ; + // ::Any-Upper(); + // ::NFC(); + + // TODO: See A.5. A soft-dotted rune never has an exception. This would + // allow us to overload the exception bit and encode this property in + // info. Need to measure performance impact of this. + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !f(c) { + return false + } + if !unicode.Is(unicode.Soft_Dotted, r) { + return true + } + + // We don't need to do an NFD normalization, as a soft-dotted rune never + // contains U+0307. See A.3. + + i := 0 + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + if c.hasPrefix("\u0307") { + // We don't do a full NFC, but rather combine runes for + // some of the common cases. 
(Returning NFC or + // preserving normal form is neither a requirement nor + // a possibility anyway). + if !c.next() { + return false + } + if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc { + s := "" + switch c.src[c.pSrc+1] { + case 0x80: // U+0300 COMBINING GRAVE ACCENT + s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE + case 0x81: // U+0301 COMBINING ACUTE ACCENT + s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE + case 0x83: // U+0303 COMBINING TILDE + s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE + case 0x88: // U+0308 COMBINING DIAERESIS + s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS + default: + } + if s != "" { + c.pDst = oldPDst + return c.writeString(s) + } + } + } + return c.copy() + default: + c.copy() + } + } + return i == maxIgnorable + } +} + +// TODO: implement ltUpperSpan (low priority: complex and infrequent). + +func aztrUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // i→İ; + if c.src[c.pSrc] == 'i' { + return c.writeString("İ") + } + return f(c) + } +} + +func aztrLower(c *context) (done bool) { + // From CLDR: + // # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri + // # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE + // İ→i; + // # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. + // # This matches the behavior of the canonically equivalent I-dot_above + // # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE + // # When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + // # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I + // I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ; + // I→ı ; + // ::Any-Lower(); + if c.hasPrefix("\u0130") { // İ + return c.writeString("i") + } + if c.src[c.pSrc] != 'I' { + return lower(c) + } + + // We ignore the lower-case I for now, but insert it later when we know + // which form we need. + start := c.pSrc + c.sz + + i := 0 +Loop: + // We check for up to n ignorables before \u0307. As \u0307 is an + // ignorable as well, n is maxIgnorable-1. + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccAbove: + if c.hasPrefix("\u0307") { + return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307 + } + done = true + break Loop + case cccZero: + c.unreadRune() + done = true + break Loop + default: + // We'll write this rune after we know which starter to use. + } + } + if i == maxIgnorable { + done = true + } + return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done +} + +// aztrLowerSpan would be the same as isLower. + +func nlTitle(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ; + if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' { + return title(c) + } + + if !c.writeString("I") || !c.next() { + return false + } + if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' { + return c.writeString("J") + } + c.unreadRune() + return true +} + +func nlTitleSpan(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? 
{ Ij } → IJ ; + if c.src[c.pSrc] != 'I' { + return isTitle(c) + } + if !c.next() || c.src[c.pSrc] == 'j' { + return false + } + if c.src[c.pSrc] != 'J' { + c.unreadRune() + } + return true +} + +// Not part of CLDR, but see https://unicode.org/cldr/trac/ticket/7078. +func afnlRewrite(c *context) { + if c.hasPrefix("'") || c.hasPrefix("’") { + c.isMidWord = true + } +} diff --git a/vendor/golang.org/x/text/cases/tables10.0.0.go b/vendor/golang.org/x/text/cases/tables10.0.0.go new file mode 100644 index 00000000000..ca9923105e3 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables10.0.0.go @@ -0,0 +1,2256 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.10 && !go1.13 +// +build go1.10,!go1.13 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +var xorData string = "" + // Size: 185 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + + "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + + "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2068 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ιΙΙ\x166ΐ" + + "Ϊ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12φΦΦ\x12" + + "\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x12\x12вВВ\x12\x12дД" + + "Д\x12\x12оОО\x12\x12сСС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13" + + "\x1bꙋꙊꙊ\x13\x1bẖH̱H̱\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1ba" + + "ʾAʾAʾ\x13\x1bṡṠṠ\x12\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166" + + "ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + + "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + + "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + + "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + + "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + + "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + + "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + + "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + + "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12" + + "\x12ιΙΙ\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1c" + + "ηιῃΗΙ\x166ῒΪ̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ" + + "̀\x166ΰΫ́Ϋ́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙ" + + "ῼ\x14$ώιΏΙΏͅ\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk" + + 
"\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ" + + "\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ" + + "\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFF" + + "Ff\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12" + + "stSTSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄ" + + "ԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 11892 bytes (11.61 KiB). Checksum: c6f15484b7653775. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 18: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 18 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 20 blocks, 1280 entries, 2560 bytes +// The third block is the zero block. 
+var caseValues = [1280]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 
0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x110a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x118a, + 0x19e: 0x120a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x128d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x130a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x144a, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x158a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 
0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x160a, 0x251: 0x168a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x170a, 0x256: 0x178a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x180a, 0x271: 0x188a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x190a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, + 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, + 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x239a, 0x291: 0x0812, + 0x292: 0x247a, 0x293: 0x0812, 0x294: 0x25ba, 0x295: 0x0812, 0x296: 0x26fa, 0x297: 0x0812, + 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, + 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, + 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, + 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, + 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, + 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, + 0x2bc: 0x4d52, 0x2bd: 0x4d52, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x283a, 0x2c1: 0x292a, 0x2c2: 0x2a1a, 0x2c3: 0x2b0a, 0x2c4: 0x2bfa, 0x2c5: 0x2cea, + 0x2c6: 0x2dda, 0x2c7: 0x2eca, 0x2c8: 0x2fb9, 0x2c9: 0x30a9, 0x2ca: 0x3199, 0x2cb: 0x3289, + 0x2cc: 0x3379, 0x2cd: 0x3469, 0x2ce: 0x3559, 0x2cf: 0x3649, 0x2d0: 0x373a, 0x2d1: 0x382a, + 0x2d2: 0x391a, 0x2d3: 0x3a0a, 0x2d4: 0x3afa, 0x2d5: 0x3bea, 0x2d6: 0x3cda, 0x2d7: 0x3dca, + 0x2d8: 0x3eb9, 0x2d9: 0x3fa9, 0x2da: 0x4099, 0x2db: 0x4189, 0x2dc: 0x4279, 0x2dd: 0x4369, + 0x2de: 0x4459, 0x2df: 0x4549, 0x2e0: 0x463a, 0x2e1: 0x472a, 0x2e2: 0x481a, 0x2e3: 0x490a, + 0x2e4: 0x49fa, 0x2e5: 0x4aea, 0x2e6: 0x4bda, 0x2e7: 0x4cca, 0x2e8: 0x4db9, 0x2e9: 0x4ea9, + 0x2ea: 0x4f99, 0x2eb: 0x5089, 0x2ec: 0x5179, 0x2ed: 0x5269, 0x2ee: 0x5359, 0x2ef: 0x5449, + 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x553a, 0x2f3: 0x564a, 0x2f4: 0x571a, + 0x2f6: 0x57fa, 0x2f7: 0x58da, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, + 0x2fc: 0x5a19, 0x2fd: 0x0004, 0x2fe: 0x5aea, 0x2ff: 0x0004, + // Block 0xc, offset 0x300 + 0x300: 0x0004, 0x301: 0x0004, 0x302: 0x5b6a, 0x303: 0x5c7a, 0x304: 0x5d4a, + 0x306: 0x5e2a, 0x307: 0x5f0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, + 0x30c: 0x6049, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, + 0x312: 0x611a, 0x313: 0x625a, 0x316: 0x639a, 0x317: 0x647a, + 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, + 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x65ba, 0x323: 0x66fa, + 0x324: 0x683a, 0x325: 0x0912, 0x326: 0x691a, 0x327: 0x69fa, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, + 0x332: 0x6b3a, 0x333: 0x6c4a, 0x334: 0x6d1a, + 0x336: 0x6dfa, 0x337: 0x6eda, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, + 0x33c: 
0x7019, 0x33d: 0x0004, 0x33e: 0x0004, + // Block 0xd, offset 0x340 + 0x342: 0x0013, + 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, + 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, + 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, + 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, + 0x364: 0x0013, 0x366: 0x70eb, 0x368: 0x0013, + 0x36a: 0x714b, 0x36b: 0x718b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, + 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, + 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, + 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, + // Block 0xe, offset 0x380 + 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, + 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, + 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, + 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, + 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, + 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, + 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, + 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, + 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, + 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, + 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, + 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0x71eb, 0x3e3: 0x8853, + 0x3e4: 0x724b, 0x3e5: 0x72aa, 0x3e6: 0x730a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, + 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0x736b, 0x3ee: 0x73cb, 0x3ef: 0x742b, + 0x3f0: 0x748b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, + 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, + 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0x74eb, 0x3ff: 0x754b, + // Block 0x10, offset 0x400 + 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, + 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0014, 0x40a: 0x0014, 0x40b: 0x0713, + 0x40c: 0x0712, 0x40d: 0x75ab, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, + 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, + 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, + 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, + 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, + 0x42a: 0x760b, 0x42b: 0x766b, 0x42c: 0x76cb, 0x42d: 0x772b, 0x42e: 0x778b, + 0x430: 0x77eb, 0x431: 0x784b, 0x432: 0x78ab, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, + 0x436: 0x0113, 0x437: 0x0112, + // Block 0x11, offset 0x440 + 0x440: 0x790a, 0x441: 0x798a, 0x442: 0x7a0a, 0x443: 0x7a8a, 0x444: 0x7b3a, 0x445: 0x7bea, + 0x446: 0x7c6a, + 0x453: 0x7cea, 0x454: 0x7dca, 0x455: 0x7eaa, 0x456: 0x7f8a, 0x457: 0x806a, + 0x45d: 0x0010, + 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 
0x0010, + 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, + 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, + 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, + 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, + 0x47c: 0x0010, 0x47e: 0x0010, + // Block 0x12, offset 0x480 + 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, + 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, + 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, + 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, + 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, + 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, + 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, + 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, + 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, + 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, + 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, + // Block 0x13, offset 0x4c0 + 0x4c2: 0x0010, + 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, + 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, + 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, + 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, + 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, + 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, + 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, + 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, + 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, + 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, + 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, + // Block 0x5, offset 0x140 + 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, + 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, + 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0c, + 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, + 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, + 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, + 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, + // Block 0x9, offset 0x240 + 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, + 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, + 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, + 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, + 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, + 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, + 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, + 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, + // Block 0xa, offset 0x280 + 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, + 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, + 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, + 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, + // Block 0xd, offset 0x340 + 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, + 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, + 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, + 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, + 0x362: 0xeb, 0x363: 0xec, + 0x368: 0xed, 0x369: 0xee, 0x36a: 0xef, 0x36b: 0xf0, + 0x370: 0xf1, 0x371: 0xf2, 0x372: 0xf3, 0x374: 0xf4, 0x375: 0xf5, + // Block 0xe, offset 0x380 + 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, + 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf6, + 0x390: 0x23, 0x391: 0xf7, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf8, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, + 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, + 0x3d0: 0xf7, + // Block 0x10, offset 0x400 + 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, + 0x418: 0x23, 0x419: 0xf9, + // Block 0x11, offset 0x440 + 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, + 0x468: 0xf0, 0x469: 0xfa, 0x46b: 0xfb, 0x46c: 0xfc, 0x46d: 0xfd, 0x46e: 0xfe, + 0x47c: 0x23, 0x47d: 0xff, 0x47e: 0x100, 0x47f: 0x101, + // Block 0x12, offset 0x480 + 0x4b0: 0x23, 0x4b1: 0x102, 0x4b2: 0x103, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x104, 0x4c6: 0x105, + 0x4c9: 0x106, + 0x4d0: 0x107, 0x4d1: 0x108, 0x4d2: 0x109, 
0x4d3: 0x10a, 0x4d4: 0x10b, 0x4d5: 0x10c, 0x4d6: 0x10d, 0x4d7: 0x10e, + 0x4d8: 0x10f, 0x4d9: 0x110, 0x4da: 0x111, 0x4db: 0x112, 0x4dc: 0x113, 0x4dd: 0x114, 0x4de: 0x115, 0x4df: 0x116, + 0x4e8: 0x117, 0x4e9: 0x118, 0x4ea: 0x119, + // Block 0x14, offset 0x500 + 0x500: 0x11a, + 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x11b, 0x524: 0x10, 0x525: 0x11c, + 0x538: 0x11d, 0x539: 0x11, 0x53a: 0x11e, + // Block 0x15, offset 0x540 + 0x544: 0x11f, 0x545: 0x120, 0x546: 0x121, + 0x54f: 0x122, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x123, 0x5c1: 0x124, 0x5c4: 0x124, 0x5c5: 0x124, 0x5c6: 0x124, 0x5c7: 0x125, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 277 entries, 554 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x35, 0x38, 0x3c, 0x3f, 0x43, 0x4d, 0x4f, 0x54, 0x64, 0x6b, 0x70, 0x7e, 0x7f, 0x8d, 0x9c, 0xa6, 0xa9, 0xaf, 0xb7, 0xba, 0xbc, 0xca, 0xd0, 0xde, 0xe9, 0xf5, 0x100, 0x10c, 0x116, 0x122, 0x12d, 0x139, 0x145, 0x14d, 0x155, 0x15f, 0x16a, 0x176, 0x17d, 0x188, 0x18d, 0x195, 0x198, 0x19d, 0x1a1, 0x1a5, 0x1ac, 0x1b5, 0x1bd, 0x1be, 0x1c7, 0x1ce, 0x1d6, 0x1dc, 0x1e2, 0x1e7, 0x1eb, 0x1ee, 0x1f0, 0x1f3, 0x1f8, 0x1f9, 0x1fb, 0x1fd, 0x1ff, 0x206, 0x20b, 0x20f, 0x218, 0x21b, 0x21e, 0x224, 0x225, 0x230, 0x231, 0x232, 0x237, 0x244, 0x24c, 0x254, 0x25d, 0x266, 0x26f, 0x274, 0x277, 0x280, 0x28d, 0x28f, 0x296, 0x298, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c0, 0x2c6, 0x2c7, 0x2d5, 0x2da, 0x2dd, 0x2e2, 0x2e6, 0x2ec, 0x2f1, 0x2f4, 0x2f9, 0x2fe, 0x2ff, 0x305, 0x307, 0x308, 0x30a, 0x30c, 0x30f, 0x310, 0x312, 0x315, 0x31b, 0x31f, 0x321, 0x326, 0x32d, 0x331, 0x33a, 0x33b, 0x343, 0x347, 0x34c, 0x354, 0x35a, 0x360, 0x36a, 0x36f, 0x378, 0x37e, 0x385, 0x389, 0x391, 0x393, 0x395, 0x398, 0x39a, 0x39c, 0x39d, 0x39e, 0x3a0, 0x3a2, 0x3a8, 0x3ad, 0x3af, 0x3b5, 0x3b8, 0x3ba, 0x3c0, 0x3c5, 0x3c7, 0x3c8, 0x3c9, 0x3ca, 0x3cc, 0x3ce, 0x3d0, 0x3d3, 0x3d5, 0x3d8, 0x3e0, 0x3e3, 0x3e7, 0x3ef, 0x3f1, 0x3f2, 0x3f3, 0x3f5, 0x3fb, 0x3fd, 0x3fe, 0x400, 0x402, 0x404, 0x411, 0x412, 0x413, 0x417, 0x419, 0x41a, 0x41b, 0x41c, 0x41d, 0x421, 0x425, 0x42b, 0x42d, 0x434, 0x437, 0x43b, 0x441, 0x44a, 0x450, 0x456, 0x460, 0x46a, 0x46c, 0x473, 0x479, 0x47f, 0x485, 0x488, 0x48e, 0x491, 0x499, 0x49a, 0x4a1, 0x4a2, 0x4a5, 0x4af, 0x4b5, 0x4bb, 0x4bc, 0x4c2, 0x4c5, 0x4cd, 0x4d4, 0x4db, 0x4dc, 0x4dd, 0x4de, 0x4df, 0x4e1, 0x4e3, 0x4e5, 0x4e9, 0x4ea, 0x4ec, 0x4ed, 0x4ee, 0x4f0, 0x4f5, 0x4fa, 0x4fe, 0x4ff, 0x502, 0x506, 0x511, 0x515, 0x51d, 0x522, 0x526, 0x529, 0x52d, 0x530, 0x533, 0x538, 0x53c, 0x540, 0x544, 0x548, 0x54a, 0x54c, 0x54f, 0x554, 0x556, 0x55b, 0x564, 0x569, 0x56a, 0x56d, 0x56e, 0x56f, 0x571, 0x572, 0x573} + +// sparseValues: 1395 entries, 5580 bytes +var sparseValues = [1395]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 
0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xbf}, + // Block 0x6, offset 0x35 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x38 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3c + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3f + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x43 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4f + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x54 + {value: 0x6852, lo: 0x80, hi: 0x86}, + {value: 0x198a, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0024, lo: 0x92, hi: 0x95}, + {value: 0x0034, lo: 0x96, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x99}, + {value: 0x0034, lo: 0x9a, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa7}, + {value: 0x0024, lo: 0xa8, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, 
lo: 0xb0, hi: 0xbd}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x64 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xf, offset 0x6b + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x10, offset 0x70 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x11, offset 0x7e + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x12, offset 0x7f + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8d + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x14, offset 0x9c + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x15, offset 0xa6 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x16, offset 0xa9 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + // Block 0x17, offset 0xaf + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + 
{value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x18, offset 0xb7 + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x19, offset 0xba + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x1a, offset 0xbc + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1b, offset 0xca + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1d, offset 0xde + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0xe9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x1f, offset 0xf5 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x20, offset 0x100 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, 
lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x21, offset 0x10c + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x22, offset 0x116 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x23, offset 0x122 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x12d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x25, offset 0x139 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x145 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x14d + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x28, offset 0x155 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x29, 
offset 0x15f + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x2a, offset 0x16a + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2b, offset 0x176 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2c, offset 0x17d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2d, offset 0x188 + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2e, offset 0x18d + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2f, offset 0x195 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x30, offset 0x198 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x19d + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x32, offset 0x1a1 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x33, offset 0x1a5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x34, offset 0x1ac + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 
0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x1b5 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x36, offset 0x1bd + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x37, offset 0x1be + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x38, offset 0x1c7 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x39, offset 0x1ce + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d6 + {value: 0x7053, lo: 0x80, hi: 0x85}, + {value: 0x7053, lo: 0x87, hi: 0x87}, + {value: 0x7053, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x3b, offset 0x1dc + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x1e2 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3d, offset 0x1e7 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3e, offset 0x1eb + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3f, offset 0x1ee + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x40, offset 0x1f0 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x41, offset 0x1f3 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x42, offset 0x1f8 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x43, offset 0x1f9 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x44, offset 0x1fb + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x45, offset 0x1fd + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 
0x46, offset 0x1ff + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x47, offset 0x206 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x48, offset 0x20b + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x49, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x4a, offset 0x218 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x4b, offset 0x21b + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb7}, + // Block 0x4c, offset 0x21e + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x224 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4e, offset 0x225 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4f, offset 0x230 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x50, offset 0x231 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x51, offset 0x232 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x52, offset 0x237 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x53, offset 0x244 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x54, offset 0x24c + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 
0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x254 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x56, offset 0x25d + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x57, offset 0x266 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x58, offset 0x26f + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x59, offset 0x274 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x5a, offset 0x277 + {value: 0x1a6a, lo: 0x80, hi: 0x80}, + {value: 0x1aea, lo: 0x81, hi: 0x81}, + {value: 0x1b6a, lo: 0x82, hi: 0x82}, + {value: 0x1bea, lo: 0x83, hi: 0x83}, + {value: 0x1c6a, lo: 0x84, hi: 0x84}, + {value: 0x1cea, lo: 0x85, hi: 0x85}, + {value: 0x1d6a, lo: 0x86, hi: 0x86}, + {value: 0x1dea, lo: 0x87, hi: 0x87}, + {value: 0x1e6a, lo: 0x88, hi: 0x88}, + // Block 0x5b, offset 0x280 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5c, offset 0x28d + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5d, offset 0x28f + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8452, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8852, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x296 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5f, offset 0x298 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, 
lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x60, offset 0x2a4 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x2a5 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x1f1a, lo: 0x96, hi: 0x96}, + {value: 0x1fca, lo: 0x97, hi: 0x97}, + {value: 0x207a, lo: 0x98, hi: 0x98}, + {value: 0x212a, lo: 0x99, hi: 0x99}, + {value: 0x21da, lo: 0x9a, hi: 0x9a}, + {value: 0x228a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x233b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x62, offset 0x2b0 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x63, offset 0x2b8 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2c0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x2c6 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x66, offset 0x2c7 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x67, offset 0x2d5 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0x9d52, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x68, offset 0x2da + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x69, offset 0x2dd + {value: 0xa053, lo: 0xb6, hi: 0xb7}, + {value: 0xa353, lo: 0xb8, hi: 0xb9}, + {value: 0xa653, lo: 0xba, hi: 0xbb}, + {value: 0xa353, lo: 0xbc, hi: 0xbd}, + {value: 0xa053, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x2e2 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xa953, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e6 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6c, offset 0x2ec + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6d, offset 0x2f1 + {value: 0x0010, lo: 0x80, hi: 
0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x2f4 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6f, offset 0x2f9 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x70, offset 0x2fe + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x71, offset 0x2ff + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x72, offset 0x305 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x73, offset 0x307 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x74, offset 0x308 + {value: 0x0010, lo: 0x85, hi: 0xae}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x75, offset 0x30a + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x76, offset 0x30c + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x77, offset 0x30f + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x78, offset 0x310 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x79, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x7a, offset 0x315 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x31b + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7c, offset 0x31f + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7d, offset 0x321 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7e, offset 0x326 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8453, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7f, offset 0x32d + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x80, offset 0x331 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x81, offset 0x33a + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x82, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 
0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x83, offset 0x343 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x347 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x85, offset 0x34c + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x86, offset 0x354 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x87, offset 0x35a + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x88, offset 0x360 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x89, offset 0x36a + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x36f + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8b, offset 0x378 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37e + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xac52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x385 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x389 + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, 
hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8f, offset 0x391 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x393 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x91, offset 0x395 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x92, offset 0x398 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x93, offset 0x39a + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x94, offset 0x39c + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x95, offset 0x39d + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x96, offset 0x39e + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x97, offset 0x3a0 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x98, offset 0x3a2 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x99, offset 0x3a8 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x3ad + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3af + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x3b5 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9d, offset 0x3b8 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9e, offset 0x3ba + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9f, offset 0x3c0 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x3c5 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa1, offset 0x3c7 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa2, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa3, offset 0x3c9 + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa4, offset 0x3ca + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x3cc + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa6, offset 0x3ce + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa7, offset 0x3d0 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa8, offset 0x3d3 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa9, offset 0x3d5 + 
{value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xaa, offset 0x3d8 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xaf53, lo: 0x98, hi: 0x9f}, + {value: 0xb253, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e0 + {value: 0xaf52, lo: 0x80, hi: 0x87}, + {value: 0xb252, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xac, offset 0x3e3 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb253, lo: 0xb0, hi: 0xb7}, + {value: 0xaf53, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x3e7 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb252, lo: 0x98, hi: 0x9f}, + {value: 0xaf52, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xae, offset 0x3ef + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xaf, offset 0x3f1 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xb0, offset 0x3f2 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb1, offset 0x3f3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb2, offset 0x3f5 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x3fb + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb4, offset 0x3fd + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb5, offset 0x3fe + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb6, offset 0x400 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb7, offset 0x402 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb8, offset 0x404 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb3}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x411 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xba, offset 0x412 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xbb, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbc, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbd, offset 0x419 + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbe, offset 0x41a + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbf, offset 0x41b + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x41c + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc1, offset 0x41d + {value: 0x0010, lo: 0x80, 
hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc2, offset 0x421 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x425 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc4, offset 0x42b + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc5, offset 0x42d + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc6, offset 0x434 + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc7, offset 0x437 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43b + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xc9, offset 0x441 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xca, offset 0x44a + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcb, offset 0x450 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcc, offset 0x456 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcd, offset 0x460 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xce, offset 0x46a + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xcf, offset 0x46c + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + 
{value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd0, offset 0x473 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x479 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd2, offset 0x47f + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x485 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd4, offset 0x488 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x48e + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd6, offset 0x491 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd7, offset 0x499 + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd8, offset 0x49a + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd9, offset 0x4a1 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xda, offset 0x4a2 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdb, offset 0x4a5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xdc, offset 0x4af + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xdd, offset 0x4b5 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x86, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + // Block 0xde, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xdf, offset 0x4bc + {value: 0x0010, lo: 0x80, hi: 
0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe0, offset 0x4c2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x4c5 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xe2, offset 0x4cd + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x4d4 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xe4, offset 0x4db + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe5, offset 0x4dc + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xe6, offset 0x4dd + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xe7, offset 0x4de + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xe8, offset 0x4df + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe9, offset 0x4e1 + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xea, offset 0x4e3 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xeb, offset 0x4e5 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xec, offset 0x4e9 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xed, offset 0x4ea + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xee, offset 0x4ec + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xef, offset 0x4ed + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + // Block 0xf0, offset 0x4ee + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xf1, offset 0x4f0 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xf2, offset 0x4f5 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xf3, offset 0x4fa + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xf4, offset 0x4fe + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xf5, offset 0x4ff + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xf6, offset 0x502 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 
0xf7, offset 0x506 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xf8, offset 0x511 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xf9, offset 0x515 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xfa, offset 0x51d + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x522 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0xfc, offset 0x526 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0xfd, offset 0x529 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0xfe, offset 0x52d + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0xff, offset 0x530 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x100, offset 0x533 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x101, offset 0x538 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x102, offset 0x53c + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x103, offset 0x540 + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x104, offset 0x544 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x105, offset 0x548 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x106, offset 0x54a + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x107, offset 0x54c + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x108, offset 0x54f + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 
0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x109, offset 0x554 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x10a, offset 0x556 + {value: 0xb552, lo: 0x80, hi: 0x81}, + {value: 0xb852, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x10b, offset 0x55b + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x10c, offset 0x564 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x10d, offset 0x569 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10e, offset 0x56a + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10f, offset 0x56d + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x110, offset 0x56e + {value: 0x0004, lo: 0xbb, hi: 0xbf}, + // Block 0x111, offset 0x56f + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x112, offset 0x571 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x113, offset 0x572 + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 14177 bytes (13KiB); checksum: F17D40E8 diff --git a/vendor/golang.org/x/text/cases/tables11.0.0.go b/vendor/golang.org/x/text/cases/tables11.0.0.go new file mode 100644 index 00000000000..b1106b41713 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables11.0.0.go @@ -0,0 +1,2317 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.13 && !go1.14 +// +build go1.13,!go1.14 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +var xorData string = "" + // Size: 188 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a" + + "\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&" + + "\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00" + + "\x01\x22" + +var exceptions string = "" + // Size: 2436 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ιΙΙ\x166ΐ" + + "Ϊ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12φΦΦ\x12" + + "\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა\x10\x1bᲑბ" + + "\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ\x10\x1bᲘი" + + "\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ\x10\x1bᲟჟ" + + "\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ\x10\x1bᲦღ" + + "\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ\x10\x1bᲭჭ" + + "\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ\x10\x1bᲴჴ" + + "\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ\x10\x1bᲽჽ" + + "\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12сСС\x12\x12" + + "тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱\x13\x1bẗ" + + "T̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12\x10ssß\x14" + + "$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ" + + "\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈ" + + "Ι\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15" + + "\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ" + + "\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ" + + "\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠι" + + "ὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧι" + + "ὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ" + + "\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ" + + "\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ" + + "\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΪ̀Ϊ̀\x166ΐΙ" + + "̈́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ́\x14$ῤΡ̓Ρ̓" + + "\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ\x14$ῶΩ͂Ω͂\x16" + + "6ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12\x10ɫɫ\x12\x10ɽ" + + "ɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ" + + "\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ" + + "\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFFFf\x12\x12fiFIFi\x12\x12flFLFl" + + "\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12stSTSt\x12\x12stSTSt\x14$մնՄՆՄ" + + "ն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. 
len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 12250 bytes (11.96 KiB). Checksum: 53ff6cb7321675e1. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 20: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 20 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 22 blocks, 1408 entries, 2816 bytes +// The third block is the zero block. +var caseValues = [1408]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 
0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x110a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x118a, + 0x19e: 0x120a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x128d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 
0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x130a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x144a, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x158a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x160a, 0x251: 0x168a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x170a, 0x256: 0x178a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x180a, 0x271: 0x188a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x190a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x198a, 0x288: 0x0012, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x7053, 0x2c1: 0x7053, 0x2c2: 0x7053, 0x2c3: 0x7053, 0x2c4: 0x7053, 0x2c5: 0x7053, + 0x2c7: 0x7053, + 0x2cd: 0x7053, 0x2d0: 0x1a6a, 0x2d1: 0x1aea, + 0x2d2: 0x1b6a, 0x2d3: 0x1bea, 0x2d4: 0x1c6a, 0x2d5: 0x1cea, 0x2d6: 0x1d6a, 0x2d7: 0x1dea, + 0x2d8: 0x1e6a, 0x2d9: 0x1eea, 0x2da: 0x1f6a, 0x2db: 0x1fea, 0x2dc: 0x206a, 0x2dd: 0x20ea, + 0x2de: 0x216a, 0x2df: 0x21ea, 0x2e0: 0x226a, 0x2e1: 0x22ea, 0x2e2: 0x236a, 
0x2e3: 0x23ea, + 0x2e4: 0x246a, 0x2e5: 0x24ea, 0x2e6: 0x256a, 0x2e7: 0x25ea, 0x2e8: 0x266a, 0x2e9: 0x26ea, + 0x2ea: 0x276a, 0x2eb: 0x27ea, 0x2ec: 0x286a, 0x2ed: 0x28ea, 0x2ee: 0x296a, 0x2ef: 0x29ea, + 0x2f0: 0x2a6a, 0x2f1: 0x2aea, 0x2f2: 0x2b6a, 0x2f3: 0x2bea, 0x2f4: 0x2c6a, 0x2f5: 0x2cea, + 0x2f6: 0x2d6a, 0x2f7: 0x2dea, 0x2f8: 0x2e6a, 0x2f9: 0x2eea, 0x2fa: 0x2f6a, + 0x2fc: 0x0014, 0x2fd: 0x2fea, 0x2fe: 0x306a, 0x2ff: 0x30ea, + // Block 0xc, offset 0x300 + 0x300: 0x0812, 0x301: 0x0812, 0x302: 0x0812, 0x303: 0x0812, 0x304: 0x0812, 0x305: 0x0812, + 0x308: 0x0813, 0x309: 0x0813, 0x30a: 0x0813, 0x30b: 0x0813, + 0x30c: 0x0813, 0x30d: 0x0813, 0x310: 0x3a9a, 0x311: 0x0812, + 0x312: 0x3b7a, 0x313: 0x0812, 0x314: 0x3cba, 0x315: 0x0812, 0x316: 0x3dfa, 0x317: 0x0812, + 0x319: 0x0813, 0x31b: 0x0813, 0x31d: 0x0813, + 0x31f: 0x0813, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x0812, 0x323: 0x0812, + 0x324: 0x0812, 0x325: 0x0812, 0x326: 0x0812, 0x327: 0x0812, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x0813, 0x32b: 0x0813, 0x32c: 0x0813, 0x32d: 0x0813, 0x32e: 0x0813, 0x32f: 0x0813, + 0x330: 0x8e52, 0x331: 0x8e52, 0x332: 0x9152, 0x333: 0x9152, 0x334: 0x9452, 0x335: 0x9452, + 0x336: 0x9752, 0x337: 0x9752, 0x338: 0x9a52, 0x339: 0x9a52, 0x33a: 0x9d52, 0x33b: 0x9d52, + 0x33c: 0x4d52, 0x33d: 0x4d52, + // Block 0xd, offset 0x340 + 0x340: 0x3f3a, 0x341: 0x402a, 0x342: 0x411a, 0x343: 0x420a, 0x344: 0x42fa, 0x345: 0x43ea, + 0x346: 0x44da, 0x347: 0x45ca, 0x348: 0x46b9, 0x349: 0x47a9, 0x34a: 0x4899, 0x34b: 0x4989, + 0x34c: 0x4a79, 0x34d: 0x4b69, 0x34e: 0x4c59, 0x34f: 0x4d49, 0x350: 0x4e3a, 0x351: 0x4f2a, + 0x352: 0x501a, 0x353: 0x510a, 0x354: 0x51fa, 0x355: 0x52ea, 0x356: 0x53da, 0x357: 0x54ca, + 0x358: 0x55b9, 0x359: 0x56a9, 0x35a: 0x5799, 0x35b: 0x5889, 0x35c: 0x5979, 0x35d: 0x5a69, + 0x35e: 0x5b59, 0x35f: 0x5c49, 0x360: 0x5d3a, 0x361: 0x5e2a, 0x362: 0x5f1a, 0x363: 0x600a, + 0x364: 0x60fa, 0x365: 0x61ea, 0x366: 0x62da, 0x367: 0x63ca, 0x368: 0x64b9, 0x369: 0x65a9, + 0x36a: 0x6699, 0x36b: 0x6789, 0x36c: 0x6879, 0x36d: 0x6969, 0x36e: 0x6a59, 0x36f: 0x6b49, + 0x370: 0x0812, 0x371: 0x0812, 0x372: 0x6c3a, 0x373: 0x6d4a, 0x374: 0x6e1a, + 0x376: 0x6efa, 0x377: 0x6fda, 0x378: 0x0813, 0x379: 0x0813, 0x37a: 0x8e53, 0x37b: 0x8e53, + 0x37c: 0x7119, 0x37d: 0x0004, 0x37e: 0x71ea, 0x37f: 0x0004, + // Block 0xe, offset 0x380 + 0x380: 0x0004, 0x381: 0x0004, 0x382: 0x726a, 0x383: 0x737a, 0x384: 0x744a, + 0x386: 0x752a, 0x387: 0x760a, 0x388: 0x9153, 0x389: 0x9153, 0x38a: 0x9453, 0x38b: 0x9453, + 0x38c: 0x7749, 0x38d: 0x0004, 0x38e: 0x0004, 0x38f: 0x0004, 0x390: 0x0812, 0x391: 0x0812, + 0x392: 0x781a, 0x393: 0x795a, 0x396: 0x7a9a, 0x397: 0x7b7a, + 0x398: 0x0813, 0x399: 0x0813, 0x39a: 0x9753, 0x39b: 0x9753, 0x39d: 0x0004, + 0x39e: 0x0004, 0x39f: 0x0004, 0x3a0: 0x0812, 0x3a1: 0x0812, 0x3a2: 0x7cba, 0x3a3: 0x7dfa, + 0x3a4: 0x7f3a, 0x3a5: 0x0912, 0x3a6: 0x801a, 0x3a7: 0x80fa, 0x3a8: 0x0813, 0x3a9: 0x0813, + 0x3aa: 0x9d53, 0x3ab: 0x9d53, 0x3ac: 0x0913, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b2: 0x823a, 0x3b3: 0x834a, 0x3b4: 0x841a, + 0x3b6: 0x84fa, 0x3b7: 0x85da, 0x3b8: 0x9a53, 0x3b9: 0x9a53, 0x3ba: 0x4d53, 0x3bb: 0x4d53, + 0x3bc: 0x8719, 0x3bd: 0x0004, 0x3be: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c2: 0x0013, + 0x3c7: 0x0013, 0x3ca: 0x0012, 0x3cb: 0x0013, + 0x3cc: 0x0013, 0x3cd: 0x0013, 0x3ce: 0x0012, 0x3cf: 0x0012, 0x3d0: 0x0013, 0x3d1: 0x0013, + 0x3d2: 0x0013, 0x3d3: 0x0012, 0x3d5: 0x0013, + 0x3d9: 0x0013, 0x3da: 0x0013, 0x3db: 0x0013, 0x3dc: 0x0013, 0x3dd: 0x0013, + 0x3e4: 0x0013, 0x3e6: 0x87eb, 0x3e8: 0x0013, + 0x3ea: 0x884b, 
0x3eb: 0x888b, 0x3ec: 0x0013, 0x3ed: 0x0013, 0x3ef: 0x0012, + 0x3f0: 0x0013, 0x3f1: 0x0013, 0x3f2: 0xa053, 0x3f3: 0x0013, 0x3f4: 0x0012, 0x3f5: 0x0010, + 0x3f6: 0x0010, 0x3f7: 0x0010, 0x3f8: 0x0010, 0x3f9: 0x0012, + 0x3fc: 0x0012, 0x3fd: 0x0012, 0x3fe: 0x0013, 0x3ff: 0x0013, + // Block 0x10, offset 0x400 + 0x400: 0x1a13, 0x401: 0x1a13, 0x402: 0x1e13, 0x403: 0x1e13, 0x404: 0x1a13, 0x405: 0x1a13, + 0x406: 0x2613, 0x407: 0x2613, 0x408: 0x2a13, 0x409: 0x2a13, 0x40a: 0x2e13, 0x40b: 0x2e13, + 0x40c: 0x2a13, 0x40d: 0x2a13, 0x40e: 0x2613, 0x40f: 0x2613, 0x410: 0xa352, 0x411: 0xa352, + 0x412: 0xa652, 0x413: 0xa652, 0x414: 0xa952, 0x415: 0xa952, 0x416: 0xa652, 0x417: 0xa652, + 0x418: 0xa352, 0x419: 0xa352, 0x41a: 0x1a12, 0x41b: 0x1a12, 0x41c: 0x1e12, 0x41d: 0x1e12, + 0x41e: 0x1a12, 0x41f: 0x1a12, 0x420: 0x2612, 0x421: 0x2612, 0x422: 0x2a12, 0x423: 0x2a12, + 0x424: 0x2e12, 0x425: 0x2e12, 0x426: 0x2a12, 0x427: 0x2a12, 0x428: 0x2612, 0x429: 0x2612, + // Block 0x11, offset 0x440 + 0x440: 0x6552, 0x441: 0x6552, 0x442: 0x6552, 0x443: 0x6552, 0x444: 0x6552, 0x445: 0x6552, + 0x446: 0x6552, 0x447: 0x6552, 0x448: 0x6552, 0x449: 0x6552, 0x44a: 0x6552, 0x44b: 0x6552, + 0x44c: 0x6552, 0x44d: 0x6552, 0x44e: 0x6552, 0x44f: 0x6552, 0x450: 0xac52, 0x451: 0xac52, + 0x452: 0xac52, 0x453: 0xac52, 0x454: 0xac52, 0x455: 0xac52, 0x456: 0xac52, 0x457: 0xac52, + 0x458: 0xac52, 0x459: 0xac52, 0x45a: 0xac52, 0x45b: 0xac52, 0x45c: 0xac52, 0x45d: 0xac52, + 0x45e: 0xac52, 0x460: 0x0113, 0x461: 0x0112, 0x462: 0x88eb, 0x463: 0x8b53, + 0x464: 0x894b, 0x465: 0x89aa, 0x466: 0x8a0a, 0x467: 0x0f13, 0x468: 0x0f12, 0x469: 0x0313, + 0x46a: 0x0312, 0x46b: 0x0713, 0x46c: 0x0712, 0x46d: 0x8a6b, 0x46e: 0x8acb, 0x46f: 0x8b2b, + 0x470: 0x8b8b, 0x471: 0x0012, 0x472: 0x0113, 0x473: 0x0112, 0x474: 0x0012, 0x475: 0x0313, + 0x476: 0x0312, 0x477: 0x0012, 0x478: 0x0012, 0x479: 0x0012, 0x47a: 0x0012, 0x47b: 0x0012, + 0x47c: 0x0015, 0x47d: 0x0015, 0x47e: 0x8beb, 0x47f: 0x8c4b, + // Block 0x12, offset 0x480 + 0x480: 0x0113, 0x481: 0x0112, 0x482: 0x0113, 0x483: 0x0112, 0x484: 0x0113, 0x485: 0x0112, + 0x486: 0x0113, 0x487: 0x0112, 0x488: 0x0014, 0x489: 0x0014, 0x48a: 0x0014, 0x48b: 0x0713, + 0x48c: 0x0712, 0x48d: 0x8cab, 0x48e: 0x0012, 0x48f: 0x0010, 0x490: 0x0113, 0x491: 0x0112, + 0x492: 0x0113, 0x493: 0x0112, 0x494: 0x0012, 0x495: 0x0012, 0x496: 0x0113, 0x497: 0x0112, + 0x498: 0x0113, 0x499: 0x0112, 0x49a: 0x0113, 0x49b: 0x0112, 0x49c: 0x0113, 0x49d: 0x0112, + 0x49e: 0x0113, 0x49f: 0x0112, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x0113, 0x4a3: 0x0112, + 0x4a4: 0x0113, 0x4a5: 0x0112, 0x4a6: 0x0113, 0x4a7: 0x0112, 0x4a8: 0x0113, 0x4a9: 0x0112, + 0x4aa: 0x8d0b, 0x4ab: 0x8d6b, 0x4ac: 0x8dcb, 0x4ad: 0x8e2b, 0x4ae: 0x8e8b, 0x4af: 0x0012, + 0x4b0: 0x8eeb, 0x4b1: 0x8f4b, 0x4b2: 0x8fab, 0x4b3: 0xaf53, 0x4b4: 0x0113, 0x4b5: 0x0112, + 0x4b6: 0x0113, 0x4b7: 0x0112, 0x4b8: 0x0113, 0x4b9: 0x0112, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x900a, 0x4c1: 0x908a, 0x4c2: 0x910a, 0x4c3: 0x918a, 0x4c4: 0x923a, 0x4c5: 0x92ea, + 0x4c6: 0x936a, + 0x4d3: 0x93ea, 0x4d4: 0x94ca, 0x4d5: 0x95aa, 0x4d6: 0x968a, 0x4d7: 0x976a, + 0x4dd: 0x0010, + 0x4de: 0x0034, 0x4df: 0x0010, 0x4e0: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, 0x4e3: 0x0010, + 0x4e4: 0x0010, 0x4e5: 0x0010, 0x4e6: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, + 0x4ea: 0x0010, 0x4eb: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f3: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f8: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 
0x0010, + // Block 0x14, offset 0x500 + 0x500: 0x2213, 0x501: 0x2213, 0x502: 0x2613, 0x503: 0x2613, 0x504: 0x2213, 0x505: 0x2213, + 0x506: 0x2e13, 0x507: 0x2e13, 0x508: 0x2213, 0x509: 0x2213, 0x50a: 0x2613, 0x50b: 0x2613, + 0x50c: 0x2213, 0x50d: 0x2213, 0x50e: 0x3e13, 0x50f: 0x3e13, 0x510: 0x2213, 0x511: 0x2213, + 0x512: 0x2613, 0x513: 0x2613, 0x514: 0x2213, 0x515: 0x2213, 0x516: 0x2e13, 0x517: 0x2e13, + 0x518: 0x2213, 0x519: 0x2213, 0x51a: 0x2613, 0x51b: 0x2613, 0x51c: 0x2213, 0x51d: 0x2213, + 0x51e: 0xb853, 0x51f: 0xb853, 0x520: 0xbb53, 0x521: 0xbb53, 0x522: 0x2212, 0x523: 0x2212, + 0x524: 0x2612, 0x525: 0x2612, 0x526: 0x2212, 0x527: 0x2212, 0x528: 0x2e12, 0x529: 0x2e12, + 0x52a: 0x2212, 0x52b: 0x2212, 0x52c: 0x2612, 0x52d: 0x2612, 0x52e: 0x2212, 0x52f: 0x2212, + 0x530: 0x3e12, 0x531: 0x3e12, 0x532: 0x2212, 0x533: 0x2212, 0x534: 0x2612, 0x535: 0x2612, + 0x536: 0x2212, 0x537: 0x2212, 0x538: 0x2e12, 0x539: 0x2e12, 0x53a: 0x2212, 0x53b: 0x2212, + 0x53c: 0x2612, 0x53d: 0x2612, 0x53e: 0x2212, 0x53f: 0x2212, + // Block 0x15, offset 0x540 + 0x542: 0x0010, + 0x547: 0x0010, 0x549: 0x0010, 0x54b: 0x0010, + 0x54d: 0x0010, 0x54e: 0x0010, 0x54f: 0x0010, 0x551: 0x0010, + 0x552: 0x0010, 0x554: 0x0010, 0x557: 0x0010, + 0x559: 0x0010, 0x55b: 0x0010, 0x55d: 0x0010, + 0x55f: 0x0010, 0x561: 0x0010, 0x562: 0x0010, + 0x564: 0x0010, 0x567: 0x0010, 0x568: 0x0010, 0x569: 0x0010, + 0x56a: 0x0010, 0x56c: 0x0010, 0x56d: 0x0010, 0x56e: 0x0010, 0x56f: 0x0010, + 0x570: 0x0010, 0x571: 0x0010, 0x572: 0x0010, 0x574: 0x0010, 0x575: 0x0010, + 0x576: 0x0010, 0x577: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57e: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. +var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x14, 0xc3: 0x15, 0xc4: 0x16, 0xc5: 0x17, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x18, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x19, 0xcc: 0x1a, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1b, 0xd1: 0x1c, 0xd2: 0x1d, 0xd3: 0x1e, 0xd4: 0x1f, 0xd5: 0x20, 0xd6: 0x08, 0xd7: 0x21, + 0xd8: 0x22, 0xd9: 0x23, 0xda: 0x24, 0xdb: 0x25, 0xdc: 0x26, 0xdd: 0x27, 0xde: 0x28, 0xdf: 0x29, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x2a, 0x121: 0x2b, 0x122: 0x2c, 0x123: 0x2d, 0x124: 0x2e, 0x125: 0x2f, 0x126: 0x30, 0x127: 0x31, + 0x128: 0x32, 0x129: 0x33, 0x12a: 0x34, 0x12b: 0x35, 0x12c: 0x36, 0x12d: 0x37, 0x12e: 0x38, 0x12f: 0x39, + 0x130: 0x3a, 0x131: 0x3b, 0x132: 0x3c, 0x133: 0x3d, 0x134: 0x3e, 0x135: 0x3f, 0x136: 0x40, 0x137: 0x41, + 0x138: 0x42, 0x139: 0x43, 0x13a: 0x44, 0x13b: 0x45, 0x13c: 0x46, 0x13d: 0x47, 0x13e: 0x48, 0x13f: 0x49, + // Block 0x5, offset 0x140 + 0x140: 0x4a, 0x141: 0x4b, 0x142: 0x4c, 0x143: 0x09, 0x144: 0x24, 0x145: 0x24, 0x146: 0x24, 0x147: 0x24, + 0x148: 0x24, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x24, 0x152: 0x24, 0x153: 0x24, 0x154: 0x24, 0x155: 0x24, 0x156: 0x24, 0x157: 0x24, + 0x158: 0x24, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 
0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x0a, 0x17e: 0x0b, 0x17f: 0x0c, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0d, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0e, + 0x1b0: 0x7c, 0x1b1: 0x0f, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x24, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x24, 0x202: 0x24, 0x203: 0x24, 0x204: 0x24, 0x205: 0x24, 0x206: 0x24, 0x207: 0x24, + 0x208: 0x24, 0x209: 0x24, 0x20a: 0x24, 0x20b: 0x24, 0x20c: 0x24, 0x20d: 0x24, 0x20e: 0x24, 0x20f: 0x24, + 0x210: 0x24, 0x211: 0x24, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x24, 0x215: 0x24, 0x216: 0x24, 0x217: 0x24, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x10, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x24, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x24, 0x231: 0x24, 0x232: 0x24, 0x233: 0x24, 0x234: 0x24, 0x235: 0x24, 0x236: 0x24, 0x237: 0x24, + 0x238: 0x24, 0x239: 0x24, 0x23a: 0x24, 0x23b: 0x24, 0x23c: 0x24, 0x23d: 0x24, 0x23e: 0x24, 0x23f: 0x24, + // Block 0x9, offset 0x240 + 0x240: 0x24, 0x241: 0x24, 0x242: 0x24, 0x243: 0x24, 0x244: 0x24, 0x245: 0x24, 0x246: 0x24, 0x247: 0x24, + 0x248: 0x24, 0x249: 0x24, 0x24a: 0x24, 0x24b: 0x24, 0x24c: 0x24, 0x24d: 0x24, 0x24e: 0x24, 0x24f: 0x24, + 0x250: 0x24, 0x251: 0x24, 0x252: 0x24, 0x253: 0x24, 0x254: 0x24, 0x255: 0x24, 0x256: 0x24, 0x257: 0x24, + 0x258: 0x24, 0x259: 0x24, 0x25a: 0x24, 0x25b: 0x24, 0x25c: 0x24, 0x25d: 0x24, 0x25e: 0x24, 0x25f: 0x24, + 0x260: 0x24, 0x261: 0x24, 0x262: 0x24, 0x263: 0x24, 0x264: 0x24, 0x265: 0x24, 0x266: 0x24, 0x267: 0x24, + 0x268: 0x24, 0x269: 0x24, 0x26a: 0x24, 0x26b: 0x24, 0x26c: 0x24, 0x26d: 0x24, 0x26e: 0x24, 0x26f: 0x24, + 0x270: 0x24, 0x271: 0x24, 0x272: 0x24, 0x273: 0x24, 0x274: 0x24, 0x275: 0x24, 0x276: 0x24, 0x277: 0x24, + 0x278: 0x24, 0x279: 0x24, 0x27a: 0x24, 0x27b: 0x24, 0x27c: 0x24, 0x27d: 0x24, 0x27e: 0x24, 0x27f: 0x24, + // Block 0xa, offset 0x280 + 0x280: 0x24, 0x281: 0x24, 0x282: 0x24, 0x283: 0x24, 0x284: 0x24, 0x285: 0x24, 0x286: 0x24, 0x287: 0x24, + 0x288: 0x24, 0x289: 0x24, 0x28a: 0x24, 0x28b: 0x24, 0x28c: 0x24, 0x28d: 0x24, 0x28e: 0x24, 0x28f: 0x24, + 0x290: 0x24, 0x291: 0x24, 0x292: 0x24, 0x293: 0x24, 0x294: 0x24, 0x295: 0x24, 0x296: 0x24, 0x297: 0x24, + 0x298: 0x24, 0x299: 0x24, 0x29a: 0x24, 0x29b: 0x24, 0x29c: 0x24, 0x29d: 0x24, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x11, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x24, 0x2f1: 0x24, 0x2f2: 0x24, 0x2f3: 0x24, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x24, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x24, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x24, 0x319: 0x24, 0x31a: 0x24, 0x31b: 0x24, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x24, 
0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, 0x334: 0xd3, + 0x33c: 0xd4, 0x33d: 0xd5, + // Block 0xd, offset 0x340 + 0x340: 0xd6, 0x341: 0xd7, 0x342: 0xd8, 0x343: 0xd9, 0x344: 0xda, 0x345: 0xdb, 0x346: 0xdc, 0x347: 0xdd, + 0x348: 0xde, 0x34a: 0xdf, 0x34b: 0xe0, 0x34c: 0xe1, 0x34d: 0xe2, + 0x350: 0xe3, 0x351: 0xe4, 0x352: 0xe5, 0x353: 0xe6, 0x356: 0xe7, 0x357: 0xe8, + 0x358: 0xe9, 0x359: 0xea, 0x35a: 0xeb, 0x35b: 0xec, 0x35c: 0xed, + 0x360: 0xee, 0x362: 0xef, 0x363: 0xf0, + 0x368: 0xf1, 0x369: 0xf2, 0x36a: 0xf3, 0x36b: 0xf4, + 0x370: 0xf5, 0x371: 0xf6, 0x372: 0xf7, 0x374: 0xf8, 0x375: 0xf9, 0x376: 0xfa, + 0x37b: 0xfb, + // Block 0xe, offset 0x380 + 0x380: 0x24, 0x381: 0x24, 0x382: 0x24, 0x383: 0x24, 0x384: 0x24, 0x385: 0x24, 0x386: 0x24, 0x387: 0x24, + 0x388: 0x24, 0x389: 0x24, 0x38a: 0x24, 0x38b: 0x24, 0x38c: 0x24, 0x38d: 0x24, 0x38e: 0xfc, + 0x390: 0x24, 0x391: 0xfd, 0x392: 0x24, 0x393: 0x24, 0x394: 0x24, 0x395: 0xfe, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x24, 0x3c1: 0x24, 0x3c2: 0x24, 0x3c3: 0x24, 0x3c4: 0x24, 0x3c5: 0x24, 0x3c6: 0x24, 0x3c7: 0x24, + 0x3c8: 0x24, 0x3c9: 0x24, 0x3ca: 0x24, 0x3cb: 0x24, 0x3cc: 0x24, 0x3cd: 0x24, 0x3ce: 0x24, 0x3cf: 0x24, + 0x3d0: 0xfd, + // Block 0x10, offset 0x400 + 0x410: 0x24, 0x411: 0x24, 0x412: 0x24, 0x413: 0x24, 0x414: 0x24, 0x415: 0x24, 0x416: 0x24, 0x417: 0x24, + 0x418: 0x24, 0x419: 0xff, + // Block 0x11, offset 0x440 + 0x460: 0x24, 0x461: 0x24, 0x462: 0x24, 0x463: 0x24, 0x464: 0x24, 0x465: 0x24, 0x466: 0x24, 0x467: 0x24, + 0x468: 0xf4, 0x469: 0x100, 0x46b: 0x101, 0x46c: 0x102, 0x46d: 0x103, 0x46e: 0x104, + 0x479: 0x105, 0x47c: 0x24, 0x47d: 0x106, 0x47e: 0x107, 0x47f: 0x108, + // Block 0x12, offset 0x480 + 0x4b0: 0x24, 0x4b1: 0x109, 0x4b2: 0x10a, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x10b, 0x4c6: 0x10c, + 0x4c9: 0x10d, + 0x4d0: 0x10e, 0x4d1: 0x10f, 0x4d2: 0x110, 0x4d3: 0x111, 0x4d4: 0x112, 0x4d5: 0x113, 0x4d6: 0x114, 0x4d7: 0x115, + 0x4d8: 0x116, 0x4d9: 0x117, 0x4da: 0x118, 0x4db: 0x119, 0x4dc: 0x11a, 0x4dd: 0x11b, 0x4de: 0x11c, 0x4df: 0x11d, + 0x4e8: 0x11e, 0x4e9: 0x11f, 0x4ea: 0x120, + // Block 0x14, offset 0x500 + 0x500: 0x121, + 0x520: 0x24, 0x521: 0x24, 0x522: 0x24, 0x523: 0x122, 0x524: 0x12, 0x525: 0x123, + 0x538: 0x124, 0x539: 0x13, 0x53a: 0x125, + // Block 0x15, offset 0x540 + 0x544: 0x126, 0x545: 0x127, 0x546: 0x128, + 0x54f: 0x129, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x12a, 0x5c1: 0x12b, 0x5c4: 0x12b, 0x5c5: 0x12b, 0x5c6: 0x12b, 0x5c7: 0x12c, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 282 entries, 564 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x35, 0x38, 0x3c, 0x3f, 0x43, 0x4d, 0x4f, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xae, 0xb0, 0xbf, 0xc5, 0xd3, 0xde, 0xeb, 0xf6, 0x102, 0x10c, 0x118, 0x123, 0x12f, 0x13b, 0x143, 0x14c, 0x156, 0x161, 0x16d, 0x174, 0x17f, 0x184, 0x18c, 0x18f, 0x194, 0x198, 0x19c, 0x1a3, 0x1ac, 0x1b4, 0x1b5, 0x1be, 0x1c5, 0x1cd, 0x1d3, 0x1d8, 0x1dc, 0x1df, 0x1e1, 0x1e4, 0x1e9, 0x1ea, 0x1ec, 0x1ee, 0x1f0, 0x1f7, 0x1fc, 0x200, 0x209, 0x20c, 0x20f, 0x215, 0x216, 0x221, 0x222, 0x223, 0x228, 0x235, 0x23d, 0x245, 0x24e, 0x257, 0x260, 0x265, 0x268, 0x273, 0x280, 0x282, 0x289, 0x28b, 0x297, 0x298, 0x2a3, 0x2ab, 0x2b3, 0x2b9, 0x2ba, 0x2c8, 0x2cd, 0x2d0, 0x2d5, 0x2d9, 0x2df, 0x2e4, 0x2e7, 0x2ec, 0x2f1, 0x2f2, 0x2f8, 0x2fa, 0x2fb, 0x2fd, 0x2ff, 0x302, 0x303, 0x305, 0x308, 0x30e, 0x312, 
0x314, 0x319, 0x320, 0x324, 0x32d, 0x32e, 0x337, 0x33b, 0x340, 0x348, 0x34e, 0x354, 0x35e, 0x363, 0x36c, 0x372, 0x379, 0x37d, 0x385, 0x387, 0x389, 0x38c, 0x38e, 0x390, 0x391, 0x392, 0x394, 0x396, 0x39c, 0x3a1, 0x3a3, 0x3a9, 0x3ac, 0x3ae, 0x3b4, 0x3b9, 0x3bb, 0x3bc, 0x3bd, 0x3be, 0x3c0, 0x3c2, 0x3c4, 0x3c7, 0x3c9, 0x3cc, 0x3d4, 0x3d7, 0x3db, 0x3e3, 0x3e5, 0x3e6, 0x3e7, 0x3e9, 0x3ef, 0x3f1, 0x3f2, 0x3f4, 0x3f6, 0x3f8, 0x405, 0x406, 0x407, 0x40b, 0x40d, 0x40e, 0x40f, 0x410, 0x411, 0x414, 0x417, 0x41d, 0x421, 0x425, 0x42b, 0x42e, 0x435, 0x439, 0x43d, 0x444, 0x44d, 0x453, 0x459, 0x463, 0x46d, 0x46f, 0x477, 0x47d, 0x483, 0x489, 0x48c, 0x492, 0x495, 0x49d, 0x49e, 0x4a5, 0x4a9, 0x4aa, 0x4ad, 0x4b5, 0x4bb, 0x4c2, 0x4c3, 0x4c9, 0x4cc, 0x4d4, 0x4db, 0x4e5, 0x4ed, 0x4f0, 0x4f1, 0x4f2, 0x4f3, 0x4f4, 0x4f6, 0x4f8, 0x4fa, 0x4fe, 0x4ff, 0x501, 0x503, 0x504, 0x505, 0x507, 0x50c, 0x511, 0x515, 0x516, 0x519, 0x51d, 0x528, 0x52c, 0x534, 0x539, 0x53d, 0x540, 0x544, 0x547, 0x54a, 0x54f, 0x553, 0x557, 0x55b, 0x55f, 0x561, 0x563, 0x566, 0x56b, 0x56d, 0x572, 0x57b, 0x580, 0x581, 0x584, 0x585, 0x586, 0x588, 0x589, 0x58a} + +// sparseValues: 1418 entries, 5672 bytes +var sparseValues = [1418]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xbf}, + // Block 0x6, offset 0x35 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, 
lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x38 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3c + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3f + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x43 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4f + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9b, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 
0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x18, offset 0xae + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x19, offset 0xb0 + {value: 0x0034, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1a, offset 0xbf + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1b, offset 0xc5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1c, offset 0xd3 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, 
lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0xde + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xeb + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1f, offset 0xf6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x20, offset 0x102 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x21, offset 0x10c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x22, offset 0x118 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x23, offset 0x123 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x24, offset 0x12f + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, 
lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x26, offset 0x143 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x27, offset 0x14c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x28, offset 0x156 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x29, offset 0x161 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2a, offset 0x16d + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2b, offset 0x174 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2c, offset 0x17f + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2d, offset 0x184 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 
0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2e, offset 0x18c + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2f, offset 0x18f + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x30, offset 0x194 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x31, offset 0x198 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x32, offset 0x19c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x33, offset 0x1a3 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x34, offset 0x1ac + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x35, offset 0x1b4 + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x36, offset 0x1b5 + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x37, offset 0x1be + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x38, offset 0x1c5 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, 
hi: 0xbe}, + // Block 0x3b, offset 0x1d8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1dc + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3d, offset 0x1df + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3e, offset 0x1e1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3f, offset 0x1e4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x40, offset 0x1e9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x41, offset 0x1ea + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x42, offset 0x1ec + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x43, offset 0x1ee + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x44, offset 0x1f0 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x45, offset 0x1f7 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x46, offset 0x1fc + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x47, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x48, offset 0x209 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x20c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x4a, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4b, offset 0x215 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4c, offset 0x216 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x221 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4e, offset 0x222 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4f, 
offset 0x223 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x50, offset 0x228 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x235 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x52, offset 0x23d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x54, offset 0x24e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x55, offset 0x257 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x56, offset 0x260 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x57, offset 0x265 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x58, offset 0x268 + {value: 0x316a, lo: 0x80, hi: 0x80}, + {value: 0x31ea, lo: 0x81, hi: 0x81}, + {value: 0x326a, lo: 0x82, hi: 0x82}, + {value: 0x32ea, lo: 0x83, hi: 0x83}, + {value: 0x336a, lo: 0x84, hi: 0x84}, + {value: 0x33ea, lo: 0x85, hi: 0x85}, + {value: 0x346a, lo: 0x86, hi: 0x86}, + {value: 0x34ea, lo: 0x87, hi: 0x87}, + {value: 0x356a, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x59, offset 0x273 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + 
{value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5a, offset 0x280 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5b, offset 0x282 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5c, offset 0x289 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5d, offset 0x28b + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5e, offset 0x297 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x298 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x361a, lo: 0x96, hi: 0x96}, + {value: 0x36ca, lo: 0x97, hi: 0x97}, + {value: 0x377a, lo: 0x98, hi: 0x98}, + {value: 0x382a, lo: 0x99, hi: 0x99}, + {value: 0x38da, lo: 0x9a, hi: 0x9a}, + {value: 0x398a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3a3b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x60, offset 0x2a3 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x61, offset 0x2ab + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x2b3 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2b9 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x64, offset 0x2ba + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x65, offset 
0x2c8 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa052, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x2cd + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x67, offset 0x2d0 + {value: 0xa353, lo: 0xb6, hi: 0xb7}, + {value: 0xa653, lo: 0xb8, hi: 0xb9}, + {value: 0xa953, lo: 0xba, hi: 0xbb}, + {value: 0xa653, lo: 0xbc, hi: 0xbd}, + {value: 0xa353, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x2d5 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xac53, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x69, offset 0x2d9 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6a, offset 0x2df + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e4 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6c, offset 0x2e7 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6d, offset 0x2ec + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6e, offset 0x2f1 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x6f, offset 0x2f2 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x70, offset 0x2f8 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x71, offset 0x2fa + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x72, offset 0x2fb + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x73, offset 0x2fd + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x74, offset 0x2ff + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x75, offset 0x302 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x76, offset 0x303 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x77, offset 0x305 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x78, offset 0x308 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x79, offset 0x30e + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7a, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 
0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7b, offset 0x314 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7c, offset 0x319 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7d, offset 0x320 + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7e, offset 0x324 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x7f, offset 0x32d + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x80, offset 0x32e + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x337 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x82, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x83, offset 0x340 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x84, offset 0x348 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x85, offset 0x34e + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x86, offset 0x354 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x87, offset 0x35e + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x363 + 
{value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x89, offset 0x36c + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8a, offset 0x372 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xaf52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x379 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37d + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8d, offset 0x385 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x387 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x8f, offset 0x389 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x90, offset 0x38c + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x91, offset 0x38e + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x92, offset 0x390 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x93, offset 0x391 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x94, offset 0x392 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x95, offset 0x394 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x96, offset 0x396 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x97, offset 0x39c + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x98, offset 0x3a1 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x3a3 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3a9 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9b, offset 0x3ac + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9c, offset 0x3ae + {value: 0x0010, lo: 0x82, hi: 
0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9d, offset 0x3b4 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x3b9 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0x9f, offset 0x3bb + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa0, offset 0x3bc + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa1, offset 0x3bd + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa2, offset 0x3be + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x3c0 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa4, offset 0x3c2 + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa5, offset 0x3c4 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa6, offset 0x3c7 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa7, offset 0x3c9 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa8, offset 0x3cc + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb253, lo: 0x98, hi: 0x9f}, + {value: 0xb553, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xa9, offset 0x3d4 + {value: 0xb252, lo: 0x80, hi: 0x87}, + {value: 0xb552, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xaa, offset 0x3d7 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb553, lo: 0xb0, hi: 0xb7}, + {value: 0xb253, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3db + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb552, lo: 0x98, hi: 0x9f}, + {value: 0xb252, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xac, offset 0x3e3 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xad, offset 0x3e5 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xae, offset 0x3e6 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xaf, offset 0x3e7 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb0, offset 0x3e9 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x3ef + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb2, offset 0x3f1 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb3, offset 0x3f2 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb4, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb5, offset 0x3f6 + {value: 0x0010, lo: 0x80, 
hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb6, offset 0x3f8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x405 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb8, offset 0x406 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xb9, offset 0x407 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xba, offset 0x40b + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbb, offset 0x40d + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbc, offset 0x40e + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbd, offset 0x40f + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbe, offset 0x410 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x411 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc0, offset 0x414 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc1, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + // Block 0xc2, offset 0x41d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc3, offset 0x421 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x425 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc5, offset 0x42b + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc6, offset 0x42e + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc7, offset 0x435 + {value: 0x0010, lo: 0x84, hi: 0x86}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc8, offset 0x439 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x43d + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, 
lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xca, offset 0x444 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xcb, offset 0x44d + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcc, offset 0x453 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcd, offset 0x459 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xce, offset 0x463 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xcf, offset 0x46d + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd0, offset 0x46f + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + // Block 0xd1, offset 0x477 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd2, offset 0x47d + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd3, offset 0x483 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd4, offset 0x489 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd5, offset 0x48c + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x492 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 
0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd7, offset 0x495 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd8, offset 0x49d + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd9, offset 0x49e + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xda, offset 0x4a5 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xdb, offset 0x4a9 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xdc, offset 0x4aa + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdd, offset 0x4ad + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xde, offset 0x4b5 + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xdf, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x86, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + // Block 0xe0, offset 0x4c2 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xe1, offset 0x4c3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe2, offset 0x4c9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xe3, offset 0x4cc + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xe4, offset 0x4d4 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xe5, offset 0x4db + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + 
{value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe6, offset 0x4e5 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe7, offset 0x4ed + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xe8, offset 0x4f0 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe9, offset 0x4f1 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xea, offset 0x4f2 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xeb, offset 0x4f3 + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xec, offset 0x4f4 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xed, offset 0x4f6 + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xee, offset 0x4f8 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xef, offset 0x4fa + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xf0, offset 0x4fe + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xf1, offset 0x4ff + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0xf2, offset 0x501 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xf3, offset 0x503 + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xf4, offset 0x504 + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + // Block 0xf5, offset 0x505 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xf6, offset 0x507 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xf7, offset 0x50c + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xf8, offset 0x511 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xf9, offset 0x515 + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xfa, offset 0x516 + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xfb, offset 0x519 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xfc, offset 0x51d + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xfd, offset 0x528 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, 
hi: 0xbf}, + // Block 0xfe, offset 0x52c + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xff, offset 0x534 + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x100, offset 0x539 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x101, offset 0x53d + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x102, offset 0x540 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x544 + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x104, offset 0x547 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x105, offset 0x54a + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x106, offset 0x54f + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x107, offset 0x553 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x108, offset 0x557 + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x109, offset 0x55b + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x10a, offset 0x55f + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x10b, offset 0x561 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x10c, offset 0x563 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x10d, offset 0x566 + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x10e, offset 0x56b + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x10f, offset 0x56d + {value: 0xb852, lo: 0x80, hi: 0x81}, + {value: 0xbb52, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x110, offset 0x572 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 
0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x111, offset 0x57b + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x112, offset 0x580 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x113, offset 0x581 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x114, offset 0x584 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x115, offset 0x585 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x116, offset 0x586 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x117, offset 0x588 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x118, offset 0x589 + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 14906 bytes (14KiB); checksum: 362795C7 diff --git a/vendor/golang.org/x/text/cases/tables12.0.0.go b/vendor/golang.org/x/text/cases/tables12.0.0.go new file mode 100644 index 00000000000..ae7dc240722 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables12.0.0.go @@ -0,0 +1,2360 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +var xorData string = "" + // Size: 192 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x0b)\x08" + + "\x00\x03\x0a\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<" + + "\x00\x01&\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01" + + "\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2450 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꟅꟅ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ι" + + "ΙΙ\x166ΐΪ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12" + + "φΦΦ\x12\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა" + + "\x10\x1bᲑბ\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ" + + "\x10\x1bᲘი\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ" + + "\x10\x1bᲟჟ\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ" + + "\x10\x1bᲦღ\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ" + + "\x10\x1bᲭჭ\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ" + + "\x10\x1bᲴჴ\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ" + + "\x10\x1bᲽჽ\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12с" + 
+ "СС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱" + + "\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12" + + "\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ" + + "\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ" + + "\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15" + + "\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣι" + + "ἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ" + + "\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15" + + "\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ" + + "\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ" + + "\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙ" + + "ᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙ" + + "Ὴͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΙ" + + "̈̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ" + + "́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ" + + "\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12" + + "\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12" + + "\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12" + + "\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x10ʂʂ\x12\x12ffFFFf" + + "\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12st" + + "STSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄ" + + "խ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 12396 bytes (12.11 KiB). Checksum: c0656238384c3da1. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 20: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 20 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 22 blocks, 1408 entries, 2816 bytes +// The third block is the zero block. 
+var caseValues = [1408]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 
0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x110a, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x118a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x120a, + 0x19e: 0x128a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x130d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x138a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x14ca, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x160a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 
0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x168a, 0x251: 0x170a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x178a, 0x256: 0x180a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x188a, 0x271: 0x190a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x198a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x1a0a, 0x288: 0x0012, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x7053, 0x2c1: 0x7053, 0x2c2: 0x7053, 0x2c3: 0x7053, 0x2c4: 0x7053, 0x2c5: 0x7053, + 0x2c7: 0x7053, + 0x2cd: 0x7053, 0x2d0: 0x1aea, 0x2d1: 0x1b6a, + 0x2d2: 0x1bea, 0x2d3: 0x1c6a, 0x2d4: 0x1cea, 0x2d5: 0x1d6a, 0x2d6: 0x1dea, 0x2d7: 0x1e6a, + 0x2d8: 0x1eea, 0x2d9: 0x1f6a, 0x2da: 0x1fea, 0x2db: 0x206a, 0x2dc: 0x20ea, 0x2dd: 0x216a, + 0x2de: 0x21ea, 0x2df: 0x226a, 0x2e0: 0x22ea, 0x2e1: 0x236a, 0x2e2: 0x23ea, 0x2e3: 0x246a, + 0x2e4: 0x24ea, 0x2e5: 0x256a, 0x2e6: 0x25ea, 0x2e7: 0x266a, 0x2e8: 0x26ea, 0x2e9: 0x276a, + 0x2ea: 0x27ea, 0x2eb: 0x286a, 0x2ec: 0x28ea, 0x2ed: 0x296a, 0x2ee: 0x29ea, 0x2ef: 0x2a6a, + 0x2f0: 0x2aea, 0x2f1: 0x2b6a, 0x2f2: 0x2bea, 0x2f3: 0x2c6a, 0x2f4: 0x2cea, 0x2f5: 0x2d6a, + 0x2f6: 0x2dea, 0x2f7: 0x2e6a, 0x2f8: 0x2eea, 0x2f9: 0x2f6a, 0x2fa: 0x2fea, + 0x2fc: 0x0014, 0x2fd: 0x306a, 0x2fe: 0x30ea, 0x2ff: 0x316a, + // Block 0xc, offset 0x300 + 0x300: 0x0812, 0x301: 0x0812, 0x302: 0x0812, 0x303: 0x0812, 0x304: 0x0812, 0x305: 0x0812, + 0x308: 0x0813, 0x309: 0x0813, 0x30a: 0x0813, 0x30b: 0x0813, + 0x30c: 0x0813, 0x30d: 0x0813, 0x310: 0x3b1a, 0x311: 0x0812, + 0x312: 0x3bfa, 0x313: 0x0812, 0x314: 0x3d3a, 0x315: 0x0812, 0x316: 0x3e7a, 0x317: 0x0812, + 0x319: 0x0813, 0x31b: 0x0813, 0x31d: 0x0813, + 0x31f: 0x0813, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x0812, 0x323: 0x0812, + 0x324: 0x0812, 0x325: 0x0812, 0x326: 0x0812, 0x327: 0x0812, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x0813, 0x32b: 0x0813, 0x32c: 0x0813, 0x32d: 0x0813, 0x32e: 0x0813, 0x32f: 0x0813, + 0x330: 0x9252, 0x331: 0x9252, 0x332: 0x9552, 0x333: 0x9552, 0x334: 0x9852, 0x335: 0x9852, + 0x336: 0x9b52, 0x337: 0x9b52, 0x338: 0x9e52, 0x339: 0x9e52, 0x33a: 0xa152, 0x33b: 0xa152, + 0x33c: 0x4d52, 0x33d: 0x4d52, + // Block 0xd, offset 0x340 + 0x340: 0x3fba, 0x341: 0x40aa, 0x342: 0x419a, 0x343: 0x428a, 0x344: 
0x437a, 0x345: 0x446a, + 0x346: 0x455a, 0x347: 0x464a, 0x348: 0x4739, 0x349: 0x4829, 0x34a: 0x4919, 0x34b: 0x4a09, + 0x34c: 0x4af9, 0x34d: 0x4be9, 0x34e: 0x4cd9, 0x34f: 0x4dc9, 0x350: 0x4eba, 0x351: 0x4faa, + 0x352: 0x509a, 0x353: 0x518a, 0x354: 0x527a, 0x355: 0x536a, 0x356: 0x545a, 0x357: 0x554a, + 0x358: 0x5639, 0x359: 0x5729, 0x35a: 0x5819, 0x35b: 0x5909, 0x35c: 0x59f9, 0x35d: 0x5ae9, + 0x35e: 0x5bd9, 0x35f: 0x5cc9, 0x360: 0x5dba, 0x361: 0x5eaa, 0x362: 0x5f9a, 0x363: 0x608a, + 0x364: 0x617a, 0x365: 0x626a, 0x366: 0x635a, 0x367: 0x644a, 0x368: 0x6539, 0x369: 0x6629, + 0x36a: 0x6719, 0x36b: 0x6809, 0x36c: 0x68f9, 0x36d: 0x69e9, 0x36e: 0x6ad9, 0x36f: 0x6bc9, + 0x370: 0x0812, 0x371: 0x0812, 0x372: 0x6cba, 0x373: 0x6dca, 0x374: 0x6e9a, + 0x376: 0x6f7a, 0x377: 0x705a, 0x378: 0x0813, 0x379: 0x0813, 0x37a: 0x9253, 0x37b: 0x9253, + 0x37c: 0x7199, 0x37d: 0x0004, 0x37e: 0x726a, 0x37f: 0x0004, + // Block 0xe, offset 0x380 + 0x380: 0x0004, 0x381: 0x0004, 0x382: 0x72ea, 0x383: 0x73fa, 0x384: 0x74ca, + 0x386: 0x75aa, 0x387: 0x768a, 0x388: 0x9553, 0x389: 0x9553, 0x38a: 0x9853, 0x38b: 0x9853, + 0x38c: 0x77c9, 0x38d: 0x0004, 0x38e: 0x0004, 0x38f: 0x0004, 0x390: 0x0812, 0x391: 0x0812, + 0x392: 0x789a, 0x393: 0x79da, 0x396: 0x7b1a, 0x397: 0x7bfa, + 0x398: 0x0813, 0x399: 0x0813, 0x39a: 0x9b53, 0x39b: 0x9b53, 0x39d: 0x0004, + 0x39e: 0x0004, 0x39f: 0x0004, 0x3a0: 0x0812, 0x3a1: 0x0812, 0x3a2: 0x7d3a, 0x3a3: 0x7e7a, + 0x3a4: 0x7fba, 0x3a5: 0x0912, 0x3a6: 0x809a, 0x3a7: 0x817a, 0x3a8: 0x0813, 0x3a9: 0x0813, + 0x3aa: 0xa153, 0x3ab: 0xa153, 0x3ac: 0x0913, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b2: 0x82ba, 0x3b3: 0x83ca, 0x3b4: 0x849a, + 0x3b6: 0x857a, 0x3b7: 0x865a, 0x3b8: 0x9e53, 0x3b9: 0x9e53, 0x3ba: 0x4d53, 0x3bb: 0x4d53, + 0x3bc: 0x8799, 0x3bd: 0x0004, 0x3be: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c2: 0x0013, + 0x3c7: 0x0013, 0x3ca: 0x0012, 0x3cb: 0x0013, + 0x3cc: 0x0013, 0x3cd: 0x0013, 0x3ce: 0x0012, 0x3cf: 0x0012, 0x3d0: 0x0013, 0x3d1: 0x0013, + 0x3d2: 0x0013, 0x3d3: 0x0012, 0x3d5: 0x0013, + 0x3d9: 0x0013, 0x3da: 0x0013, 0x3db: 0x0013, 0x3dc: 0x0013, 0x3dd: 0x0013, + 0x3e4: 0x0013, 0x3e6: 0x886b, 0x3e8: 0x0013, + 0x3ea: 0x88cb, 0x3eb: 0x890b, 0x3ec: 0x0013, 0x3ed: 0x0013, 0x3ef: 0x0012, + 0x3f0: 0x0013, 0x3f1: 0x0013, 0x3f2: 0xa453, 0x3f3: 0x0013, 0x3f4: 0x0012, 0x3f5: 0x0010, + 0x3f6: 0x0010, 0x3f7: 0x0010, 0x3f8: 0x0010, 0x3f9: 0x0012, + 0x3fc: 0x0012, 0x3fd: 0x0012, 0x3fe: 0x0013, 0x3ff: 0x0013, + // Block 0x10, offset 0x400 + 0x400: 0x1a13, 0x401: 0x1a13, 0x402: 0x1e13, 0x403: 0x1e13, 0x404: 0x1a13, 0x405: 0x1a13, + 0x406: 0x2613, 0x407: 0x2613, 0x408: 0x2a13, 0x409: 0x2a13, 0x40a: 0x2e13, 0x40b: 0x2e13, + 0x40c: 0x2a13, 0x40d: 0x2a13, 0x40e: 0x2613, 0x40f: 0x2613, 0x410: 0xa752, 0x411: 0xa752, + 0x412: 0xaa52, 0x413: 0xaa52, 0x414: 0xad52, 0x415: 0xad52, 0x416: 0xaa52, 0x417: 0xaa52, + 0x418: 0xa752, 0x419: 0xa752, 0x41a: 0x1a12, 0x41b: 0x1a12, 0x41c: 0x1e12, 0x41d: 0x1e12, + 0x41e: 0x1a12, 0x41f: 0x1a12, 0x420: 0x2612, 0x421: 0x2612, 0x422: 0x2a12, 0x423: 0x2a12, + 0x424: 0x2e12, 0x425: 0x2e12, 0x426: 0x2a12, 0x427: 0x2a12, 0x428: 0x2612, 0x429: 0x2612, + // Block 0x11, offset 0x440 + 0x440: 0x6552, 0x441: 0x6552, 0x442: 0x6552, 0x443: 0x6552, 0x444: 0x6552, 0x445: 0x6552, + 0x446: 0x6552, 0x447: 0x6552, 0x448: 0x6552, 0x449: 0x6552, 0x44a: 0x6552, 0x44b: 0x6552, + 0x44c: 0x6552, 0x44d: 0x6552, 0x44e: 0x6552, 0x44f: 0x6552, 0x450: 0xb052, 0x451: 0xb052, + 0x452: 0xb052, 0x453: 0xb052, 0x454: 0xb052, 0x455: 0xb052, 0x456: 0xb052, 0x457: 0xb052, + 0x458: 0xb052, 0x459: 0xb052, 0x45a: 
0xb052, 0x45b: 0xb052, 0x45c: 0xb052, 0x45d: 0xb052, + 0x45e: 0xb052, 0x460: 0x0113, 0x461: 0x0112, 0x462: 0x896b, 0x463: 0x8b53, + 0x464: 0x89cb, 0x465: 0x8a2a, 0x466: 0x8a8a, 0x467: 0x0f13, 0x468: 0x0f12, 0x469: 0x0313, + 0x46a: 0x0312, 0x46b: 0x0713, 0x46c: 0x0712, 0x46d: 0x8aeb, 0x46e: 0x8b4b, 0x46f: 0x8bab, + 0x470: 0x8c0b, 0x471: 0x0012, 0x472: 0x0113, 0x473: 0x0112, 0x474: 0x0012, 0x475: 0x0313, + 0x476: 0x0312, 0x477: 0x0012, 0x478: 0x0012, 0x479: 0x0012, 0x47a: 0x0012, 0x47b: 0x0012, + 0x47c: 0x0015, 0x47d: 0x0015, 0x47e: 0x8c6b, 0x47f: 0x8ccb, + // Block 0x12, offset 0x480 + 0x480: 0x0113, 0x481: 0x0112, 0x482: 0x0113, 0x483: 0x0112, 0x484: 0x0113, 0x485: 0x0112, + 0x486: 0x0113, 0x487: 0x0112, 0x488: 0x0014, 0x489: 0x0014, 0x48a: 0x0014, 0x48b: 0x0713, + 0x48c: 0x0712, 0x48d: 0x8d2b, 0x48e: 0x0012, 0x48f: 0x0010, 0x490: 0x0113, 0x491: 0x0112, + 0x492: 0x0113, 0x493: 0x0112, 0x494: 0x6552, 0x495: 0x0012, 0x496: 0x0113, 0x497: 0x0112, + 0x498: 0x0113, 0x499: 0x0112, 0x49a: 0x0113, 0x49b: 0x0112, 0x49c: 0x0113, 0x49d: 0x0112, + 0x49e: 0x0113, 0x49f: 0x0112, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x0113, 0x4a3: 0x0112, + 0x4a4: 0x0113, 0x4a5: 0x0112, 0x4a6: 0x0113, 0x4a7: 0x0112, 0x4a8: 0x0113, 0x4a9: 0x0112, + 0x4aa: 0x8d8b, 0x4ab: 0x8deb, 0x4ac: 0x8e4b, 0x4ad: 0x8eab, 0x4ae: 0x8f0b, 0x4af: 0x0012, + 0x4b0: 0x8f6b, 0x4b1: 0x8fcb, 0x4b2: 0x902b, 0x4b3: 0xb353, 0x4b4: 0x0113, 0x4b5: 0x0112, + 0x4b6: 0x0113, 0x4b7: 0x0112, 0x4b8: 0x0113, 0x4b9: 0x0112, 0x4ba: 0x0113, 0x4bb: 0x0112, + 0x4bc: 0x0113, 0x4bd: 0x0112, 0x4be: 0x0113, 0x4bf: 0x0112, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x90ea, 0x4c1: 0x916a, 0x4c2: 0x91ea, 0x4c3: 0x926a, 0x4c4: 0x931a, 0x4c5: 0x93ca, + 0x4c6: 0x944a, + 0x4d3: 0x94ca, 0x4d4: 0x95aa, 0x4d5: 0x968a, 0x4d6: 0x976a, 0x4d7: 0x984a, + 0x4dd: 0x0010, + 0x4de: 0x0034, 0x4df: 0x0010, 0x4e0: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, 0x4e3: 0x0010, + 0x4e4: 0x0010, 0x4e5: 0x0010, 0x4e6: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, + 0x4ea: 0x0010, 0x4eb: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f3: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f8: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, + // Block 0x14, offset 0x500 + 0x500: 0x2213, 0x501: 0x2213, 0x502: 0x2613, 0x503: 0x2613, 0x504: 0x2213, 0x505: 0x2213, + 0x506: 0x2e13, 0x507: 0x2e13, 0x508: 0x2213, 0x509: 0x2213, 0x50a: 0x2613, 0x50b: 0x2613, + 0x50c: 0x2213, 0x50d: 0x2213, 0x50e: 0x3e13, 0x50f: 0x3e13, 0x510: 0x2213, 0x511: 0x2213, + 0x512: 0x2613, 0x513: 0x2613, 0x514: 0x2213, 0x515: 0x2213, 0x516: 0x2e13, 0x517: 0x2e13, + 0x518: 0x2213, 0x519: 0x2213, 0x51a: 0x2613, 0x51b: 0x2613, 0x51c: 0x2213, 0x51d: 0x2213, + 0x51e: 0xbc53, 0x51f: 0xbc53, 0x520: 0xbf53, 0x521: 0xbf53, 0x522: 0x2212, 0x523: 0x2212, + 0x524: 0x2612, 0x525: 0x2612, 0x526: 0x2212, 0x527: 0x2212, 0x528: 0x2e12, 0x529: 0x2e12, + 0x52a: 0x2212, 0x52b: 0x2212, 0x52c: 0x2612, 0x52d: 0x2612, 0x52e: 0x2212, 0x52f: 0x2212, + 0x530: 0x3e12, 0x531: 0x3e12, 0x532: 0x2212, 0x533: 0x2212, 0x534: 0x2612, 0x535: 0x2612, + 0x536: 0x2212, 0x537: 0x2212, 0x538: 0x2e12, 0x539: 0x2e12, 0x53a: 0x2212, 0x53b: 0x2212, + 0x53c: 0x2612, 0x53d: 0x2612, 0x53e: 0x2212, 0x53f: 0x2212, + // Block 0x15, offset 0x540 + 0x542: 0x0010, + 0x547: 0x0010, 0x549: 0x0010, 0x54b: 0x0010, + 0x54d: 0x0010, 0x54e: 0x0010, 0x54f: 0x0010, 0x551: 0x0010, + 0x552: 0x0010, 0x554: 0x0010, 0x557: 0x0010, + 0x559: 0x0010, 0x55b: 0x0010, 0x55d: 0x0010, + 0x55f: 0x0010, 0x561: 
0x0010, 0x562: 0x0010, + 0x564: 0x0010, 0x567: 0x0010, 0x568: 0x0010, 0x569: 0x0010, + 0x56a: 0x0010, 0x56c: 0x0010, 0x56d: 0x0010, 0x56e: 0x0010, 0x56f: 0x0010, + 0x570: 0x0010, 0x571: 0x0010, 0x572: 0x0010, 0x574: 0x0010, 0x575: 0x0010, + 0x576: 0x0010, 0x577: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57e: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. +var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x14, 0xc3: 0x15, 0xc4: 0x16, 0xc5: 0x17, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x18, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x19, 0xcc: 0x1a, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1b, 0xd1: 0x1c, 0xd2: 0x1d, 0xd3: 0x1e, 0xd4: 0x1f, 0xd5: 0x20, 0xd6: 0x08, 0xd7: 0x21, + 0xd8: 0x22, 0xd9: 0x23, 0xda: 0x24, 0xdb: 0x25, 0xdc: 0x26, 0xdd: 0x27, 0xde: 0x28, 0xdf: 0x29, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x2a, 0x121: 0x2b, 0x122: 0x2c, 0x123: 0x2d, 0x124: 0x2e, 0x125: 0x2f, 0x126: 0x30, 0x127: 0x31, + 0x128: 0x32, 0x129: 0x33, 0x12a: 0x34, 0x12b: 0x35, 0x12c: 0x36, 0x12d: 0x37, 0x12e: 0x38, 0x12f: 0x39, + 0x130: 0x3a, 0x131: 0x3b, 0x132: 0x3c, 0x133: 0x3d, 0x134: 0x3e, 0x135: 0x3f, 0x136: 0x40, 0x137: 0x41, + 0x138: 0x42, 0x139: 0x43, 0x13a: 0x44, 0x13b: 0x45, 0x13c: 0x46, 0x13d: 0x47, 0x13e: 0x48, 0x13f: 0x49, + // Block 0x5, offset 0x140 + 0x140: 0x4a, 0x141: 0x4b, 0x142: 0x4c, 0x143: 0x09, 0x144: 0x24, 0x145: 0x24, 0x146: 0x24, 0x147: 0x24, + 0x148: 0x24, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x24, 0x152: 0x24, 0x153: 0x24, 0x154: 0x24, 0x155: 0x24, 0x156: 0x24, 0x157: 0x24, + 0x158: 0x24, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x0a, 0x17e: 0x0b, 0x17f: 0x0c, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0d, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0e, + 0x1b0: 0x7c, 0x1b1: 0x0f, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x24, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x24, 0x202: 0x24, 0x203: 0x24, 0x204: 0x24, 0x205: 0x24, 0x206: 0x24, 0x207: 0x24, + 0x208: 0x24, 0x209: 0x24, 0x20a: 0x24, 0x20b: 0x24, 0x20c: 0x24, 0x20d: 0x24, 0x20e: 0x24, 0x20f: 0x24, + 0x210: 0x24, 0x211: 0x24, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x24, 0x215: 0x24, 0x216: 0x24, 0x217: 0x24, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x10, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x24, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x24, 0x231: 0x24, 0x232: 0x24, 0x233: 0x24, 0x234: 0x24, 0x235: 0x24, 0x236: 0x24, 0x237: 0x24, + 0x238: 0x24, 
0x239: 0x24, 0x23a: 0x24, 0x23b: 0x24, 0x23c: 0x24, 0x23d: 0x24, 0x23e: 0x24, 0x23f: 0x24, + // Block 0x9, offset 0x240 + 0x240: 0x24, 0x241: 0x24, 0x242: 0x24, 0x243: 0x24, 0x244: 0x24, 0x245: 0x24, 0x246: 0x24, 0x247: 0x24, + 0x248: 0x24, 0x249: 0x24, 0x24a: 0x24, 0x24b: 0x24, 0x24c: 0x24, 0x24d: 0x24, 0x24e: 0x24, 0x24f: 0x24, + 0x250: 0x24, 0x251: 0x24, 0x252: 0x24, 0x253: 0x24, 0x254: 0x24, 0x255: 0x24, 0x256: 0x24, 0x257: 0x24, + 0x258: 0x24, 0x259: 0x24, 0x25a: 0x24, 0x25b: 0x24, 0x25c: 0x24, 0x25d: 0x24, 0x25e: 0x24, 0x25f: 0x24, + 0x260: 0x24, 0x261: 0x24, 0x262: 0x24, 0x263: 0x24, 0x264: 0x24, 0x265: 0x24, 0x266: 0x24, 0x267: 0x24, + 0x268: 0x24, 0x269: 0x24, 0x26a: 0x24, 0x26b: 0x24, 0x26c: 0x24, 0x26d: 0x24, 0x26e: 0x24, 0x26f: 0x24, + 0x270: 0x24, 0x271: 0x24, 0x272: 0x24, 0x273: 0x24, 0x274: 0x24, 0x275: 0x24, 0x276: 0x24, 0x277: 0x24, + 0x278: 0x24, 0x279: 0x24, 0x27a: 0x24, 0x27b: 0x24, 0x27c: 0x24, 0x27d: 0x24, 0x27e: 0x24, 0x27f: 0x24, + // Block 0xa, offset 0x280 + 0x280: 0x24, 0x281: 0x24, 0x282: 0x24, 0x283: 0x24, 0x284: 0x24, 0x285: 0x24, 0x286: 0x24, 0x287: 0x24, + 0x288: 0x24, 0x289: 0x24, 0x28a: 0x24, 0x28b: 0x24, 0x28c: 0x24, 0x28d: 0x24, 0x28e: 0x24, 0x28f: 0x24, + 0x290: 0x24, 0x291: 0x24, 0x292: 0x24, 0x293: 0x24, 0x294: 0x24, 0x295: 0x24, 0x296: 0x24, 0x297: 0x24, + 0x298: 0x24, 0x299: 0x24, 0x29a: 0x24, 0x29b: 0x24, 0x29c: 0x24, 0x29d: 0x24, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x11, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x24, 0x2f1: 0x24, 0x2f2: 0x24, 0x2f3: 0x24, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x24, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x24, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x24, 0x319: 0x24, 0x31a: 0x24, 0x31b: 0x24, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x24, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, 0x334: 0xd3, + 0x33c: 0xd4, 0x33d: 0xd5, 0x33f: 0xd6, + // Block 0xd, offset 0x340 + 0x340: 0xd7, 0x341: 0xd8, 0x342: 0xd9, 0x343: 0xda, 0x344: 0xdb, 0x345: 0xdc, 0x346: 0xdd, 0x347: 0xde, + 0x348: 0xdf, 0x34a: 0xe0, 0x34b: 0xe1, 0x34c: 0xe2, 0x34d: 0xe3, + 0x350: 0xe4, 0x351: 0xe5, 0x352: 0xe6, 0x353: 0xe7, 0x356: 0xe8, 0x357: 0xe9, + 0x358: 0xea, 0x359: 0xeb, 0x35a: 0xec, 0x35b: 0xed, 0x35c: 0xee, + 0x360: 0xef, 0x362: 0xf0, 0x363: 0xf1, 0x366: 0xf2, 0x367: 0xf3, + 0x368: 0xf4, 0x369: 0xf5, 0x36a: 0xf6, 0x36b: 0xf7, + 0x370: 0xf8, 0x371: 0xf9, 0x372: 0xfa, 0x374: 0xfb, 0x375: 0xfc, 0x376: 0xfd, + 0x37b: 0xfe, + // Block 0xe, offset 0x380 + 0x380: 0x24, 0x381: 0x24, 0x382: 0x24, 0x383: 0x24, 0x384: 0x24, 0x385: 0x24, 0x386: 0x24, 0x387: 0x24, + 0x388: 0x24, 0x389: 0x24, 0x38a: 0x24, 0x38b: 0x24, 0x38c: 0x24, 0x38d: 0x24, 0x38e: 0xff, + 0x390: 0x24, 0x391: 0x100, 0x392: 0x24, 0x393: 0x24, 0x394: 0x24, 0x395: 0x101, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x24, 0x3c1: 0x24, 0x3c2: 0x24, 0x3c3: 0x24, 0x3c4: 0x24, 0x3c5: 0x24, 0x3c6: 0x24, 0x3c7: 0x24, + 0x3c8: 0x24, 0x3c9: 0x24, 0x3ca: 0x24, 0x3cb: 0x24, 0x3cc: 0x24, 0x3cd: 0x24, 0x3ce: 0x24, 0x3cf: 0x24, + 0x3d0: 0x102, + // Block 0x10, offset 0x400 + 0x410: 0x24, 0x411: 0x24, 0x412: 0x24, 
0x413: 0x24, 0x414: 0x24, 0x415: 0x24, 0x416: 0x24, 0x417: 0x24, + 0x418: 0x24, 0x419: 0x103, + // Block 0x11, offset 0x440 + 0x460: 0x24, 0x461: 0x24, 0x462: 0x24, 0x463: 0x24, 0x464: 0x24, 0x465: 0x24, 0x466: 0x24, 0x467: 0x24, + 0x468: 0xf7, 0x469: 0x104, 0x46b: 0x105, 0x46c: 0x106, 0x46d: 0x107, 0x46e: 0x108, + 0x479: 0x109, 0x47c: 0x24, 0x47d: 0x10a, 0x47e: 0x10b, 0x47f: 0x10c, + // Block 0x12, offset 0x480 + 0x4b0: 0x24, 0x4b1: 0x10d, 0x4b2: 0x10e, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x10f, 0x4c6: 0x110, + 0x4c9: 0x111, + 0x4d0: 0x112, 0x4d1: 0x113, 0x4d2: 0x114, 0x4d3: 0x115, 0x4d4: 0x116, 0x4d5: 0x117, 0x4d6: 0x118, 0x4d7: 0x119, + 0x4d8: 0x11a, 0x4d9: 0x11b, 0x4da: 0x11c, 0x4db: 0x11d, 0x4dc: 0x11e, 0x4dd: 0x11f, 0x4de: 0x120, 0x4df: 0x121, + 0x4e8: 0x122, 0x4e9: 0x123, 0x4ea: 0x124, + // Block 0x14, offset 0x500 + 0x500: 0x125, 0x504: 0x126, 0x505: 0x127, + 0x50b: 0x128, + 0x520: 0x24, 0x521: 0x24, 0x522: 0x24, 0x523: 0x129, 0x524: 0x12, 0x525: 0x12a, + 0x538: 0x12b, 0x539: 0x13, 0x53a: 0x12c, + // Block 0x15, offset 0x540 + 0x544: 0x12d, 0x545: 0x12e, 0x546: 0x12f, + 0x54f: 0x130, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x131, 0x5c1: 0x132, 0x5c4: 0x132, 0x5c5: 0x132, 0x5c6: 0x132, 0x5c7: 0x133, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 289 entries, 578 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x35, 0x38, 0x3c, 0x3f, 0x43, 0x4d, 0x4f, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xae, 0xb0, 0xbf, 0xc5, 0xd3, 0xde, 0xeb, 0xf6, 0x102, 0x10c, 0x118, 0x123, 0x12f, 0x13b, 0x143, 0x14c, 0x156, 0x161, 0x16d, 0x174, 0x17f, 0x184, 0x18c, 0x18f, 0x194, 0x198, 0x19c, 0x1a3, 0x1ac, 0x1b4, 0x1b5, 0x1be, 0x1c5, 0x1cd, 0x1d3, 0x1d8, 0x1dc, 0x1df, 0x1e1, 0x1e4, 0x1e9, 0x1ea, 0x1ec, 0x1ee, 0x1f0, 0x1f7, 0x1fc, 0x200, 0x209, 0x20c, 0x20f, 0x215, 0x216, 0x221, 0x222, 0x223, 0x228, 0x235, 0x23d, 0x245, 0x24e, 0x257, 0x260, 0x265, 0x268, 0x273, 0x281, 0x283, 0x28a, 0x28e, 0x29a, 0x29b, 0x2a6, 0x2ae, 0x2b6, 0x2bc, 0x2bd, 0x2cb, 0x2d0, 0x2d3, 0x2d8, 0x2dc, 0x2e2, 0x2e7, 0x2ea, 0x2ef, 0x2f4, 0x2f5, 0x2fb, 0x2fd, 0x2fe, 0x300, 0x302, 0x305, 0x306, 0x308, 0x30b, 0x311, 0x315, 0x317, 0x31c, 0x323, 0x32b, 0x334, 0x335, 0x33e, 0x342, 0x347, 0x34f, 0x355, 0x35b, 0x365, 0x36a, 0x373, 0x379, 0x380, 0x384, 0x38c, 0x38e, 0x390, 0x393, 0x395, 0x397, 0x398, 0x399, 0x39b, 0x39d, 0x3a3, 0x3a8, 0x3aa, 0x3b1, 0x3b4, 0x3b6, 0x3bc, 0x3c1, 0x3c3, 0x3c4, 0x3c5, 0x3c6, 0x3c8, 0x3ca, 0x3cc, 0x3cf, 0x3d1, 0x3d4, 0x3dc, 0x3df, 0x3e3, 0x3eb, 0x3ed, 0x3ee, 0x3ef, 0x3f1, 0x3f7, 0x3f9, 0x3fa, 0x3fc, 0x3fe, 0x400, 0x40d, 0x40e, 0x40f, 0x413, 0x415, 0x416, 0x417, 0x418, 0x419, 0x41c, 0x41f, 0x425, 0x426, 0x42a, 0x42e, 0x434, 0x437, 0x43e, 0x442, 0x446, 0x44d, 0x456, 0x45c, 0x462, 0x46c, 0x476, 0x478, 0x481, 0x487, 0x48d, 0x493, 0x496, 0x49c, 0x49f, 0x4a8, 0x4a9, 0x4b0, 0x4b4, 0x4b5, 0x4b8, 0x4ba, 0x4c1, 0x4c9, 0x4cf, 0x4d5, 0x4d6, 0x4dc, 0x4df, 0x4e7, 0x4ee, 0x4f8, 0x500, 0x503, 0x504, 0x505, 0x506, 0x508, 0x509, 0x50b, 0x50d, 0x50f, 0x513, 0x514, 0x516, 0x519, 0x51b, 0x51d, 0x51f, 0x524, 0x529, 0x52d, 0x52e, 0x531, 0x535, 0x540, 0x544, 0x54c, 0x551, 0x555, 0x558, 0x55c, 0x55f, 0x562, 0x567, 0x56b, 0x56f, 0x573, 0x577, 0x579, 0x57b, 0x57e, 0x583, 0x586, 0x588, 0x58b, 0x58d, 0x593, 0x59c, 0x5a1, 0x5a2, 0x5a5, 0x5a6, 0x5a7, 0x5a9, 0x5aa, 0x5ab} + +// sparseValues: 1451 entries, 5804 bytes +var 
sparseValues = [1451]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xbf}, + // Block 0x6, offset 0x35 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x38 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3c + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3f + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x43 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4f + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + 
{value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9b, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 
0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x18, offset 0xae + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x19, offset 0xb0 + {value: 0x0034, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1a, offset 0xbf + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1b, offset 0xc5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1c, offset 0xd3 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0xde + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xeb + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1f, offset 0xf6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 
0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x20, offset 0x102 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x21, offset 0x10c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x22, offset 0x118 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x23, offset 0x123 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x24, offset 0x12f + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x26, offset 0x143 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x27, offset 0x14c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 
0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x28, offset 0x156 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x29, offset 0x161 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2a, offset 0x16d + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2b, offset 0x174 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2c, offset 0x17f + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2d, offset 0x184 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2e, offset 0x18c + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2f, offset 0x18f + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x30, offset 0x194 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x31, offset 0x198 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x32, offset 0x19c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 
0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x33, offset 0x1a3 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x34, offset 0x1ac + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x35, offset 0x1b4 + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x36, offset 0x1b5 + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x37, offset 0x1be + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x38, offset 0x1c5 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3b, offset 0x1d8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1dc + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3d, offset 0x1df + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3e, offset 0x1e1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3f, offset 0x1e4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x40, offset 0x1e9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x41, offset 0x1ea + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x42, offset 0x1ec + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x43, offset 0x1ee + {value: 0x0010, lo: 0x80, hi: 
0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x44, offset 0x1f0 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x45, offset 0x1f7 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x46, offset 0x1fc + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x47, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x48, offset 0x209 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x20c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x4a, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4b, offset 0x215 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4c, offset 0x216 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x221 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4e, offset 0x222 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4f, offset 0x223 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x50, offset 0x228 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x235 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x52, offset 0x23d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 
0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x54, offset 0x24e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x55, offset 0x257 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x56, offset 0x260 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x57, offset 0x265 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x58, offset 0x268 + {value: 0x31ea, lo: 0x80, hi: 0x80}, + {value: 0x326a, lo: 0x81, hi: 0x81}, + {value: 0x32ea, lo: 0x82, hi: 0x82}, + {value: 0x336a, lo: 0x83, hi: 0x83}, + {value: 0x33ea, lo: 0x84, hi: 0x84}, + {value: 0x346a, lo: 0x85, hi: 0x85}, + {value: 0x34ea, lo: 0x86, hi: 0x86}, + {value: 0x356a, lo: 0x87, hi: 0x87}, + {value: 0x35ea, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x59, offset 0x273 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xba}, + // Block 0x5a, offset 0x281 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5b, offset 0x283 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5c, offset 0x28a + {value: 0x0012, lo: 0x80, hi: 0x8d}, + {value: 0x8f52, lo: 0x8e, hi: 0x8e}, + {value: 0x0012, lo: 0x8f, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5d, offset 0x28e + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 
0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5e, offset 0x29a + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x29b + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x369a, lo: 0x96, hi: 0x96}, + {value: 0x374a, lo: 0x97, hi: 0x97}, + {value: 0x37fa, lo: 0x98, hi: 0x98}, + {value: 0x38aa, lo: 0x99, hi: 0x99}, + {value: 0x395a, lo: 0x9a, hi: 0x9a}, + {value: 0x3a0a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3abb, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x60, offset 0x2a6 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x61, offset 0x2ae + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x2b6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2bc + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x64, offset 0x2bd + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x65, offset 0x2cb + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa452, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x2d0 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x67, offset 0x2d3 + {value: 0xa753, lo: 0xb6, hi: 0xb7}, + {value: 0xaa53, lo: 0xb8, hi: 0xb9}, + {value: 0xad53, lo: 0xba, hi: 0xbb}, + {value: 0xaa53, lo: 0xbc, hi: 0xbd}, + {value: 0xa753, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x2d8 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xb053, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x69, offset 0x2dc + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6a, offset 
0x2e2 + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e7 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6c, offset 0x2ea + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6d, offset 0x2ef + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6e, offset 0x2f4 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x6f, offset 0x2f5 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x70, offset 0x2fb + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x71, offset 0x2fd + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x72, offset 0x2fe + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x73, offset 0x300 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x74, offset 0x302 + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x75, offset 0x305 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x76, offset 0x306 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x77, offset 0x308 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x78, offset 0x30b + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x79, offset 0x311 + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7a, offset 0x315 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7b, offset 0x317 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7c, offset 0x31c + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7d, offset 0x323 + {value: 0x0117, lo: 0x82, hi: 0x83}, + {value: 0x6553, lo: 0x84, hi: 0x84}, + {value: 0x908b, lo: 0x85, hi: 0x85}, + {value: 0x8f53, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7e, offset 0x32b + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 
0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x7f, offset 0x334 + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x80, offset 0x335 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x33e + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x82, offset 0x342 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x83, offset 0x347 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x84, offset 0x34f + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x85, offset 0x355 + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x86, offset 0x35b + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x87, offset 0x365 + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x36a + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x89, offset 0x373 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8a, offset 0x379 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xb352, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa7}, + {value: 0x74d2, 
lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x380 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x384 + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8d, offset 0x38c + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x38e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x8f, offset 0x390 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x90, offset 0x393 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x91, offset 0x395 + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x92, offset 0x397 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x93, offset 0x398 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x94, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x95, offset 0x39b + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x96, offset 0x39d + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x97, offset 0x3a3 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x98, offset 0x3a8 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x3aa + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3b1 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9b, offset 0x3b4 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9c, offset 0x3b6 + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9d, offset 0x3bc + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x3c1 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0x9f, offset 0x3c3 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa0, offset 0x3c4 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa1, offset 0x3c5 + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa2, offset 0x3c6 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // 
Block 0xa3, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa4, offset 0x3ca + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa5, offset 0x3cc + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa6, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa7, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa8, offset 0x3d4 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb653, lo: 0x98, hi: 0x9f}, + {value: 0xb953, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xa9, offset 0x3dc + {value: 0xb652, lo: 0x80, hi: 0x87}, + {value: 0xb952, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xaa, offset 0x3df + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb953, lo: 0xb0, hi: 0xb7}, + {value: 0xb653, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e3 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb952, lo: 0x98, hi: 0x9f}, + {value: 0xb652, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xac, offset 0x3eb + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xad, offset 0x3ed + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xae, offset 0x3ee + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xaf, offset 0x3ef + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb0, offset 0x3f1 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x3f7 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb2, offset 0x3f9 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb3, offset 0x3fa + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb4, offset 0x3fc + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb5, offset 0x3fe + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb6, offset 0x400 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x40d + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb8, offset 0x40e + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xb9, offset 0x40f + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 
0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xba, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbb, offset 0x415 + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbc, offset 0x416 + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbd, offset 0x417 + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbe, offset 0x418 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x419 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc0, offset 0x41c + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc1, offset 0x41f + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + // Block 0xc2, offset 0x425 + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xc3, offset 0x426 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc4, offset 0x42a + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc5, offset 0x42e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc6, offset 0x434 + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc7, offset 0x437 + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc8, offset 0x43e + {value: 0x0010, lo: 0x84, hi: 0x86}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc9, offset 0x442 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x446 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xcb, offset 0x44d + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xcc, offset 0x456 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcd, offset 0x45c + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 
0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xce, offset 0x462 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcf, offset 0x46c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xd0, offset 0x476 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd1, offset 0x478 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0x9f, hi: 0x9f}, + // Block 0xd2, offset 0x481 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x487 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd4, offset 0x48d + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x493 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd6, offset 0x496 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd7, offset 0x49c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd8, offset 0x49f + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + // Block 0xd9, offset 0x4a8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xda, offset 0x4a9 + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 
0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xdb, offset 0x4b0 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xdc, offset 0x4b4 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xdd, offset 0x4b5 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x4b8 + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xdf, offset 0x4ba + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0014, lo: 0x94, hi: 0x97}, + {value: 0x0014, lo: 0x9a, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0x9f}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + // Block 0xe0, offset 0x4c1 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xe1, offset 0x4c9 + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xe2, offset 0x4cf + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + // Block 0xe3, offset 0x4d5 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xe4, offset 0x4d6 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe5, offset 0x4dc + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x4df + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xe7, offset 0x4e7 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xe8, offset 0x4ee + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + {value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe9, offset 0x4f8 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 
0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xea, offset 0x500 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xeb, offset 0x503 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xec, offset 0x504 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xed, offset 0x505 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xee, offset 0x506 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xb0, hi: 0xb8}, + // Block 0xef, offset 0x508 + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xf0, offset 0x509 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xf1, offset 0x50b + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xf2, offset 0x50d + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xf3, offset 0x50f + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xf4, offset 0x513 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xf5, offset 0x514 + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0xf6, offset 0x516 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x519 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xf8, offset 0x51b + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa3, hi: 0xa3}, + // Block 0xf9, offset 0x51d + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xfa, offset 0x51f + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xfb, offset 0x524 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xfc, offset 0x529 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xfd, offset 0x52d + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xfe, offset 0x52e + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xff, offset 0x531 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x100, offset 0x535 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0x101, offset 0x540 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x102, offset 0x544 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 
0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0x103, offset 0x54c + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x104, offset 0x551 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x105, offset 0x555 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x106, offset 0x558 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x107, offset 0x55c + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x108, offset 0x55f + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x109, offset 0x562 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x10a, offset 0x567 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x10b, offset 0x56b + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x10c, offset 0x56f + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x10d, offset 0x573 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x10e, offset 0x577 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x10f, offset 0x579 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x110, offset 0x57b + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x111, offset 0x57e + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x112, offset 0x583 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + // Block 0x113, offset 0x586 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + // Block 0x114, offset 0x588 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0024, lo: 0xac, hi: 0xaf}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x115, offset 0x58b + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x116, offset 0x58d + {value: 0xbc52, lo: 0x80, hi: 0x81}, + {value: 0xbf52, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 
0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x117, offset 0x593 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x118, offset 0x59c + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x119, offset 0x5a1 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x11a, offset 0x5a2 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x11b, offset 0x5a5 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x11c, offset 0x5a6 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x11d, offset 0x5a7 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x11e, offset 0x5a9 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x11f, offset 0x5aa + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 15070 bytes (14KiB); checksum: 1EB13752 diff --git a/vendor/golang.org/x/text/cases/tables13.0.0.go b/vendor/golang.org/x/text/cases/tables13.0.0.go new file mode 100644 index 00000000000..cd874775b39 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables13.0.0.go @@ -0,0 +1,2400 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.16 +// +build go1.16 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +var xorData string = "" + // Size: 192 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x0b)\x08" + + "\x00\x03\x0a\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<" + + "\x00\x01&\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01" + + "\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2450 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꟅꟅ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ι" + + "ΙΙ\x166ΐΪ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12" + + "φΦΦ\x12\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა" + + "\x10\x1bᲑბ\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ" + + "\x10\x1bᲘი\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ" + + "\x10\x1bᲟჟ\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ" + + "\x10\x1bᲦღ\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ" + + "\x10\x1bᲭჭ\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ" + + "\x10\x1bᲴჴ\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ" + + "\x10\x1bᲽჽ\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12с" + + "СС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱" + + "\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12" + + "\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ" + + "\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ" + + "\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15" + + "\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣι" + + "ἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ" + + "\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15" + + "\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ" + + "\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ" + + "\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙ" + + "ᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙ" + + "Ὴͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΙ" + + "̈̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ" + + "́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ" + + "\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12" + + "\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12" + + "\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12" + + "\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x10ʂʂ\x12\x12ffFFFf" + + "\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12st" + + "STSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄ" + + "խ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. 
The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 12538 bytes (12.24 KiB). Checksum: af4dfa7d60c71d4c. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 20: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 20 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 22 blocks, 1408 entries, 2816 bytes +// The third block is the zero block. +var caseValues = [1408]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 
0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x110a, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x118a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x120a, + 0x19e: 0x128a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x130d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 
0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x138a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x14ca, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x160a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x168a, 0x251: 0x170a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x178a, 0x256: 0x180a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x188a, 0x271: 0x190a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x198a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x1a0a, 0x288: 0x0012, 0x28a: 0x0010, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x7053, 0x2c1: 0x7053, 0x2c2: 0x7053, 0x2c3: 0x7053, 0x2c4: 0x7053, 0x2c5: 0x7053, + 0x2c7: 0x7053, + 0x2cd: 0x7053, 0x2d0: 0x1aea, 0x2d1: 0x1b6a, + 0x2d2: 0x1bea, 0x2d3: 0x1c6a, 0x2d4: 0x1cea, 0x2d5: 0x1d6a, 0x2d6: 0x1dea, 0x2d7: 
0x1e6a, + 0x2d8: 0x1eea, 0x2d9: 0x1f6a, 0x2da: 0x1fea, 0x2db: 0x206a, 0x2dc: 0x20ea, 0x2dd: 0x216a, + 0x2de: 0x21ea, 0x2df: 0x226a, 0x2e0: 0x22ea, 0x2e1: 0x236a, 0x2e2: 0x23ea, 0x2e3: 0x246a, + 0x2e4: 0x24ea, 0x2e5: 0x256a, 0x2e6: 0x25ea, 0x2e7: 0x266a, 0x2e8: 0x26ea, 0x2e9: 0x276a, + 0x2ea: 0x27ea, 0x2eb: 0x286a, 0x2ec: 0x28ea, 0x2ed: 0x296a, 0x2ee: 0x29ea, 0x2ef: 0x2a6a, + 0x2f0: 0x2aea, 0x2f1: 0x2b6a, 0x2f2: 0x2bea, 0x2f3: 0x2c6a, 0x2f4: 0x2cea, 0x2f5: 0x2d6a, + 0x2f6: 0x2dea, 0x2f7: 0x2e6a, 0x2f8: 0x2eea, 0x2f9: 0x2f6a, 0x2fa: 0x2fea, + 0x2fc: 0x0014, 0x2fd: 0x306a, 0x2fe: 0x30ea, 0x2ff: 0x316a, + // Block 0xc, offset 0x300 + 0x300: 0x0812, 0x301: 0x0812, 0x302: 0x0812, 0x303: 0x0812, 0x304: 0x0812, 0x305: 0x0812, + 0x308: 0x0813, 0x309: 0x0813, 0x30a: 0x0813, 0x30b: 0x0813, + 0x30c: 0x0813, 0x30d: 0x0813, 0x310: 0x3b1a, 0x311: 0x0812, + 0x312: 0x3bfa, 0x313: 0x0812, 0x314: 0x3d3a, 0x315: 0x0812, 0x316: 0x3e7a, 0x317: 0x0812, + 0x319: 0x0813, 0x31b: 0x0813, 0x31d: 0x0813, + 0x31f: 0x0813, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x0812, 0x323: 0x0812, + 0x324: 0x0812, 0x325: 0x0812, 0x326: 0x0812, 0x327: 0x0812, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x0813, 0x32b: 0x0813, 0x32c: 0x0813, 0x32d: 0x0813, 0x32e: 0x0813, 0x32f: 0x0813, + 0x330: 0x9252, 0x331: 0x9252, 0x332: 0x9552, 0x333: 0x9552, 0x334: 0x9852, 0x335: 0x9852, + 0x336: 0x9b52, 0x337: 0x9b52, 0x338: 0x9e52, 0x339: 0x9e52, 0x33a: 0xa152, 0x33b: 0xa152, + 0x33c: 0x4d52, 0x33d: 0x4d52, + // Block 0xd, offset 0x340 + 0x340: 0x3fba, 0x341: 0x40aa, 0x342: 0x419a, 0x343: 0x428a, 0x344: 0x437a, 0x345: 0x446a, + 0x346: 0x455a, 0x347: 0x464a, 0x348: 0x4739, 0x349: 0x4829, 0x34a: 0x4919, 0x34b: 0x4a09, + 0x34c: 0x4af9, 0x34d: 0x4be9, 0x34e: 0x4cd9, 0x34f: 0x4dc9, 0x350: 0x4eba, 0x351: 0x4faa, + 0x352: 0x509a, 0x353: 0x518a, 0x354: 0x527a, 0x355: 0x536a, 0x356: 0x545a, 0x357: 0x554a, + 0x358: 0x5639, 0x359: 0x5729, 0x35a: 0x5819, 0x35b: 0x5909, 0x35c: 0x59f9, 0x35d: 0x5ae9, + 0x35e: 0x5bd9, 0x35f: 0x5cc9, 0x360: 0x5dba, 0x361: 0x5eaa, 0x362: 0x5f9a, 0x363: 0x608a, + 0x364: 0x617a, 0x365: 0x626a, 0x366: 0x635a, 0x367: 0x644a, 0x368: 0x6539, 0x369: 0x6629, + 0x36a: 0x6719, 0x36b: 0x6809, 0x36c: 0x68f9, 0x36d: 0x69e9, 0x36e: 0x6ad9, 0x36f: 0x6bc9, + 0x370: 0x0812, 0x371: 0x0812, 0x372: 0x6cba, 0x373: 0x6dca, 0x374: 0x6e9a, + 0x376: 0x6f7a, 0x377: 0x705a, 0x378: 0x0813, 0x379: 0x0813, 0x37a: 0x9253, 0x37b: 0x9253, + 0x37c: 0x7199, 0x37d: 0x0004, 0x37e: 0x726a, 0x37f: 0x0004, + // Block 0xe, offset 0x380 + 0x380: 0x0004, 0x381: 0x0004, 0x382: 0x72ea, 0x383: 0x73fa, 0x384: 0x74ca, + 0x386: 0x75aa, 0x387: 0x768a, 0x388: 0x9553, 0x389: 0x9553, 0x38a: 0x9853, 0x38b: 0x9853, + 0x38c: 0x77c9, 0x38d: 0x0004, 0x38e: 0x0004, 0x38f: 0x0004, 0x390: 0x0812, 0x391: 0x0812, + 0x392: 0x789a, 0x393: 0x79da, 0x396: 0x7b1a, 0x397: 0x7bfa, + 0x398: 0x0813, 0x399: 0x0813, 0x39a: 0x9b53, 0x39b: 0x9b53, 0x39d: 0x0004, + 0x39e: 0x0004, 0x39f: 0x0004, 0x3a0: 0x0812, 0x3a1: 0x0812, 0x3a2: 0x7d3a, 0x3a3: 0x7e7a, + 0x3a4: 0x7fba, 0x3a5: 0x0912, 0x3a6: 0x809a, 0x3a7: 0x817a, 0x3a8: 0x0813, 0x3a9: 0x0813, + 0x3aa: 0xa153, 0x3ab: 0xa153, 0x3ac: 0x0913, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b2: 0x82ba, 0x3b3: 0x83ca, 0x3b4: 0x849a, + 0x3b6: 0x857a, 0x3b7: 0x865a, 0x3b8: 0x9e53, 0x3b9: 0x9e53, 0x3ba: 0x4d53, 0x3bb: 0x4d53, + 0x3bc: 0x8799, 0x3bd: 0x0004, 0x3be: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c2: 0x0013, + 0x3c7: 0x0013, 0x3ca: 0x0012, 0x3cb: 0x0013, + 0x3cc: 0x0013, 0x3cd: 0x0013, 0x3ce: 0x0012, 0x3cf: 0x0012, 0x3d0: 0x0013, 0x3d1: 0x0013, + 0x3d2: 
0x0013, 0x3d3: 0x0012, 0x3d5: 0x0013, + 0x3d9: 0x0013, 0x3da: 0x0013, 0x3db: 0x0013, 0x3dc: 0x0013, 0x3dd: 0x0013, + 0x3e4: 0x0013, 0x3e6: 0x886b, 0x3e8: 0x0013, + 0x3ea: 0x88cb, 0x3eb: 0x890b, 0x3ec: 0x0013, 0x3ed: 0x0013, 0x3ef: 0x0012, + 0x3f0: 0x0013, 0x3f1: 0x0013, 0x3f2: 0xa453, 0x3f3: 0x0013, 0x3f4: 0x0012, 0x3f5: 0x0010, + 0x3f6: 0x0010, 0x3f7: 0x0010, 0x3f8: 0x0010, 0x3f9: 0x0012, + 0x3fc: 0x0012, 0x3fd: 0x0012, 0x3fe: 0x0013, 0x3ff: 0x0013, + // Block 0x10, offset 0x400 + 0x400: 0x1a13, 0x401: 0x1a13, 0x402: 0x1e13, 0x403: 0x1e13, 0x404: 0x1a13, 0x405: 0x1a13, + 0x406: 0x2613, 0x407: 0x2613, 0x408: 0x2a13, 0x409: 0x2a13, 0x40a: 0x2e13, 0x40b: 0x2e13, + 0x40c: 0x2a13, 0x40d: 0x2a13, 0x40e: 0x2613, 0x40f: 0x2613, 0x410: 0xa752, 0x411: 0xa752, + 0x412: 0xaa52, 0x413: 0xaa52, 0x414: 0xad52, 0x415: 0xad52, 0x416: 0xaa52, 0x417: 0xaa52, + 0x418: 0xa752, 0x419: 0xa752, 0x41a: 0x1a12, 0x41b: 0x1a12, 0x41c: 0x1e12, 0x41d: 0x1e12, + 0x41e: 0x1a12, 0x41f: 0x1a12, 0x420: 0x2612, 0x421: 0x2612, 0x422: 0x2a12, 0x423: 0x2a12, + 0x424: 0x2e12, 0x425: 0x2e12, 0x426: 0x2a12, 0x427: 0x2a12, 0x428: 0x2612, 0x429: 0x2612, + // Block 0x11, offset 0x440 + 0x440: 0x6552, 0x441: 0x6552, 0x442: 0x6552, 0x443: 0x6552, 0x444: 0x6552, 0x445: 0x6552, + 0x446: 0x6552, 0x447: 0x6552, 0x448: 0x6552, 0x449: 0x6552, 0x44a: 0x6552, 0x44b: 0x6552, + 0x44c: 0x6552, 0x44d: 0x6552, 0x44e: 0x6552, 0x44f: 0x6552, 0x450: 0xb052, 0x451: 0xb052, + 0x452: 0xb052, 0x453: 0xb052, 0x454: 0xb052, 0x455: 0xb052, 0x456: 0xb052, 0x457: 0xb052, + 0x458: 0xb052, 0x459: 0xb052, 0x45a: 0xb052, 0x45b: 0xb052, 0x45c: 0xb052, 0x45d: 0xb052, + 0x45e: 0xb052, 0x460: 0x0113, 0x461: 0x0112, 0x462: 0x896b, 0x463: 0x8b53, + 0x464: 0x89cb, 0x465: 0x8a2a, 0x466: 0x8a8a, 0x467: 0x0f13, 0x468: 0x0f12, 0x469: 0x0313, + 0x46a: 0x0312, 0x46b: 0x0713, 0x46c: 0x0712, 0x46d: 0x8aeb, 0x46e: 0x8b4b, 0x46f: 0x8bab, + 0x470: 0x8c0b, 0x471: 0x0012, 0x472: 0x0113, 0x473: 0x0112, 0x474: 0x0012, 0x475: 0x0313, + 0x476: 0x0312, 0x477: 0x0012, 0x478: 0x0012, 0x479: 0x0012, 0x47a: 0x0012, 0x47b: 0x0012, + 0x47c: 0x0015, 0x47d: 0x0015, 0x47e: 0x8c6b, 0x47f: 0x8ccb, + // Block 0x12, offset 0x480 + 0x480: 0x0113, 0x481: 0x0112, 0x482: 0x0113, 0x483: 0x0112, 0x484: 0x0113, 0x485: 0x0112, + 0x486: 0x0113, 0x487: 0x0112, 0x488: 0x0014, 0x489: 0x0014, 0x48a: 0x0014, 0x48b: 0x0713, + 0x48c: 0x0712, 0x48d: 0x8d2b, 0x48e: 0x0012, 0x48f: 0x0010, 0x490: 0x0113, 0x491: 0x0112, + 0x492: 0x0113, 0x493: 0x0112, 0x494: 0x6552, 0x495: 0x0012, 0x496: 0x0113, 0x497: 0x0112, + 0x498: 0x0113, 0x499: 0x0112, 0x49a: 0x0113, 0x49b: 0x0112, 0x49c: 0x0113, 0x49d: 0x0112, + 0x49e: 0x0113, 0x49f: 0x0112, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x0113, 0x4a3: 0x0112, + 0x4a4: 0x0113, 0x4a5: 0x0112, 0x4a6: 0x0113, 0x4a7: 0x0112, 0x4a8: 0x0113, 0x4a9: 0x0112, + 0x4aa: 0x8d8b, 0x4ab: 0x8deb, 0x4ac: 0x8e4b, 0x4ad: 0x8eab, 0x4ae: 0x8f0b, 0x4af: 0x0012, + 0x4b0: 0x8f6b, 0x4b1: 0x8fcb, 0x4b2: 0x902b, 0x4b3: 0xb353, 0x4b4: 0x0113, 0x4b5: 0x0112, + 0x4b6: 0x0113, 0x4b7: 0x0112, 0x4b8: 0x0113, 0x4b9: 0x0112, 0x4ba: 0x0113, 0x4bb: 0x0112, + 0x4bc: 0x0113, 0x4bd: 0x0112, 0x4be: 0x0113, 0x4bf: 0x0112, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x90ea, 0x4c1: 0x916a, 0x4c2: 0x91ea, 0x4c3: 0x926a, 0x4c4: 0x931a, 0x4c5: 0x93ca, + 0x4c6: 0x944a, + 0x4d3: 0x94ca, 0x4d4: 0x95aa, 0x4d5: 0x968a, 0x4d6: 0x976a, 0x4d7: 0x984a, + 0x4dd: 0x0010, + 0x4de: 0x0034, 0x4df: 0x0010, 0x4e0: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, 0x4e3: 0x0010, + 0x4e4: 0x0010, 0x4e5: 0x0010, 0x4e6: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, + 0x4ea: 
0x0010, 0x4eb: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f3: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f8: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, + // Block 0x14, offset 0x500 + 0x500: 0x2213, 0x501: 0x2213, 0x502: 0x2613, 0x503: 0x2613, 0x504: 0x2213, 0x505: 0x2213, + 0x506: 0x2e13, 0x507: 0x2e13, 0x508: 0x2213, 0x509: 0x2213, 0x50a: 0x2613, 0x50b: 0x2613, + 0x50c: 0x2213, 0x50d: 0x2213, 0x50e: 0x3e13, 0x50f: 0x3e13, 0x510: 0x2213, 0x511: 0x2213, + 0x512: 0x2613, 0x513: 0x2613, 0x514: 0x2213, 0x515: 0x2213, 0x516: 0x2e13, 0x517: 0x2e13, + 0x518: 0x2213, 0x519: 0x2213, 0x51a: 0x2613, 0x51b: 0x2613, 0x51c: 0x2213, 0x51d: 0x2213, + 0x51e: 0xbc53, 0x51f: 0xbc53, 0x520: 0xbf53, 0x521: 0xbf53, 0x522: 0x2212, 0x523: 0x2212, + 0x524: 0x2612, 0x525: 0x2612, 0x526: 0x2212, 0x527: 0x2212, 0x528: 0x2e12, 0x529: 0x2e12, + 0x52a: 0x2212, 0x52b: 0x2212, 0x52c: 0x2612, 0x52d: 0x2612, 0x52e: 0x2212, 0x52f: 0x2212, + 0x530: 0x3e12, 0x531: 0x3e12, 0x532: 0x2212, 0x533: 0x2212, 0x534: 0x2612, 0x535: 0x2612, + 0x536: 0x2212, 0x537: 0x2212, 0x538: 0x2e12, 0x539: 0x2e12, 0x53a: 0x2212, 0x53b: 0x2212, + 0x53c: 0x2612, 0x53d: 0x2612, 0x53e: 0x2212, 0x53f: 0x2212, + // Block 0x15, offset 0x540 + 0x542: 0x0010, + 0x547: 0x0010, 0x549: 0x0010, 0x54b: 0x0010, + 0x54d: 0x0010, 0x54e: 0x0010, 0x54f: 0x0010, 0x551: 0x0010, + 0x552: 0x0010, 0x554: 0x0010, 0x557: 0x0010, + 0x559: 0x0010, 0x55b: 0x0010, 0x55d: 0x0010, + 0x55f: 0x0010, 0x561: 0x0010, 0x562: 0x0010, + 0x564: 0x0010, 0x567: 0x0010, 0x568: 0x0010, 0x569: 0x0010, + 0x56a: 0x0010, 0x56c: 0x0010, 0x56d: 0x0010, 0x56e: 0x0010, 0x56f: 0x0010, + 0x570: 0x0010, 0x571: 0x0010, 0x572: 0x0010, 0x574: 0x0010, 0x575: 0x0010, + 0x576: 0x0010, 0x577: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57e: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x14, 0xc3: 0x15, 0xc4: 0x16, 0xc5: 0x17, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x18, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x19, 0xcc: 0x1a, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1b, 0xd1: 0x1c, 0xd2: 0x1d, 0xd3: 0x1e, 0xd4: 0x1f, 0xd5: 0x20, 0xd6: 0x08, 0xd7: 0x21, + 0xd8: 0x22, 0xd9: 0x23, 0xda: 0x24, 0xdb: 0x25, 0xdc: 0x26, 0xdd: 0x27, 0xde: 0x28, 0xdf: 0x29, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x2a, 0x121: 0x2b, 0x122: 0x2c, 0x123: 0x2d, 0x124: 0x2e, 0x125: 0x2f, 0x126: 0x30, 0x127: 0x31, + 0x128: 0x32, 0x129: 0x33, 0x12a: 0x34, 0x12b: 0x35, 0x12c: 0x36, 0x12d: 0x37, 0x12e: 0x38, 0x12f: 0x39, + 0x130: 0x3a, 0x131: 0x3b, 0x132: 0x3c, 0x133: 0x3d, 0x134: 0x3e, 0x135: 0x3f, 0x136: 0x40, 0x137: 0x41, + 0x138: 0x42, 0x139: 0x43, 0x13a: 0x44, 0x13b: 0x45, 0x13c: 0x46, 0x13d: 0x47, 0x13e: 0x48, 0x13f: 0x49, + // Block 0x5, offset 0x140 + 0x140: 0x4a, 0x141: 0x4b, 0x142: 0x4c, 0x143: 0x09, 0x144: 0x24, 0x145: 0x24, 0x146: 0x24, 0x147: 0x24, + 0x148: 0x24, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x24, 0x152: 0x24, 0x153: 0x24, 0x154: 0x24, 0x155: 0x24, 0x156: 0x24, 0x157: 0x24, + 0x158: 0x24, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16b: 0x66, 0x16c: 0x67, 0x16d: 0x68, 0x16e: 0x69, 0x16f: 0x6a, + 0x170: 0x6b, 0x171: 0x6c, 0x172: 0x6d, 0x173: 0x6e, 0x174: 0x6f, 0x175: 0x70, 0x176: 0x71, 0x177: 0x72, + 0x178: 0x73, 0x179: 0x73, 0x17a: 0x74, 0x17b: 0x73, 0x17c: 0x75, 0x17d: 0x0a, 0x17e: 0x0b, 0x17f: 0x0c, + // Block 0x6, offset 0x180 + 0x180: 0x76, 0x181: 0x77, 0x182: 0x78, 0x183: 0x79, 0x184: 0x0d, 0x185: 0x7a, 0x186: 0x7b, + 0x192: 0x7c, 0x193: 0x0e, + 0x1b0: 0x7d, 0x1b1: 0x0f, 0x1b2: 0x73, 0x1b3: 0x7e, 0x1b4: 0x7f, 0x1b5: 0x80, 0x1b6: 0x81, 0x1b7: 0x82, + 0x1b8: 0x83, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x84, 0x1c2: 0x85, 0x1c3: 0x86, 0x1c4: 0x87, 0x1c5: 0x24, 0x1c6: 0x88, + // Block 0x8, offset 0x200 + 0x200: 0x89, 0x201: 0x24, 0x202: 0x24, 0x203: 0x24, 0x204: 0x24, 0x205: 0x24, 0x206: 0x24, 0x207: 0x24, + 0x208: 0x24, 0x209: 0x24, 0x20a: 0x24, 0x20b: 0x24, 0x20c: 0x24, 0x20d: 0x24, 0x20e: 0x24, 0x20f: 0x24, + 0x210: 0x24, 0x211: 0x24, 0x212: 0x8a, 0x213: 0x8b, 0x214: 0x24, 0x215: 0x24, 0x216: 0x24, 0x217: 0x24, + 0x218: 0x8c, 0x219: 0x8d, 0x21a: 0x8e, 0x21b: 0x8f, 0x21c: 0x90, 0x21d: 0x91, 0x21e: 0x10, 0x21f: 0x92, + 0x220: 0x93, 0x221: 0x94, 0x222: 0x24, 0x223: 0x95, 0x224: 0x96, 0x225: 0x97, 0x226: 0x98, 0x227: 0x99, + 0x228: 0x9a, 0x229: 0x9b, 0x22a: 0x9c, 0x22b: 0x9d, 0x22c: 0x9e, 0x22d: 0x9f, 0x22e: 0xa0, 0x22f: 0xa1, + 0x230: 0x24, 0x231: 0x24, 0x232: 0x24, 0x233: 0x24, 0x234: 0x24, 0x235: 0x24, 0x236: 0x24, 0x237: 0x24, + 0x238: 0x24, 0x239: 0x24, 0x23a: 0x24, 0x23b: 0x24, 0x23c: 0x24, 0x23d: 0x24, 0x23e: 0x24, 0x23f: 0x24, + // Block 0x9, offset 0x240 + 0x240: 0x24, 0x241: 0x24, 0x242: 0x24, 0x243: 0x24, 0x244: 0x24, 0x245: 0x24, 0x246: 0x24, 0x247: 0x24, + 0x248: 0x24, 0x249: 0x24, 0x24a: 0x24, 0x24b: 0x24, 0x24c: 0x24, 0x24d: 0x24, 0x24e: 0x24, 0x24f: 0x24, + 0x250: 0x24, 0x251: 0x24, 0x252: 0x24, 0x253: 0x24, 0x254: 0x24, 0x255: 0x24, 0x256: 
0x24, 0x257: 0x24, + 0x258: 0x24, 0x259: 0x24, 0x25a: 0x24, 0x25b: 0x24, 0x25c: 0x24, 0x25d: 0x24, 0x25e: 0x24, 0x25f: 0x24, + 0x260: 0x24, 0x261: 0x24, 0x262: 0x24, 0x263: 0x24, 0x264: 0x24, 0x265: 0x24, 0x266: 0x24, 0x267: 0x24, + 0x268: 0x24, 0x269: 0x24, 0x26a: 0x24, 0x26b: 0x24, 0x26c: 0x24, 0x26d: 0x24, 0x26e: 0x24, 0x26f: 0x24, + 0x270: 0x24, 0x271: 0x24, 0x272: 0x24, 0x273: 0x24, 0x274: 0x24, 0x275: 0x24, 0x276: 0x24, 0x277: 0x24, + 0x278: 0x24, 0x279: 0x24, 0x27a: 0x24, 0x27b: 0x24, 0x27c: 0x24, 0x27d: 0x24, 0x27e: 0x24, 0x27f: 0x24, + // Block 0xa, offset 0x280 + 0x280: 0x24, 0x281: 0x24, 0x282: 0x24, 0x283: 0x24, 0x284: 0x24, 0x285: 0x24, 0x286: 0x24, 0x287: 0x24, + 0x288: 0x24, 0x289: 0x24, 0x28a: 0x24, 0x28b: 0x24, 0x28c: 0x24, 0x28d: 0x24, 0x28e: 0x24, 0x28f: 0x24, + 0x290: 0x24, 0x291: 0x24, 0x292: 0x24, 0x293: 0x24, 0x294: 0x24, 0x295: 0x24, 0x296: 0x24, 0x297: 0x24, + 0x298: 0x24, 0x299: 0x24, 0x29a: 0x24, 0x29b: 0x24, 0x29c: 0x24, 0x29d: 0x24, 0x29e: 0xa2, 0x29f: 0xa3, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x11, 0x2ed: 0xa4, 0x2ee: 0xa5, 0x2ef: 0xa6, + 0x2f0: 0x24, 0x2f1: 0x24, 0x2f2: 0x24, 0x2f3: 0x24, 0x2f4: 0xa7, 0x2f5: 0xa8, 0x2f6: 0xa9, 0x2f7: 0xaa, + 0x2f8: 0xab, 0x2f9: 0xac, 0x2fa: 0x24, 0x2fb: 0xad, 0x2fc: 0xae, 0x2fd: 0xaf, 0x2fe: 0xb0, 0x2ff: 0xb1, + // Block 0xc, offset 0x300 + 0x300: 0xb2, 0x301: 0xb3, 0x302: 0x24, 0x303: 0xb4, 0x305: 0xb5, 0x307: 0xb6, + 0x30a: 0xb7, 0x30b: 0xb8, 0x30c: 0xb9, 0x30d: 0xba, 0x30e: 0xbb, 0x30f: 0xbc, + 0x310: 0xbd, 0x311: 0xbe, 0x312: 0xbf, 0x313: 0xc0, 0x314: 0xc1, 0x315: 0xc2, + 0x318: 0x24, 0x319: 0x24, 0x31a: 0x24, 0x31b: 0x24, 0x31c: 0xc3, 0x31d: 0xc4, + 0x320: 0xc5, 0x321: 0xc6, 0x322: 0xc7, 0x323: 0xc8, 0x324: 0xc9, 0x326: 0xca, + 0x328: 0xcb, 0x329: 0xcc, 0x32a: 0xcd, 0x32b: 0xce, 0x32c: 0x5f, 0x32d: 0xcf, 0x32e: 0xd0, + 0x330: 0x24, 0x331: 0xd1, 0x332: 0xd2, 0x333: 0xd3, 0x334: 0xd4, + 0x33a: 0xd5, 0x33c: 0xd6, 0x33d: 0xd7, 0x33e: 0xd8, 0x33f: 0xd9, + // Block 0xd, offset 0x340 + 0x340: 0xda, 0x341: 0xdb, 0x342: 0xdc, 0x343: 0xdd, 0x344: 0xde, 0x345: 0xdf, 0x346: 0xe0, 0x347: 0xe1, + 0x348: 0xe2, 0x34a: 0xe3, 0x34b: 0xe4, 0x34c: 0xe5, 0x34d: 0xe6, + 0x350: 0xe7, 0x351: 0xe8, 0x352: 0xe9, 0x353: 0xea, 0x356: 0xeb, 0x357: 0xec, + 0x358: 0xed, 0x359: 0xee, 0x35a: 0xef, 0x35b: 0xf0, 0x35c: 0xf1, + 0x360: 0xf2, 0x362: 0xf3, 0x363: 0xf4, 0x364: 0xf5, 0x365: 0xf6, 0x366: 0xf7, 0x367: 0xf8, + 0x368: 0xf9, 0x369: 0xfa, 0x36a: 0xfb, 0x36b: 0xfc, + 0x370: 0xfd, 0x371: 0xfe, 0x372: 0xff, 0x374: 0x100, 0x375: 0x101, 0x376: 0x102, + 0x37b: 0x103, 0x37e: 0x104, + // Block 0xe, offset 0x380 + 0x380: 0x24, 0x381: 0x24, 0x382: 0x24, 0x383: 0x24, 0x384: 0x24, 0x385: 0x24, 0x386: 0x24, 0x387: 0x24, + 0x388: 0x24, 0x389: 0x24, 0x38a: 0x24, 0x38b: 0x24, 0x38c: 0x24, 0x38d: 0x24, 0x38e: 0x105, + 0x390: 0x24, 0x391: 0x106, 0x392: 0x24, 0x393: 0x24, 0x394: 0x24, 0x395: 0x107, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x24, 0x3c1: 0x24, 0x3c2: 0x24, 0x3c3: 0x24, 0x3c4: 0x24, 0x3c5: 0x24, 0x3c6: 0x24, 0x3c7: 0x24, + 0x3c8: 0x24, 0x3c9: 0x24, 0x3ca: 0x24, 0x3cb: 0x24, 0x3cc: 0x24, 0x3cd: 0x24, 0x3ce: 0x24, 0x3cf: 0x24, + 0x3d0: 0x108, + // Block 0x10, offset 0x400 + 0x410: 0x24, 0x411: 0x24, 0x412: 0x24, 0x413: 0x24, 0x414: 0x24, 0x415: 0x24, 0x416: 0x24, 0x417: 0x24, + 0x418: 0x24, 0x419: 0x109, + // Block 0x11, offset 0x440 + 0x460: 0x24, 0x461: 0x24, 0x462: 0x24, 0x463: 0x24, 0x464: 0x24, 0x465: 0x24, 0x466: 0x24, 0x467: 0x24, + 0x468: 0xfc, 0x469: 0x10a, 0x46b: 0x10b, 0x46c: 0x10c, 0x46d: 0x10d, 0x46e: 0x10e, + 0x479: 0x10f, 0x47c: 0x24, 0x47d: 
0x110, 0x47e: 0x111, 0x47f: 0x112, + // Block 0x12, offset 0x480 + 0x4b0: 0x24, 0x4b1: 0x113, 0x4b2: 0x114, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x115, 0x4c6: 0x116, + 0x4c9: 0x117, + 0x4d0: 0x118, 0x4d1: 0x119, 0x4d2: 0x11a, 0x4d3: 0x11b, 0x4d4: 0x11c, 0x4d5: 0x11d, 0x4d6: 0x11e, 0x4d7: 0x11f, + 0x4d8: 0x120, 0x4d9: 0x121, 0x4da: 0x122, 0x4db: 0x123, 0x4dc: 0x124, 0x4dd: 0x125, 0x4de: 0x126, 0x4df: 0x127, + 0x4e8: 0x128, 0x4e9: 0x129, 0x4ea: 0x12a, + // Block 0x14, offset 0x500 + 0x500: 0x12b, 0x504: 0x12c, 0x505: 0x12d, + 0x50b: 0x12e, + 0x520: 0x24, 0x521: 0x24, 0x522: 0x24, 0x523: 0x12f, 0x524: 0x12, 0x525: 0x130, + 0x538: 0x131, 0x539: 0x13, 0x53a: 0x132, + // Block 0x15, offset 0x540 + 0x544: 0x133, 0x545: 0x134, 0x546: 0x135, + 0x54f: 0x136, + 0x56f: 0x137, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x138, 0x5c1: 0x139, 0x5c4: 0x139, 0x5c5: 0x139, 0x5c6: 0x139, 0x5c7: 0x13a, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 296 entries, 592 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x34, 0x37, 0x3b, 0x3e, 0x42, 0x4c, 0x4e, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xae, 0xb0, 0xc0, 0xc6, 0xd4, 0xdf, 0xec, 0xf7, 0x103, 0x10d, 0x119, 0x124, 0x130, 0x13c, 0x144, 0x14d, 0x157, 0x162, 0x16e, 0x174, 0x17f, 0x185, 0x18d, 0x190, 0x195, 0x199, 0x19d, 0x1a4, 0x1ad, 0x1b5, 0x1b6, 0x1bf, 0x1c6, 0x1ce, 0x1d4, 0x1d9, 0x1dd, 0x1e0, 0x1e2, 0x1e5, 0x1ea, 0x1eb, 0x1ed, 0x1ef, 0x1f1, 0x1f8, 0x1fd, 0x201, 0x20a, 0x20d, 0x210, 0x216, 0x217, 0x222, 0x223, 0x224, 0x229, 0x236, 0x23f, 0x240, 0x248, 0x251, 0x25a, 0x263, 0x268, 0x26b, 0x276, 0x284, 0x286, 0x28d, 0x291, 0x29d, 0x29e, 0x2a9, 0x2b1, 0x2b9, 0x2bf, 0x2c0, 0x2ce, 0x2d3, 0x2d6, 0x2db, 0x2df, 0x2e5, 0x2ea, 0x2ed, 0x2f2, 0x2f7, 0x2f8, 0x2fe, 0x300, 0x301, 0x303, 0x305, 0x308, 0x309, 0x30b, 0x30e, 0x314, 0x318, 0x31a, 0x31f, 0x326, 0x331, 0x33b, 0x33c, 0x345, 0x349, 0x34e, 0x356, 0x35c, 0x362, 0x36c, 0x371, 0x37a, 0x380, 0x389, 0x38d, 0x395, 0x397, 0x399, 0x39c, 0x39e, 0x3a0, 0x3a1, 0x3a2, 0x3a4, 0x3a6, 0x3ac, 0x3b1, 0x3b3, 0x3ba, 0x3bd, 0x3bf, 0x3c5, 0x3ca, 0x3cc, 0x3cd, 0x3ce, 0x3cf, 0x3d1, 0x3d3, 0x3d5, 0x3d8, 0x3da, 0x3dd, 0x3e5, 0x3e8, 0x3ec, 0x3f4, 0x3f6, 0x3f7, 0x3f8, 0x3fa, 0x400, 0x402, 0x403, 0x405, 0x407, 0x409, 0x416, 0x417, 0x418, 0x41c, 0x41e, 0x41f, 0x420, 0x421, 0x422, 0x425, 0x428, 0x42b, 0x431, 0x432, 0x434, 0x438, 0x43c, 0x442, 0x445, 0x44c, 0x450, 0x454, 0x45d, 0x466, 0x46c, 0x472, 0x47c, 0x486, 0x488, 0x491, 0x497, 0x49d, 0x4a3, 0x4a6, 0x4ac, 0x4af, 0x4b8, 0x4b9, 0x4c0, 0x4c4, 0x4c5, 0x4c8, 0x4d2, 0x4d5, 0x4d7, 0x4de, 0x4e6, 0x4ec, 0x4f2, 0x4f3, 0x4f9, 0x4fc, 0x504, 0x50b, 0x515, 0x51d, 0x520, 0x521, 0x522, 0x523, 0x524, 0x526, 0x527, 0x529, 0x52b, 0x52d, 0x531, 0x532, 0x534, 0x537, 0x539, 0x53c, 0x53e, 0x543, 0x548, 0x54c, 0x54d, 0x550, 0x554, 0x55f, 0x563, 0x56b, 0x570, 0x574, 0x577, 0x57b, 0x57e, 0x581, 0x586, 0x58a, 0x58e, 0x592, 0x596, 0x598, 0x59a, 0x59d, 0x5a2, 0x5a5, 0x5a7, 0x5aa, 0x5ac, 0x5b2, 0x5bb, 0x5c0, 0x5c1, 0x5c4, 0x5c5, 0x5c6, 0x5c7, 0x5c9, 0x5ca, 0x5cb} + +// sparseValues: 1483 entries, 5932 bytes +var sparseValues = [1483]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 
0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x37 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3b + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3e + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x42 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4c + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4e + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0054, lo: 0x9f, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 
0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 
0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x18, offset 0xae + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x19, offset 0xb0 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0034, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1a, offset 0xc0 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1b, offset 0xc6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1c, offset 0xd4 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0xdf + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xec + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1f, offset 0xf7 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x20, offset 0x103 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x21, offset 0x10d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x22, offset 0x119 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x23, offset 0x124 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x24, offset 0x130 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x25, offset 0x13c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x26, offset 0x144 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x27, offset 0x14d + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, 
lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x28, offset 0x157 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x29, offset 0x162 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2a, offset 0x16e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2b, offset 0x174 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2c, offset 0x17f + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2d, offset 0x185 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2e, offset 0x18d + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2f, offset 0x190 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x30, offset 0x195 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x31, offset 0x199 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x32, offset 0x19d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x33, offset 0x1a4 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 
0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x34, offset 0x1ad + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x35, offset 0x1b5 + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x36, offset 0x1b6 + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x37, offset 0x1bf + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x38, offset 0x1c6 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1ce + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d4 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3b, offset 0x1d9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1dd + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3d, offset 0x1e0 + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3e, offset 0x1e2 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3f, offset 0x1e5 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x40, offset 0x1ea + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x41, offset 0x1eb + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x42, offset 0x1ed + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x43, offset 0x1ef + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x44, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 
0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x45, offset 0x1f8 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x46, offset 0x1fd + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x47, offset 0x201 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x48, offset 0x20a + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x20d + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x4a, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4b, offset 0x216 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4c, offset 0x217 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x222 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4e, offset 0x223 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4f, offset 0x224 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x50, offset 0x229 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x236 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x52, offset 0x23f + {value: 0x0034, lo: 0x80, hi: 0x80}, + // Block 0x53, offset 0x240 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 
0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x54, offset 0x248 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x55, offset 0x251 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x56, offset 0x25a + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x57, offset 0x263 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x58, offset 0x268 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x59, offset 0x26b + {value: 0x31ea, lo: 0x80, hi: 0x80}, + {value: 0x326a, lo: 0x81, hi: 0x81}, + {value: 0x32ea, lo: 0x82, hi: 0x82}, + {value: 0x336a, lo: 0x83, hi: 0x83}, + {value: 0x33ea, lo: 0x84, hi: 0x84}, + {value: 0x346a, lo: 0x85, hi: 0x85}, + {value: 0x34ea, lo: 0x86, hi: 0x86}, + {value: 0x356a, lo: 0x87, hi: 0x87}, + {value: 0x35ea, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x5a, offset 0x276 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xba}, + // Block 0x5b, offset 0x284 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5c, offset 0x286 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5d, offset 0x28d + {value: 0x0012, lo: 0x80, hi: 0x8d}, + {value: 0x8f52, lo: 0x8e, hi: 0x8e}, + {value: 0x0012, lo: 0x8f, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5e, offset 0x291 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, 
lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5f, offset 0x29d + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x29e + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x369a, lo: 0x96, hi: 0x96}, + {value: 0x374a, lo: 0x97, hi: 0x97}, + {value: 0x37fa, lo: 0x98, hi: 0x98}, + {value: 0x38aa, lo: 0x99, hi: 0x99}, + {value: 0x395a, lo: 0x9a, hi: 0x9a}, + {value: 0x3a0a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3abb, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x61, offset 0x2a9 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x62, offset 0x2b1 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2b9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2bf + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x65, offset 0x2c0 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x66, offset 0x2ce + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa452, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x2d3 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x68, offset 0x2d6 + {value: 0xa753, lo: 0xb6, hi: 0xb7}, + {value: 0xaa53, lo: 0xb8, hi: 0xb9}, + {value: 0xad53, lo: 0xba, hi: 0xbb}, + {value: 0xaa53, lo: 0xbc, hi: 0xbd}, + {value: 0xa753, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x2db + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xb053, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6a, offset 0x2df + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6b, offset 0x2e5 + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 
0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6c, offset 0x2ea + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6d, offset 0x2ed + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6e, offset 0x2f2 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6f, offset 0x2f7 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x70, offset 0x2f8 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x71, offset 0x2fe + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x72, offset 0x300 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x73, offset 0x301 + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x74, offset 0x303 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x75, offset 0x305 + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x76, offset 0x308 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x77, offset 0x309 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x78, offset 0x30b + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x79, offset 0x30e + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7a, offset 0x314 + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7b, offset 0x318 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7c, offset 0x31a + {value: 0x0004, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7d, offset 0x31f + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7e, offset 0x326 + {value: 0x0117, lo: 0x82, hi: 0x83}, + {value: 0x6553, lo: 0x84, hi: 0x84}, + {value: 0x908b, lo: 0x85, hi: 0x85}, + {value: 0x8f53, lo: 0x86, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0316, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7f, offset 0x331 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 
0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + // Block 0x80, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x81, offset 0x33c + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x82, offset 0x345 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x349 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x84, offset 0x34e + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x85, offset 0x356 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x86, offset 0x35c + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x87, offset 0x362 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x88, offset 0x36c + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x89, offset 0x371 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8a, offset 0x37a + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x380 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xb352, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa8}, + 
{value: 0x0014, lo: 0xa9, hi: 0xa9}, + {value: 0x0004, lo: 0xaa, hi: 0xab}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x389 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x38d + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8e, offset 0x395 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8f, offset 0x397 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x90, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x91, offset 0x39c + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x92, offset 0x39e + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x93, offset 0x3a0 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x94, offset 0x3a1 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x95, offset 0x3a2 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x96, offset 0x3a4 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x97, offset 0x3a6 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x98, offset 0x3ac + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x99, offset 0x3b1 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3b3 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3ba + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9c, offset 0x3bd + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9d, offset 0x3bf + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9e, offset 0x3c5 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9f, offset 0x3ca + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa0, offset 0x3cc + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa1, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa2, offset 0x3ce + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa3, 
offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa4, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa5, offset 0x3d3 + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa6, offset 0x3d5 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa7, offset 0x3d8 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa8, offset 0x3da + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa9, offset 0x3dd + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb653, lo: 0x98, hi: 0x9f}, + {value: 0xb953, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xaa, offset 0x3e5 + {value: 0xb652, lo: 0x80, hi: 0x87}, + {value: 0xb952, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xab, offset 0x3e8 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb953, lo: 0xb0, hi: 0xb7}, + {value: 0xb653, lo: 0xb8, hi: 0xbf}, + // Block 0xac, offset 0x3ec + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb952, lo: 0x98, hi: 0x9f}, + {value: 0xb652, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xad, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xae, offset 0x3f6 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xaf, offset 0x3f7 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb0, offset 0x3f8 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb1, offset 0x3fa + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb2, offset 0x400 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb3, offset 0x402 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb4, offset 0x403 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb5, offset 0x405 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb6, offset 0x407 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb7, offset 0x409 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x416 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb9, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xba, offset 0x418 + 
{value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbb, offset 0x41c + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbc, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbd, offset 0x41f + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbe, offset 0x420 + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x421 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x422 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc1, offset 0x425 + {value: 0x0010, lo: 0x80, hi: 0xa9}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + // Block 0xc2, offset 0x428 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc3, offset 0x42b + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + // Block 0xc4, offset 0x431 + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc5, offset 0x432 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xc6, offset 0x434 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc7, offset 0x438 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43c + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc9, offset 0x442 + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xca, offset 0x445 + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xcb, offset 0x44c + {value: 0x0010, lo: 0x84, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xcc, offset 0x450 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xcd, offset 0x454 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xce, offset 0x45d + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 
0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xcf, offset 0x466 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xd0, offset 0x46c + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd1, offset 0x472 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xd2, offset 0x47c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xd3, offset 0x486 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd4, offset 0x488 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + // Block 0xd5, offset 0x491 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x497 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd7, offset 0x49d + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd8, offset 0x4a3 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd9, offset 0x4a6 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xda, offset 0x4ac + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xdb, offset 0x4af + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 
0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + // Block 0xdc, offset 0x4b8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xdd, offset 0x4b9 + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xde, offset 0x4c0 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xdf, offset 0x4c4 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xe0, offset 0x4c5 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xe1, offset 0x4c8 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8c, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + {value: 0x0030, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xe2, offset 0x4d2 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0034, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xe3, offset 0x4d5 + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe4, offset 0x4d7 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0014, lo: 0x94, hi: 0x97}, + {value: 0x0014, lo: 0x9a, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0x9f}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + // Block 0xe5, offset 0x4de + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xe6, offset 0x4e6 + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xe7, offset 0x4ec + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + // Block 0xe8, offset 0x4f2 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xe9, offset 0x4f3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xea, offset 0x4f9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xeb, offset 0x4fc + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 
0xb5, hi: 0xb6}, + // Block 0xec, offset 0x504 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xed, offset 0x50b + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + {value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xee, offset 0x515 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xef, offset 0x51d + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xf0, offset 0x520 + {value: 0x0010, lo: 0xb0, hi: 0xb0}, + // Block 0xf1, offset 0x521 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xf2, offset 0x522 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xf3, offset 0x523 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xf4, offset 0x524 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xb0, hi: 0xb8}, + // Block 0xf5, offset 0x526 + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xf6, offset 0x527 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xf7, offset 0x529 + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xf8, offset 0x52b + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xf9, offset 0x52d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xfa, offset 0x531 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xfb, offset 0x532 + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0xfc, offset 0x534 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xfd, offset 0x537 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xfe, offset 0x539 + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa3, hi: 0xa4}, + {value: 0x0030, lo: 0xb0, hi: 0xb1}, + // Block 0xff, offset 0x53c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0x100, offset 0x53e + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0x101, offset 0x543 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0x102, offset 0x548 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0x103, offset 0x54c + 
{value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0x104, offset 0x54d + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0x105, offset 0x550 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x106, offset 0x554 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0x107, offset 0x55f + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x108, offset 0x563 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0x109, offset 0x56b + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x10a, offset 0x570 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x10b, offset 0x574 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x10c, offset 0x577 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x10d, offset 0x57b + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10e, offset 0x57e + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x10f, offset 0x581 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x110, offset 0x586 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x111, offset 0x58a + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x112, offset 0x58e + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x113, offset 0x592 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x114, offset 0x596 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x115, offset 0x598 + {value: 0x0014, lo: 0x80, hi: 
0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x116, offset 0x59a + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x117, offset 0x59d + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x118, offset 0x5a2 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + // Block 0x119, offset 0x5a5 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + // Block 0x11a, offset 0x5a7 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0024, lo: 0xac, hi: 0xaf}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x11b, offset 0x5aa + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x11c, offset 0x5ac + {value: 0xbc52, lo: 0x80, hi: 0x81}, + {value: 0xbf52, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x11d, offset 0x5b2 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x11e, offset 0x5bb + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x11f, offset 0x5c0 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x120, offset 0x5c1 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x121, offset 0x5c4 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x122, offset 0x5c5 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x123, offset 0x5c6 + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x124, offset 0x5c7 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x125, offset 0x5c9 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x126, offset 0x5ca + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 15212 bytes (14KiB); checksum: 1EB13752 diff --git a/vendor/golang.org/x/text/cases/tables9.0.0.go b/vendor/golang.org/x/text/cases/tables9.0.0.go new file mode 100644 index 00000000000..636d5d14dfe --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables9.0.0.go @@ -0,0 +1,2216 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build !go1.10 +// +build !go1.10 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +var xorData string = "" + // Size: 185 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + + "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + + "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2068 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ιΙΙ\x166ΐ" + + "Ϊ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12φΦΦ\x12" + + "\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x12\x12вВВ\x12\x12дД" + + "Д\x12\x12оОО\x12\x12сСС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13" + + "\x1bꙋꙊꙊ\x13\x1bẖH̱H̱\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1ba" + + "ʾAʾAʾ\x13\x1bṡṠṠ\x12\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166" + + "ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + + "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + + "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + + "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + + "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + + "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + + "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + + "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + + "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12" + + "\x12ιΙΙ\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1c" + + "ηιῃΗΙ\x166ῒΪ̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ" + + "̀\x166ΰΫ́Ϋ́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙ" + + "ῼ\x14$ώιΏΙΏͅ\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk" + + "\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ" + + "\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ" + + "\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFF" + + "Ff\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12" + + "stSTSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄ" + + "ԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 11742 bytes (11.47 KiB). Checksum: 795fe57ee5135873. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 18: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 18 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 20 blocks, 1280 entries, 2560 bytes +// The third block is the zero block. +var caseValues = [1280]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 
0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x110a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x118a, + 0x19e: 0x120a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x128d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 
0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x130a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x144a, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x158a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x160a, 0x251: 0x168a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x170a, 0x256: 0x178a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x180a, 0x271: 0x188a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x190a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, + 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, + 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x239a, 0x291: 0x0812, + 0x292: 0x247a, 0x293: 0x0812, 0x294: 0x25ba, 0x295: 0x0812, 0x296: 0x26fa, 0x297: 0x0812, + 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, + 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, + 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, + 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, + 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, + 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, + 0x2bc: 0x4d52, 0x2bd: 0x4d52, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x283a, 0x2c1: 0x292a, 0x2c2: 0x2a1a, 0x2c3: 0x2b0a, 0x2c4: 0x2bfa, 0x2c5: 0x2cea, + 0x2c6: 0x2dda, 0x2c7: 0x2eca, 0x2c8: 0x2fb9, 0x2c9: 0x30a9, 0x2ca: 0x3199, 0x2cb: 0x3289, + 0x2cc: 0x3379, 0x2cd: 0x3469, 0x2ce: 0x3559, 0x2cf: 0x3649, 0x2d0: 0x373a, 0x2d1: 0x382a, + 0x2d2: 0x391a, 0x2d3: 0x3a0a, 0x2d4: 0x3afa, 0x2d5: 0x3bea, 0x2d6: 0x3cda, 0x2d7: 0x3dca, + 0x2d8: 0x3eb9, 0x2d9: 0x3fa9, 0x2da: 0x4099, 0x2db: 0x4189, 0x2dc: 0x4279, 0x2dd: 0x4369, + 0x2de: 0x4459, 0x2df: 0x4549, 0x2e0: 0x463a, 0x2e1: 0x472a, 0x2e2: 0x481a, 0x2e3: 0x490a, + 0x2e4: 0x49fa, 0x2e5: 0x4aea, 0x2e6: 0x4bda, 0x2e7: 0x4cca, 0x2e8: 0x4db9, 0x2e9: 0x4ea9, + 0x2ea: 0x4f99, 0x2eb: 0x5089, 0x2ec: 0x5179, 0x2ed: 0x5269, 0x2ee: 0x5359, 0x2ef: 0x5449, + 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x553a, 0x2f3: 0x564a, 0x2f4: 0x571a, + 
0x2f6: 0x57fa, 0x2f7: 0x58da, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, + 0x2fc: 0x5a19, 0x2fd: 0x0004, 0x2fe: 0x5aea, 0x2ff: 0x0004, + // Block 0xc, offset 0x300 + 0x300: 0x0004, 0x301: 0x0004, 0x302: 0x5b6a, 0x303: 0x5c7a, 0x304: 0x5d4a, + 0x306: 0x5e2a, 0x307: 0x5f0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, + 0x30c: 0x6049, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, + 0x312: 0x611a, 0x313: 0x625a, 0x316: 0x639a, 0x317: 0x647a, + 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, + 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x65ba, 0x323: 0x66fa, + 0x324: 0x683a, 0x325: 0x0912, 0x326: 0x691a, 0x327: 0x69fa, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, + 0x332: 0x6b3a, 0x333: 0x6c4a, 0x334: 0x6d1a, + 0x336: 0x6dfa, 0x337: 0x6eda, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, + 0x33c: 0x7019, 0x33d: 0x0004, 0x33e: 0x0004, + // Block 0xd, offset 0x340 + 0x342: 0x0013, + 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, + 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, + 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, + 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, + 0x364: 0x0013, 0x366: 0x70eb, 0x368: 0x0013, + 0x36a: 0x714b, 0x36b: 0x718b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, + 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, + 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, + 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, + // Block 0xe, offset 0x380 + 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, + 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, + 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, + 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, + 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, + 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, + 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, + 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, + 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, + 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, + 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, + 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0x71eb, 0x3e3: 0x8853, + 0x3e4: 0x724b, 0x3e5: 0x72aa, 0x3e6: 0x730a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, + 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0x736b, 0x3ee: 0x73cb, 0x3ef: 0x742b, + 0x3f0: 0x748b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, + 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, + 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0x74eb, 0x3ff: 0x754b, + // Block 0x10, offset 0x400 + 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, + 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0004, 0x40a: 
0x0004, 0x40b: 0x0713, + 0x40c: 0x0712, 0x40d: 0x75ab, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, + 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, + 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, + 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, + 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, + 0x42a: 0x760b, 0x42b: 0x766b, 0x42c: 0x76cb, 0x42d: 0x772b, 0x42e: 0x778b, + 0x430: 0x77eb, 0x431: 0x784b, 0x432: 0x78ab, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, + 0x436: 0x0113, 0x437: 0x0112, + // Block 0x11, offset 0x440 + 0x440: 0x790a, 0x441: 0x798a, 0x442: 0x7a0a, 0x443: 0x7a8a, 0x444: 0x7b3a, 0x445: 0x7bea, + 0x446: 0x7c6a, + 0x453: 0x7cea, 0x454: 0x7dca, 0x455: 0x7eaa, 0x456: 0x7f8a, 0x457: 0x806a, + 0x45d: 0x0010, + 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 0x0010, + 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, + 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, + 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, + 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, + 0x47c: 0x0010, 0x47e: 0x0010, + // Block 0x12, offset 0x480 + 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, + 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, + 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, + 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, + 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, + 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, + 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, + 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, + 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, + 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, + 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, + // Block 0x13, offset 0x4c0 + 0x4c2: 0x0010, + 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, + 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, + 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, + 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, + 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, + 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, + 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, + 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, + 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, + 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, + 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, + // Block 0x5, offset 0x140 + 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, + 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, + 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0c, + 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, + 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, + 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, + 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, + // Block 0x9, offset 0x240 + 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, + 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, + 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, + 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, + 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, + 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, + 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, + 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, + // Block 0xa, offset 0x280 + 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, + 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, + 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, + 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, + // Block 0xd, offset 0x340 + 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, + 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, + 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, + 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, + 0x362: 0xeb, 0x363: 0xec, + 0x36b: 0xed, + 0x370: 0xee, 0x371: 0xef, 0x372: 0xf0, + // Block 0xe, offset 0x380 + 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, + 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf1, + 0x390: 0x23, 0x391: 0xf2, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf3, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, + 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, + 0x3d0: 0xf2, + // Block 0x10, offset 0x400 + 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, + 0x418: 0x23, 0x419: 0xf4, + // Block 0x11, offset 0x440 + 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, + 0x468: 0xed, 0x469: 0xf5, 0x46b: 0xf6, 0x46c: 0xf7, 0x46d: 0xf8, 0x46e: 0xf9, + 0x47c: 0x23, 0x47d: 0xfa, 0x47e: 0xfb, 0x47f: 0xfc, + // Block 0x12, offset 0x480 + 0x4b0: 0x23, 0x4b1: 0xfd, 0x4b2: 0xfe, + // Block 0x13, offset 0x4c0 + 0x4c5: 0xff, 0x4c6: 0x100, + 0x4c9: 0x101, + 0x4d0: 0x102, 0x4d1: 0x103, 0x4d2: 0x104, 0x4d3: 0x105, 0x4d4: 0x106, 0x4d5: 0x107, 0x4d6: 0x108, 0x4d7: 0x109, + 
0x4d8: 0x10a, 0x4d9: 0x10b, 0x4da: 0x10c, 0x4db: 0x10d, 0x4dc: 0x10e, 0x4dd: 0x10f, 0x4de: 0x110, 0x4df: 0x111, + 0x4e8: 0x112, 0x4e9: 0x113, 0x4ea: 0x114, + // Block 0x14, offset 0x500 + 0x500: 0x115, + 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x116, 0x524: 0x10, 0x525: 0x117, + 0x538: 0x118, 0x539: 0x11, 0x53a: 0x119, + // Block 0x15, offset 0x540 + 0x544: 0x11a, 0x545: 0x11b, 0x546: 0x11c, + 0x54f: 0x11d, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x11e, 0x5c1: 0x11f, 0x5c4: 0x11f, 0x5c5: 0x11f, 0x5c6: 0x11f, 0x5c7: 0x120, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 272 entries, 544 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x3a, 0x3d, 0x41, 0x44, 0x48, 0x52, 0x54, 0x59, 0x69, 0x70, 0x75, 0x83, 0x84, 0x92, 0xa1, 0xab, 0xae, 0xb4, 0xbc, 0xbe, 0xc0, 0xce, 0xd4, 0xe2, 0xed, 0xf8, 0x103, 0x10f, 0x119, 0x124, 0x12f, 0x13b, 0x147, 0x14f, 0x157, 0x161, 0x16c, 0x178, 0x17e, 0x189, 0x18e, 0x196, 0x199, 0x19e, 0x1a2, 0x1a6, 0x1ad, 0x1b6, 0x1be, 0x1bf, 0x1c8, 0x1cf, 0x1d7, 0x1dd, 0x1e3, 0x1e8, 0x1ec, 0x1ef, 0x1f1, 0x1f4, 0x1f9, 0x1fa, 0x1fc, 0x1fe, 0x200, 0x207, 0x20c, 0x210, 0x219, 0x21c, 0x21f, 0x225, 0x226, 0x231, 0x232, 0x233, 0x238, 0x245, 0x24d, 0x255, 0x25e, 0x267, 0x270, 0x275, 0x278, 0x281, 0x28e, 0x290, 0x297, 0x299, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c0, 0x2c6, 0x2c7, 0x2d5, 0x2da, 0x2dd, 0x2e2, 0x2e6, 0x2ec, 0x2f1, 0x2f4, 0x2f9, 0x2fe, 0x2ff, 0x305, 0x307, 0x308, 0x30a, 0x30c, 0x30f, 0x310, 0x312, 0x315, 0x31b, 0x31f, 0x321, 0x327, 0x32e, 0x332, 0x33b, 0x33c, 0x344, 0x348, 0x34d, 0x355, 0x35b, 0x361, 0x36b, 0x370, 0x379, 0x37f, 0x386, 0x38a, 0x392, 0x394, 0x396, 0x399, 0x39b, 0x39d, 0x39e, 0x39f, 0x3a1, 0x3a3, 0x3a9, 0x3ae, 0x3b0, 0x3b6, 0x3b9, 0x3bb, 0x3c1, 0x3c6, 0x3c8, 0x3c9, 0x3ca, 0x3cb, 0x3cd, 0x3cf, 0x3d1, 0x3d4, 0x3d6, 0x3d9, 0x3e1, 0x3e4, 0x3e8, 0x3f0, 0x3f2, 0x3f3, 0x3f4, 0x3f6, 0x3fc, 0x3fe, 0x3ff, 0x401, 0x403, 0x405, 0x412, 0x413, 0x414, 0x418, 0x41a, 0x41b, 0x41c, 0x41d, 0x41e, 0x422, 0x426, 0x42c, 0x42e, 0x435, 0x438, 0x43c, 0x442, 0x44b, 0x451, 0x457, 0x461, 0x46b, 0x46d, 0x474, 0x47a, 0x480, 0x486, 0x489, 0x48f, 0x492, 0x49a, 0x49b, 0x4a2, 0x4a3, 0x4a6, 0x4a7, 0x4ad, 0x4b0, 0x4b8, 0x4b9, 0x4ba, 0x4bb, 0x4bc, 0x4be, 0x4c0, 0x4c2, 0x4c6, 0x4c7, 0x4c9, 0x4ca, 0x4cb, 0x4cd, 0x4d2, 0x4d7, 0x4db, 0x4dc, 0x4df, 0x4e3, 0x4ee, 0x4f2, 0x4fa, 0x4ff, 0x503, 0x506, 0x50a, 0x50d, 0x510, 0x515, 0x519, 0x51d, 0x521, 0x525, 0x527, 0x529, 0x52c, 0x531, 0x533, 0x538, 0x541, 0x546, 0x547, 0x54a, 0x54b, 0x54c, 0x54e, 0x54f, 0x550} + +// sparseValues: 1360 entries, 5440 bytes +var sparseValues = [1360]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, 
+ {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0004, lo: 0x82, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x91}, + {value: 0x0004, lo: 0x92, hi: 0x96}, + {value: 0x0054, lo: 0x97, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xac}, + {value: 0x0004, lo: 0xad, hi: 0xad}, + {value: 0x0014, lo: 0xae, hi: 0xae}, + {value: 0x0004, lo: 0xaf, hi: 0xbf}, + // Block 0x6, offset 0x3a + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x3d + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x41 + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x44 + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x48 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x52 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x54 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x59 + {value: 0x6852, lo: 0x80, hi: 0x86}, + {value: 0x198a, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0024, lo: 0x92, hi: 0x95}, + {value: 0x0034, lo: 0x96, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x99}, + {value: 0x0034, lo: 0x9a, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa7}, + {value: 0x0024, lo: 0xa8, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 
0x0034, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xbd}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x69 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xf, offset 0x70 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x10, offset 0x75 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x11, offset 0x83 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x12, offset 0x84 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x92 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x14, offset 0xa1 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x15, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x16, offset 0xae + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + // Block 0x17, offset 0xb4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, 
+ {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x18, offset 0xbc + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + // Block 0x19, offset 0xbe + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x1a, offset 0xc0 + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1b, offset 0xce + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd4 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1d, offset 0xe2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0xed + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + // Block 0x1f, offset 0xf8 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x20, offset 0x103 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 
0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x21, offset 0x10f + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x22, offset 0x119 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + // Block 0x23, offset 0x124 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x12f + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x147 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x14f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x28, offset 0x157 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x29, offset 0x161 + {value: 0x0010, 
lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x2a, offset 0x16c + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2b, offset 0x178 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2c, offset 0x17e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2d, offset 0x189 + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2e, offset 0x18e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2f, offset 0x196 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x30, offset 0x199 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x19e + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x32, offset 0x1a2 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x33, offset 0x1a6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x34, offset 0x1ad + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + 
{value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x1b6 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x36, offset 0x1be + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x37, offset 0x1bf + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x38, offset 0x1c8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x39, offset 0x1cf + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d7 + {value: 0x7053, lo: 0x80, hi: 0x85}, + {value: 0x7053, lo: 0x87, hi: 0x87}, + {value: 0x7053, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x3b, offset 0x1dd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x1e3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3d, offset 0x1e8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3e, offset 0x1ec + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3f, offset 0x1ef + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x40, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x41, offset 0x1f4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x42, offset 0x1f9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x43, offset 0x1fa + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x44, offset 0x1fc + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x45, offset 0x1fe + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x46, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 
0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x47, offset 0x207 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x48, offset 0x20c + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x49, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x4a, offset 0x219 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x4b, offset 0x21c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb7}, + // Block 0x4c, offset 0x21f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x225 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4e, offset 0x226 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4f, offset 0x231 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x50, offset 0x232 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x51, offset 0x233 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x52, offset 0x238 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x54, offset 0x24d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 
0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x255 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x56, offset 0x25e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x57, offset 0x267 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x58, offset 0x270 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x59, offset 0x275 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x5a, offset 0x278 + {value: 0x1a6a, lo: 0x80, hi: 0x80}, + {value: 0x1aea, lo: 0x81, hi: 0x81}, + {value: 0x1b6a, lo: 0x82, hi: 0x82}, + {value: 0x1bea, lo: 0x83, hi: 0x83}, + {value: 0x1c6a, lo: 0x84, hi: 0x84}, + {value: 0x1cea, lo: 0x85, hi: 0x85}, + {value: 0x1d6a, lo: 0x86, hi: 0x86}, + {value: 0x1dea, lo: 0x87, hi: 0x87}, + {value: 0x1e6a, lo: 0x88, hi: 0x88}, + // Block 0x5b, offset 0x281 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5c, offset 0x28e + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5d, offset 0x290 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8452, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8852, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x297 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5f, offset 0x299 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // 
Block 0x60, offset 0x2a4 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x2a5 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x1f1a, lo: 0x96, hi: 0x96}, + {value: 0x1fca, lo: 0x97, hi: 0x97}, + {value: 0x207a, lo: 0x98, hi: 0x98}, + {value: 0x212a, lo: 0x99, hi: 0x99}, + {value: 0x21da, lo: 0x9a, hi: 0x9a}, + {value: 0x228a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x233b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x62, offset 0x2b0 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x63, offset 0x2b8 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2c0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x2c6 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x66, offset 0x2c7 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x67, offset 0x2d5 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0x9d52, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x68, offset 0x2da + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x69, offset 0x2dd + {value: 0xa053, lo: 0xb6, hi: 0xb7}, + {value: 0xa353, lo: 0xb8, hi: 0xb9}, + {value: 0xa653, lo: 0xba, hi: 0xbb}, + {value: 0xa353, lo: 0xbc, hi: 0xbd}, + {value: 0xa053, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x2e2 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xa953, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e6 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6c, offset 0x2ec + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6d, offset 0x2f1 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, 
offset 0x2f4 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6f, offset 0x2f9 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x70, offset 0x2fe + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x71, offset 0x2ff + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x72, offset 0x305 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x73, offset 0x307 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x74, offset 0x308 + {value: 0x0010, lo: 0x85, hi: 0xad}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x75, offset 0x30a + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x76, offset 0x30c + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x77, offset 0x30f + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x78, offset 0x310 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x79, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x7a, offset 0x315 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x31b + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7c, offset 0x31f + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7d, offset 0x321 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0x9f}, + {value: 0x0004, lo: 0xa0, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7e, offset 0x327 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8453, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7f, offset 0x32e + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x80, offset 0x332 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x81, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x82, offset 0x33c + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 
0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x83, offset 0x344 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x348 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x85, offset 0x34d + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x86, offset 0x355 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x87, offset 0x35b + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x88, offset 0x361 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x89, offset 0x36b + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x370 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8b, offset 0x379 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37f + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xac52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x386 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x38a + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + 
{value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8f, offset 0x392 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x394 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x91, offset 0x396 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x92, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x93, offset 0x39b + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x94, offset 0x39d + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x95, offset 0x39e + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x96, offset 0x39f + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x97, offset 0x3a1 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x98, offset 0x3a3 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x99, offset 0x3a9 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x3ae + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3b0 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x3b6 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9d, offset 0x3b9 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9e, offset 0x3bb + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9f, offset 0x3c1 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x3c6 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa1, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa2, offset 0x3c9 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa3, offset 0x3ca + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa4, offset 0x3cb + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa6, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xa7, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa8, offset 0x3d4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa9, offset 0x3d6 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 
0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xaa, offset 0x3d9 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xaf53, lo: 0x98, hi: 0x9f}, + {value: 0xb253, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e1 + {value: 0xaf52, lo: 0x80, hi: 0x87}, + {value: 0xb252, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xac, offset 0x3e4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb253, lo: 0xb0, hi: 0xb7}, + {value: 0xaf53, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x3e8 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb252, lo: 0x98, hi: 0x9f}, + {value: 0xaf52, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xae, offset 0x3f0 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xaf, offset 0x3f2 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xb0, offset 0x3f3 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb1, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb2, offset 0x3f6 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x3fc + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb4, offset 0x3fe + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb5, offset 0x3ff + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb6, offset 0x401 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb7, offset 0x403 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb8, offset 0x405 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb3}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x412 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xba, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xbb, offset 0x414 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbc, offset 0x418 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbd, offset 0x41a + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbe, offset 0x41b + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbf, offset 0x41c + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x41d + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc1, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 
0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc2, offset 0x422 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x426 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc4, offset 0x42c + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc5, offset 0x42e + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc6, offset 0x435 + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc7, offset 0x438 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43c + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xc9, offset 0x442 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xca, offset 0x44b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcb, offset 0x451 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcc, offset 0x457 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcd, offset 0x461 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xce, offset 0x46b + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xcf, offset 0x46d + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + 
{value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd0, offset 0x474 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x47a + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd2, offset 0x480 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x486 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd4, offset 0x489 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x48f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd6, offset 0x492 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd7, offset 0x49a + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd8, offset 0x49b + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd9, offset 0x4a2 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xda, offset 0x4a3 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdb, offset 0x4a6 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xdc, offset 0x4a7 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xdd, offset 0x4ad + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xde, offset 0x4b0 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xdf, offset 0x4b8 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe0, offset 0x4b9 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xe1, offset 0x4ba + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xe2, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xe3, 
offset 0x4bc + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe4, offset 0x4be + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xe5, offset 0x4c0 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xe6, offset 0x4c2 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xe7, offset 0x4c6 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xe8, offset 0x4c7 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xe9, offset 0x4c9 + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xea, offset 0x4ca + {value: 0x0014, lo: 0xa0, hi: 0xa0}, + // Block 0xeb, offset 0x4cb + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xec, offset 0x4cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xed, offset 0x4d2 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xee, offset 0x4d7 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xef, offset 0x4db + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xf0, offset 0x4dc + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xf1, offset 0x4df + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xf2, offset 0x4e3 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xf3, offset 0x4ee + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xf4, offset 0x4f2 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xf5, offset 0x4fa + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0xf6, offset 0x4ff + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0xf7, offset 0x503 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0xf8, offset 0x506 + {value: 0x0012, lo: 
0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0xf9, offset 0x50a + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0xfa, offset 0x50d + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xfb, offset 0x510 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0xfc, offset 0x515 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0xfd, offset 0x519 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0xfe, offset 0x51d + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xff, offset 0x521 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x100, offset 0x525 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x101, offset 0x527 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x102, offset 0x529 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x103, offset 0x52c + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x104, offset 0x531 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x105, offset 0x533 + {value: 0xb552, lo: 0x80, hi: 0x81}, + {value: 0xb852, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x106, offset 0x538 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x107, offset 0x541 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x108, offset 0x546 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x109, offset 0x547 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10a, offset 0x54a + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x10b, offset 0x54b + {value: 0x0004, lo: 0xbb, hi: 0xbf}, + // Block 0x10c, offset 0x54c + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x10d, offset 0x54e + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x10e, offset 0x54f + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total 
table size 14027 bytes (13KiB); checksum: F17D40E8 diff --git a/vendor/golang.org/x/text/cases/trieval.go b/vendor/golang.org/x/text/cases/trieval.go new file mode 100644 index 00000000000..4e4d13fe5d1 --- /dev/null +++ b/vendor/golang.org/x/text/cases/trieval.go @@ -0,0 +1,217 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package cases + +// This file contains definitions for interpreting the trie value of the case +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds case information for a single rune. It is the value returned +// by a trie lookup. Most mapping information can be stored in a single 16-bit +// value. If not, for example when a rune is mapped to multiple runes, the value +// stores some basic case data and an index into an array with additional data. +// +// The per-rune values have the following format: +// +// if (exception) { +// 15..4 unsigned exception index +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode +// +// For the non-exceptional cases, a rune must be either uncased, lowercase or +// uppercase. If the rune is cased, the XOR pattern maps either a lowercase +// rune to uppercase or an uppercase rune to lowercase (applied to the 10 +// least-significant bits of the rune). +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + casedMask = 0x0003 + fullCasedMask = 0x0007 + ignorableMask = 0x0006 + ignorableValue = 0x0004 + + inverseFoldBit = 1 << 7 + isMidBit = 1 << 6 + + exceptionBit = 1 << 3 + exceptionShift = 4 + numExceptionBits = 12 + + xorIndexBit = 1 << 6 + xorShift = 8 + + // There is no mapping if all xor bits and the exception bit are zero. + hasMappingMask = 0xff80 | exceptionBit +) + +// The case mode bits encodes the case type of a rune. This includes uncased, +// title, upper and lower case and case ignorable. (For a definition of these +// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare +// cases, a rune can be both cased and case-ignorable. This is encoded by +// cIgnorableCased. A rune of this type is always lower case. Some runes are +// cased while not having a mapping. +// +// A common pattern for scripts in the Unicode standard is for upper and lower +// case runes to alternate for increasing rune values (e.g. the accented Latin +// ranges starting from U+0100 and U+1E00 among others and some Cyrillic +// characters). We use this property by defining a cXORCase mode, where the case +// mode (always upper or lower case) is derived from the rune value. As the XOR +// pattern for case mappings is often identical for successive runes, using +// cXORCase can result in large series of identical trie values. This, in turn, +// allows us to better compress the trie blocks. 
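// [Editor's illustrative sketch; not part of the vendored x/text file nor of this
// patch.] The per-rune bit layout documented above can be unpacked using the
// constants declared earlier in this file (exceptionBit, exceptionShift,
// fullCasedMask, xorShift, inverseFoldBit); the helper name decodeInfoSketch is
// hypothetical and only restates the documented layout.
func decodeInfoSketch(v info) (exceptionIdx, caseMode, xorPattern uint16, foldToUpper bool) {
	if v&exceptionBit != 0 {
		// Exceptional entry: bits 15..4 hold an index into the exceptions slice.
		return uint16(v) >> exceptionShift, 0, 0, false
	}
	caseMode = uint16(v & fullCasedMask) // bits 2..0: case mode (uncased, title, lower, upper, ...)
	xorPattern = uint16(v) >> xorShift   // bits 15..8: XOR pattern, or an index to one
	foldToUpper = v&inverseFoldBit != 0  // bit 7: fold maps to upper case instead of lower case
	return 0, caseMode, xorPattern, foldToUpper
}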
+const ( + cUncased info = iota // 000 + cTitle // 001 + cLower // 010 + cUpper // 011 + cIgnorableUncased // 100 + cIgnorableCased // 101 // lower case if mappings exist + cXORCase // 11x // case is cLower | ((rune&1) ^ x) + + maxCaseMode = cUpper +) + +func (c info) isCased() bool { + return c&casedMask != 0 +} + +func (c info) isCaseIgnorable() bool { + return c&ignorableMask == ignorableValue +} + +func (c info) isNotCasedAndNotCaseIgnorable() bool { + return c&fullCasedMask == 0 +} + +func (c info) isCaseIgnorableAndNotCased() bool { + return c&fullCasedMask == cIgnorableUncased +} + +func (c info) isMid() bool { + return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased +} + +// The case mapping implementation will need to know about various Canonical +// Combining Class (CCC) values. We encode two of these in the trie value: +// cccZero (0) and cccAbove (230). If the value is cccOther, it means that +// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that +// the rune also has the break category Break (see below). +const ( + cccBreak info = iota << 4 + cccZero + cccAbove + cccOther + + cccMask = cccBreak | cccZero | cccAbove | cccOther +) + +const ( + starter = 0 + above = 230 + iotaSubscript = 240 +) + +// The exceptions slice holds data that does not fit in a normal info entry. +// The entry is pointed to by the exception index in an entry. It has the +// following format: +// +// Header: +// +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold +// +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper +// +// Lengths with the value 0x7 indicate no value and implies no change. +// A length of 0 indicates a mapping to zero-length string. +// +// Body bytes: +// +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). (TODO) +// +// Fallbacks: +// +// missing fold -> lower +// missing title -> upper +// all missing -> original rune +// +// exceptions starts with a dummy byte to enforce that there is no zero index +// value. +const ( + lengthMask = 0x07 + lengthBits = 3 + noChange = 0 +) + +// References to generated trie. + +var trie = newCaseTrie(0) + +var sparse = sparseBlocks{ + values: sparseValues[:], + offsets: sparseOffsets[:], +} + +// Sparse block lookup code. + +// valueRange is an entry in a sparse block. +type valueRange struct { + value uint16 + lo, hi byte +} + +type sparseBlocks struct { + values []valueRange + offsets []uint16 +} + +// lookup returns the value from values block n for byte b using binary search. +func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { + lo := s.offsets[n] + hi := s.offsets[n+1] + for lo < hi { + m := lo + (hi-lo)/2 + r := s.values[m] + if r.lo <= b && b <= r.hi { + return r.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} + +// lastRuneForTesting is the last rune used for testing. Everything after this +// is boring. +const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go new file mode 100644 index 00000000000..3cddbbdda8c --- /dev/null +++ b/vendor/golang.org/x/text/internal/internal.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains non-exported functionality that are used by +// packages in the text repository. +package internal // import "golang.org/x/text/internal" + +import ( + "sort" + + "golang.org/x/text/language" +) + +// SortTags sorts tags in place. +func SortTags(tags []language.Tag) { + sort.Sort(sorter(tags)) +} + +type sorter []language.Tag + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sorter) Less(i, j int) bool { + return s[i].String() < s[j].String() +} + +// UniqueTags sorts and filters duplicate tags in place and returns a slice with +// only unique tags. +func UniqueTags(tags []language.Tag) []language.Tag { + if len(tags) <= 1 { + return tags + } + SortTags(tags) + k := 0 + for i := 1; i < len(tags); i++ { + if tags[k].String() < tags[i].String() { + k++ + tags[k] = tags[i] + } + } + return tags[:k+1] +} diff --git a/vendor/golang.org/x/text/internal/language/common.go b/vendor/golang.org/x/text/internal/language/common.go new file mode 100644 index 00000000000..cdfdb749718 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/common.go @@ -0,0 +1,16 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// This file contains code common to the maketables.go and the package code. + +// AliasType is the type of an alias in AliasMap. +type AliasType int8 + +const ( + Deprecated AliasType = iota + Macro + Legacy + + AliasTypeUnknown AliasType = -1 +) diff --git a/vendor/golang.org/x/text/internal/language/compact.go b/vendor/golang.org/x/text/internal/language/compact.go new file mode 100644 index 00000000000..46a0015074f --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// CompactCoreInfo is a compact integer with the three core tags encoded. +type CompactCoreInfo uint32 + +// GetCompactCore generates a uint32 value that is guaranteed to be unique for +// different language, region, and script values. +func GetCompactCore(t Tag) (cci CompactCoreInfo, ok bool) { + if t.LangID > langNoIndexOffset { + return 0, false + } + cci |= CompactCoreInfo(t.LangID) << (8 + 12) + cci |= CompactCoreInfo(t.ScriptID) << 12 + cci |= CompactCoreInfo(t.RegionID) + return cci, true +} + +// Tag generates a tag from c. +func (c CompactCoreInfo) Tag() Tag { + return Tag{ + LangID: Language(c >> 20), + RegionID: Region(c & 0x3ff), + ScriptID: Script(c>>12) & 0xff, + } +} diff --git a/vendor/golang.org/x/text/internal/language/compact/compact.go b/vendor/golang.org/x/text/internal/language/compact/compact.go new file mode 100644 index 00000000000..1b36935ef7b --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/compact.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package compact defines a compact representation of language tags. +// +// Common language tags (at least all for which locale information is defined +// in CLDR) are assigned a unique index. 
Each Tag is associated with such an +// ID for selecting language-related resources (such as translations) as well +// as one for selecting regional defaults (currency, number formatting, etc.) +// +// It may want to export this functionality at some point, but at this point +// this is only available for use within x/text. +package compact // import "golang.org/x/text/internal/language/compact" + +import ( + "sort" + "strings" + + "golang.org/x/text/internal/language" +) + +// ID is an integer identifying a single tag. +type ID uint16 + +func getCoreIndex(t language.Tag) (id ID, ok bool) { + cci, ok := language.GetCompactCore(t) + if !ok { + return 0, false + } + i := sort.Search(len(coreTags), func(i int) bool { + return cci <= coreTags[i] + }) + if i == len(coreTags) || coreTags[i] != cci { + return 0, false + } + return ID(i), true +} + +// Parent returns the ID of the parent or the root ID if id is already the root. +func (id ID) Parent() ID { + return parents[id] +} + +// Tag converts id to an internal language Tag. +func (id ID) Tag() language.Tag { + if int(id) >= len(coreTags) { + return specialTags[int(id)-len(coreTags)] + } + return coreTags[id].Tag() +} + +var specialTags []language.Tag + +func init() { + tags := strings.Split(specialTagsStr, " ") + specialTags = make([]language.Tag, len(tags)) + for i, t := range tags { + specialTags[i] = language.MustParse(t) + } +} diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go new file mode 100644 index 00000000000..83816a72a8a --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/language.go @@ -0,0 +1,260 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_index.go -output tables.go +//go:generate go run gen_parents.go + +package compact + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "strings" + + "golang.org/x/text/internal/language" +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag struct { + // NOTE: exported tags will become part of the public API. + language ID + locale ID + full fullTag // always a language.Tag for now. +} + +const _und = 0 + +type fullTag interface { + IsRoot() bool + Parent() language.Tag +} + +// Make a compact Tag from a fully specified internal language Tag. +func Make(t language.Tag) (tag Tag) { + if region := t.TypeForKey("rg"); len(region) == 6 && region[2:] == "zzzz" { + if r, err := language.ParseRegion(region[:2]); err == nil { + tFull := t + t, _ = t.SetTypeForKey("rg", "") + // TODO: should we not consider "va" for the language tag? + var exact1, exact2 bool + tag.language, exact1 = FromTag(t) + t.RegionID = r + tag.locale, exact2 = FromTag(t) + if !exact1 || !exact2 { + tag.full = tFull + } + return tag + } + } + lang, ok := FromTag(t) + tag.language = lang + tag.locale = lang + if !ok { + tag.full = t + } + return tag +} + +// Tag returns an internal language Tag version of this tag. 
+func (t Tag) Tag() language.Tag { + if t.full != nil { + return t.full.(language.Tag) + } + tag := t.language.Tag() + if t.language != t.locale { + loc := t.locale.Tag() + tag, _ = tag.SetTypeForKey("rg", strings.ToLower(loc.RegionID.String())+"zzzz") + } + return tag +} + +// IsCompact reports whether this tag is fully defined in terms of ID. +func (t *Tag) IsCompact() bool { + return t.full == nil +} + +// MayHaveVariants reports whether a tag may have variants. If it returns false +// it is guaranteed the tag does not have variants. +func (t Tag) MayHaveVariants() bool { + return t.full != nil || int(t.language) >= len(coreTags) +} + +// MayHaveExtensions reports whether a tag may have extensions. If it returns +// false it is guaranteed the tag does not have them. +func (t Tag) MayHaveExtensions() bool { + return t.full != nil || + int(t.language) >= len(coreTags) || + t.language != t.locale +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + if t.full != nil { + return t.full.IsRoot() + } + return t.language == _und +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.full != nil { + return Make(t.full.Parent()) + } + if t.language != t.locale { + // Simulate stripping -u-rg-xxxxxx + return Tag{language: t.language, locale: t.language} + } + // TODO: use parent lookup table once cycle from internal package is + // removed. Probably by internalizing the table and declaring this fast + // enough. + // lang := compactID(internal.Parent(uint16(t.language))) + lang, _ := FromTag(t.language.Tag().Parent()) + return Tag{language: lang, locale: lang} +} + +// returns token t and the rest of the string. +func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// LanguageID returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository.The index will change over time +// and should not be stored in persistent storage. If t does not match a compact +// index, exact will be false and the compact index will be returned for the +// first match after repeatedly taking the Parent of t. +func LanguageID(t Tag) (id ID, exact bool) { + return t.language, t.full == nil +} + +// RegionalID returns the ID for the regional variant of this tag. This index is +// used to indicate region-specific overrides, such as default currency, default +// calendar and week data, default time cycle, and default measurement system +// and unit preferences. +// +// For instance, the tag en-GB-u-rg-uszzzz specifies British English with US +// settings for currency, number formatting, etc. The CompactIndex for this tag +// will be that for en-GB, while the RegionalID will be the one corresponding to +// en-US. +func RegionalID(t Tag) (id ID, exact bool) { + return t.locale, t.full == nil +} + +// LanguageTag returns t stripped of regional variant indicators. +// +// At the moment this means it is stripped of a regional and variant subtag "rg" +// and "va" in the "u" extension. 
+func (t Tag) LanguageTag() Tag { + if t.full == nil { + return Tag{language: t.language, locale: t.language} + } + tt := t.Tag() + tt.SetTypeForKey("rg", "") + tt.SetTypeForKey("va", "") + return Make(tt) +} + +// RegionalTag returns the regional variant of the tag. +// +// At the moment this means that the region is set from the regional subtag +// "rg" in the "u" extension. +func (t Tag) RegionalTag() Tag { + rt := Tag{language: t.locale, locale: t.locale} + if t.full == nil { + return rt + } + b := language.Builder{} + tag := t.Tag() + // tag, _ = tag.SetTypeForKey("rg", "") + b.SetTag(t.locale.Tag()) + if v := tag.Variants(); v != "" { + for _, v := range strings.Split(v, "-") { + b.AddVariant(v) + } + } + for _, e := range tag.Extensions() { + b.AddExt(e) + } + return t +} + +// FromTag reports closest matching ID for an internal language Tag. +func FromTag(t language.Tag) (id ID, exact bool) { + // TODO: perhaps give more frequent tags a lower index. + // TODO: we could make the indexes stable. This will excluded some + // possibilities for optimization, so don't do this quite yet. + exact = true + + b, s, r := t.Raw() + if t.HasString() { + if t.IsPrivateUse() { + // We have no entries for user-defined tags. + return 0, false + } + hasExtra := false + if t.HasVariants() { + if t.HasExtensions() { + build := language.Builder{} + build.SetTag(language.Tag{LangID: b, ScriptID: s, RegionID: r}) + build.AddVariant(t.Variants()) + exact = false + t = build.Make() + } + hasExtra = true + } else if _, ok := t.Extension('u'); ok { + // TODO: va may mean something else. Consider not considering it. + // Strip all but the 'va' entry. + old := t + variant := t.TypeForKey("va") + t = language.Tag{LangID: b, ScriptID: s, RegionID: r} + if variant != "" { + t, _ = t.SetTypeForKey("va", variant) + hasExtra = true + } + exact = old == t + } else { + exact = false + } + if hasExtra { + // We have some variants. + for i, s := range specialTags { + if s == t { + return ID(i + len(coreTags)), exact + } + } + exact = false + } + } + if x, ok := getCoreIndex(t); ok { + return x, exact + } + exact = false + if r != 0 && s == 0 { + // Deal with cases where an extra script is inserted for the region. + t, _ := t.Maximize() + if x, ok := getCoreIndex(t); ok { + return x, exact + } + } + for t = t.Parent(); t != root; t = t.Parent() { + // No variants specified: just compare core components. + // The key has the form lllssrrr, where l, s, and r are nibbles for + // respectively the langID, scriptID, and regionID. + if x, ok := getCoreIndex(t); ok { + return x, exact + } + } + return 0, exact +} + +var root = language.Tag{} diff --git a/vendor/golang.org/x/text/internal/language/compact/parents.go b/vendor/golang.org/x/text/internal/language/compact/parents.go new file mode 100644 index 00000000000..8d810723c75 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/parents.go @@ -0,0 +1,120 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package compact + +// parents maps a compact index of a tag to the compact index of the parent of +// this tag. 
+var parents = []ID{ // 775 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0004, 0x0000, 0x0006, + 0x0000, 0x0008, 0x0000, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x0000, + 0x0000, 0x0028, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x0000, + 0x002f, 0x002e, 0x002e, 0x0000, 0x0033, 0x0000, 0x0035, 0x0000, + 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x0000, 0x003e, + // Entry 40 - 7F + 0x0000, 0x0040, 0x0040, 0x0000, 0x0043, 0x0043, 0x0000, 0x0046, + 0x0000, 0x0048, 0x0000, 0x0000, 0x004b, 0x004a, 0x004a, 0x0000, + 0x004f, 0x004f, 0x004f, 0x004f, 0x0000, 0x0054, 0x0054, 0x0000, + 0x0057, 0x0000, 0x0059, 0x0000, 0x005b, 0x0000, 0x005d, 0x005d, + 0x0000, 0x0060, 0x0000, 0x0062, 0x0000, 0x0064, 0x0000, 0x0066, + 0x0066, 0x0000, 0x0069, 0x0000, 0x006b, 0x006b, 0x006b, 0x006b, + 0x006b, 0x006b, 0x006b, 0x0000, 0x0073, 0x0000, 0x0075, 0x0000, + 0x0077, 0x0000, 0x0000, 0x007a, 0x0000, 0x007c, 0x0000, 0x007e, + // Entry 80 - BF + 0x0000, 0x0080, 0x0080, 0x0000, 0x0083, 0x0083, 0x0000, 0x0086, + 0x0087, 0x0087, 0x0087, 0x0086, 0x0088, 0x0087, 0x0087, 0x0087, + 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, 0x0088, 0x0087, + 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0086, + // Entry C0 - FF + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, + 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0086, 0x0087, + 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0000, + 0x00ef, 0x0000, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, + 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f1, 0x00f1, + // Entry 100 - 13F + 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, + 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x0000, 0x010e, + 0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0114, 0x0000, + 0x0117, 0x0117, 0x0117, 0x0117, 0x0000, 0x011c, 0x0000, 0x011e, + 0x0000, 0x0120, 0x0120, 0x0000, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + // Entry 140 - 17F + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156, + 0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x015c, 0x015c, + 0x0000, 0x0160, 0x0000, 0x0000, 0x0163, 0x0000, 0x0165, 0x0000, + 0x0167, 0x0167, 0x0167, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000, + 0x016f, 0x0000, 0x0171, 0x0171, 0x0000, 0x0174, 0x0000, 0x0176, + 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0184, 0x0184, + 0x0184, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x018e, + 0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x0000, 0x0195, 0x0000, + 0x0197, 
0x0000, 0x0000, 0x019a, 0x0000, 0x0000, 0x019d, 0x0000, + 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000, + 0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x0000, 0x01ad, 0x0000, + 0x01af, 0x0000, 0x01b1, 0x01b1, 0x0000, 0x01b4, 0x0000, 0x01b6, + 0x0000, 0x01b8, 0x0000, 0x01ba, 0x0000, 0x01bc, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x0000, + 0x01c7, 0x0000, 0x01c9, 0x0000, 0x01cb, 0x01cb, 0x01cb, 0x01cb, + 0x0000, 0x01d0, 0x0000, 0x01d2, 0x01d2, 0x0000, 0x01d5, 0x0000, + 0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x0000, 0x01dd, 0x0000, + 0x01df, 0x01df, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6, + 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee, + 0x0000, 0x01f0, 0x0000, 0x0000, 0x01f3, 0x0000, 0x01f5, 0x01f5, + 0x01f5, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x0000, + // Entry 200 - 23F + 0x01ff, 0x0000, 0x0000, 0x0202, 0x0000, 0x0204, 0x0204, 0x0000, + 0x0207, 0x0000, 0x0209, 0x0209, 0x0000, 0x020c, 0x020c, 0x0000, + 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x0000, + 0x0217, 0x0000, 0x0219, 0x0000, 0x021b, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0221, 0x0000, 0x0000, 0x0224, 0x0000, 0x0226, + 0x0226, 0x0000, 0x0229, 0x0000, 0x022b, 0x022b, 0x0000, 0x0000, + 0x022f, 0x022e, 0x022e, 0x0000, 0x0000, 0x0234, 0x0000, 0x0236, + 0x0000, 0x0238, 0x0000, 0x0244, 0x023a, 0x0244, 0x0244, 0x0244, + // Entry 240 - 27F + 0x0244, 0x0244, 0x0244, 0x0244, 0x023a, 0x0244, 0x0244, 0x0000, + 0x0247, 0x0247, 0x0247, 0x0000, 0x024b, 0x0000, 0x024d, 0x0000, + 0x024f, 0x024f, 0x0000, 0x0252, 0x0000, 0x0254, 0x0254, 0x0254, + 0x0254, 0x0254, 0x0254, 0x0000, 0x025b, 0x0000, 0x025d, 0x0000, + 0x025f, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000, + 0x0000, 0x0268, 0x0268, 0x0268, 0x0000, 0x026c, 0x0000, 0x026e, + 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0274, 0x0273, 0x0273, + 0x0000, 0x0278, 0x0000, 0x027a, 0x0000, 0x027c, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x0000, 0x0281, 0x0000, 0x0000, 0x0284, 0x0000, 0x0286, + 0x0286, 0x0286, 0x0286, 0x0000, 0x028b, 0x028b, 0x028b, 0x0000, + 0x028f, 0x028f, 0x028f, 0x028f, 0x028f, 0x0000, 0x0295, 0x0295, + 0x0295, 0x0295, 0x0000, 0x0000, 0x0000, 0x0000, 0x029d, 0x029d, + 0x029d, 0x0000, 0x02a1, 0x02a1, 0x02a1, 0x02a1, 0x0000, 0x0000, + 0x02a7, 0x02a7, 0x02a7, 0x02a7, 0x0000, 0x02ac, 0x0000, 0x02ae, + 0x02ae, 0x0000, 0x02b1, 0x0000, 0x02b3, 0x0000, 0x02b5, 0x02b5, + 0x0000, 0x0000, 0x02b9, 0x0000, 0x0000, 0x0000, 0x02bd, 0x0000, + // Entry 2C0 - 2FF + 0x02bf, 0x02bf, 0x0000, 0x0000, 0x02c3, 0x0000, 0x02c5, 0x0000, + 0x02c7, 0x0000, 0x02c9, 0x0000, 0x02cb, 0x0000, 0x02cd, 0x02cd, + 0x0000, 0x0000, 0x02d1, 0x0000, 0x02d3, 0x02d0, 0x02d0, 0x0000, + 0x0000, 0x02d8, 0x02d7, 0x02d7, 0x0000, 0x0000, 0x02dd, 0x0000, + 0x02df, 0x0000, 0x02e1, 0x0000, 0x0000, 0x02e4, 0x0000, 0x02e6, + 0x0000, 0x0000, 0x02e9, 0x0000, 0x02eb, 0x0000, 0x02ed, 0x0000, + 0x02ef, 0x02ef, 0x0000, 0x0000, 0x02f3, 0x02f2, 0x02f2, 0x0000, + 0x02f7, 0x0000, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x0000, + // Entry 300 - 33F + 0x02ff, 0x0300, 0x02ff, 0x0000, 0x0303, 0x0051, 0x00e6, +} // Size: 1574 bytes + +// Total table size 1574 bytes (1KiB); checksum: 895AAF0B diff --git a/vendor/golang.org/x/text/internal/language/compact/tables.go b/vendor/golang.org/x/text/internal/language/compact/tables.go new file mode 100644 index 00000000000..32af9de5996 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/tables.go @@ -0,0 +1,1015 @@ +// Code generated by running "go 
generate" in golang.org/x/text. DO NOT EDIT. + +package compact + +import "golang.org/x/text/internal/language" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +// NumCompactTags is the number of common tags. The maximum tag is +// NumCompactTags-1. +const NumCompactTags = 775 +const ( + undIndex ID = 0 + afIndex ID = 1 + afNAIndex ID = 2 + afZAIndex ID = 3 + agqIndex ID = 4 + agqCMIndex ID = 5 + akIndex ID = 6 + akGHIndex ID = 7 + amIndex ID = 8 + amETIndex ID = 9 + arIndex ID = 10 + ar001Index ID = 11 + arAEIndex ID = 12 + arBHIndex ID = 13 + arDJIndex ID = 14 + arDZIndex ID = 15 + arEGIndex ID = 16 + arEHIndex ID = 17 + arERIndex ID = 18 + arILIndex ID = 19 + arIQIndex ID = 20 + arJOIndex ID = 21 + arKMIndex ID = 22 + arKWIndex ID = 23 + arLBIndex ID = 24 + arLYIndex ID = 25 + arMAIndex ID = 26 + arMRIndex ID = 27 + arOMIndex ID = 28 + arPSIndex ID = 29 + arQAIndex ID = 30 + arSAIndex ID = 31 + arSDIndex ID = 32 + arSOIndex ID = 33 + arSSIndex ID = 34 + arSYIndex ID = 35 + arTDIndex ID = 36 + arTNIndex ID = 37 + arYEIndex ID = 38 + arsIndex ID = 39 + asIndex ID = 40 + asINIndex ID = 41 + asaIndex ID = 42 + asaTZIndex ID = 43 + astIndex ID = 44 + astESIndex ID = 45 + azIndex ID = 46 + azCyrlIndex ID = 47 + azCyrlAZIndex ID = 48 + azLatnIndex ID = 49 + azLatnAZIndex ID = 50 + basIndex ID = 51 + basCMIndex ID = 52 + beIndex ID = 53 + beBYIndex ID = 54 + bemIndex ID = 55 + bemZMIndex ID = 56 + bezIndex ID = 57 + bezTZIndex ID = 58 + bgIndex ID = 59 + bgBGIndex ID = 60 + bhIndex ID = 61 + bmIndex ID = 62 + bmMLIndex ID = 63 + bnIndex ID = 64 + bnBDIndex ID = 65 + bnINIndex ID = 66 + boIndex ID = 67 + boCNIndex ID = 68 + boINIndex ID = 69 + brIndex ID = 70 + brFRIndex ID = 71 + brxIndex ID = 72 + brxINIndex ID = 73 + bsIndex ID = 74 + bsCyrlIndex ID = 75 + bsCyrlBAIndex ID = 76 + bsLatnIndex ID = 77 + bsLatnBAIndex ID = 78 + caIndex ID = 79 + caADIndex ID = 80 + caESIndex ID = 81 + caFRIndex ID = 82 + caITIndex ID = 83 + ccpIndex ID = 84 + ccpBDIndex ID = 85 + ccpINIndex ID = 86 + ceIndex ID = 87 + ceRUIndex ID = 88 + cggIndex ID = 89 + cggUGIndex ID = 90 + chrIndex ID = 91 + chrUSIndex ID = 92 + ckbIndex ID = 93 + ckbIQIndex ID = 94 + ckbIRIndex ID = 95 + csIndex ID = 96 + csCZIndex ID = 97 + cuIndex ID = 98 + cuRUIndex ID = 99 + cyIndex ID = 100 + cyGBIndex ID = 101 + daIndex ID = 102 + daDKIndex ID = 103 + daGLIndex ID = 104 + davIndex ID = 105 + davKEIndex ID = 106 + deIndex ID = 107 + deATIndex ID = 108 + deBEIndex ID = 109 + deCHIndex ID = 110 + deDEIndex ID = 111 + deITIndex ID = 112 + deLIIndex ID = 113 + deLUIndex ID = 114 + djeIndex ID = 115 + djeNEIndex ID = 116 + dsbIndex ID = 117 + dsbDEIndex ID = 118 + duaIndex ID = 119 + duaCMIndex ID = 120 + dvIndex ID = 121 + dyoIndex ID = 122 + dyoSNIndex ID = 123 + dzIndex ID = 124 + dzBTIndex ID = 125 + ebuIndex ID = 126 + ebuKEIndex ID = 127 + eeIndex ID = 128 + eeGHIndex ID = 129 + eeTGIndex ID = 130 + elIndex ID = 131 + elCYIndex ID = 132 + elGRIndex ID = 133 + enIndex ID = 134 + en001Index ID = 135 + en150Index ID = 136 + enAGIndex ID = 137 + enAIIndex ID = 138 + enASIndex ID = 139 + enATIndex ID = 140 + enAUIndex ID = 141 + enBBIndex ID = 142 + enBEIndex ID = 143 + enBIIndex ID = 144 + enBMIndex ID = 145 + enBSIndex ID = 146 + enBWIndex ID = 147 + enBZIndex ID = 148 + enCAIndex ID = 149 + enCCIndex ID = 150 + enCHIndex ID = 151 + enCKIndex ID = 152 + enCMIndex ID = 153 + enCXIndex ID = 154 + enCYIndex ID = 155 + enDEIndex ID = 156 + enDGIndex ID = 157 + enDKIndex ID 
= 158 + enDMIndex ID = 159 + enERIndex ID = 160 + enFIIndex ID = 161 + enFJIndex ID = 162 + enFKIndex ID = 163 + enFMIndex ID = 164 + enGBIndex ID = 165 + enGDIndex ID = 166 + enGGIndex ID = 167 + enGHIndex ID = 168 + enGIIndex ID = 169 + enGMIndex ID = 170 + enGUIndex ID = 171 + enGYIndex ID = 172 + enHKIndex ID = 173 + enIEIndex ID = 174 + enILIndex ID = 175 + enIMIndex ID = 176 + enINIndex ID = 177 + enIOIndex ID = 178 + enJEIndex ID = 179 + enJMIndex ID = 180 + enKEIndex ID = 181 + enKIIndex ID = 182 + enKNIndex ID = 183 + enKYIndex ID = 184 + enLCIndex ID = 185 + enLRIndex ID = 186 + enLSIndex ID = 187 + enMGIndex ID = 188 + enMHIndex ID = 189 + enMOIndex ID = 190 + enMPIndex ID = 191 + enMSIndex ID = 192 + enMTIndex ID = 193 + enMUIndex ID = 194 + enMWIndex ID = 195 + enMYIndex ID = 196 + enNAIndex ID = 197 + enNFIndex ID = 198 + enNGIndex ID = 199 + enNLIndex ID = 200 + enNRIndex ID = 201 + enNUIndex ID = 202 + enNZIndex ID = 203 + enPGIndex ID = 204 + enPHIndex ID = 205 + enPKIndex ID = 206 + enPNIndex ID = 207 + enPRIndex ID = 208 + enPWIndex ID = 209 + enRWIndex ID = 210 + enSBIndex ID = 211 + enSCIndex ID = 212 + enSDIndex ID = 213 + enSEIndex ID = 214 + enSGIndex ID = 215 + enSHIndex ID = 216 + enSIIndex ID = 217 + enSLIndex ID = 218 + enSSIndex ID = 219 + enSXIndex ID = 220 + enSZIndex ID = 221 + enTCIndex ID = 222 + enTKIndex ID = 223 + enTOIndex ID = 224 + enTTIndex ID = 225 + enTVIndex ID = 226 + enTZIndex ID = 227 + enUGIndex ID = 228 + enUMIndex ID = 229 + enUSIndex ID = 230 + enVCIndex ID = 231 + enVGIndex ID = 232 + enVIIndex ID = 233 + enVUIndex ID = 234 + enWSIndex ID = 235 + enZAIndex ID = 236 + enZMIndex ID = 237 + enZWIndex ID = 238 + eoIndex ID = 239 + eo001Index ID = 240 + esIndex ID = 241 + es419Index ID = 242 + esARIndex ID = 243 + esBOIndex ID = 244 + esBRIndex ID = 245 + esBZIndex ID = 246 + esCLIndex ID = 247 + esCOIndex ID = 248 + esCRIndex ID = 249 + esCUIndex ID = 250 + esDOIndex ID = 251 + esEAIndex ID = 252 + esECIndex ID = 253 + esESIndex ID = 254 + esGQIndex ID = 255 + esGTIndex ID = 256 + esHNIndex ID = 257 + esICIndex ID = 258 + esMXIndex ID = 259 + esNIIndex ID = 260 + esPAIndex ID = 261 + esPEIndex ID = 262 + esPHIndex ID = 263 + esPRIndex ID = 264 + esPYIndex ID = 265 + esSVIndex ID = 266 + esUSIndex ID = 267 + esUYIndex ID = 268 + esVEIndex ID = 269 + etIndex ID = 270 + etEEIndex ID = 271 + euIndex ID = 272 + euESIndex ID = 273 + ewoIndex ID = 274 + ewoCMIndex ID = 275 + faIndex ID = 276 + faAFIndex ID = 277 + faIRIndex ID = 278 + ffIndex ID = 279 + ffCMIndex ID = 280 + ffGNIndex ID = 281 + ffMRIndex ID = 282 + ffSNIndex ID = 283 + fiIndex ID = 284 + fiFIIndex ID = 285 + filIndex ID = 286 + filPHIndex ID = 287 + foIndex ID = 288 + foDKIndex ID = 289 + foFOIndex ID = 290 + frIndex ID = 291 + frBEIndex ID = 292 + frBFIndex ID = 293 + frBIIndex ID = 294 + frBJIndex ID = 295 + frBLIndex ID = 296 + frCAIndex ID = 297 + frCDIndex ID = 298 + frCFIndex ID = 299 + frCGIndex ID = 300 + frCHIndex ID = 301 + frCIIndex ID = 302 + frCMIndex ID = 303 + frDJIndex ID = 304 + frDZIndex ID = 305 + frFRIndex ID = 306 + frGAIndex ID = 307 + frGFIndex ID = 308 + frGNIndex ID = 309 + frGPIndex ID = 310 + frGQIndex ID = 311 + frHTIndex ID = 312 + frKMIndex ID = 313 + frLUIndex ID = 314 + frMAIndex ID = 315 + frMCIndex ID = 316 + frMFIndex ID = 317 + frMGIndex ID = 318 + frMLIndex ID = 319 + frMQIndex ID = 320 + frMRIndex ID = 321 + frMUIndex ID = 322 + frNCIndex ID = 323 + frNEIndex ID = 324 + frPFIndex ID = 325 + frPMIndex ID = 326 + frREIndex ID = 327 + frRWIndex ID 
= 328 + frSCIndex ID = 329 + frSNIndex ID = 330 + frSYIndex ID = 331 + frTDIndex ID = 332 + frTGIndex ID = 333 + frTNIndex ID = 334 + frVUIndex ID = 335 + frWFIndex ID = 336 + frYTIndex ID = 337 + furIndex ID = 338 + furITIndex ID = 339 + fyIndex ID = 340 + fyNLIndex ID = 341 + gaIndex ID = 342 + gaIEIndex ID = 343 + gdIndex ID = 344 + gdGBIndex ID = 345 + glIndex ID = 346 + glESIndex ID = 347 + gswIndex ID = 348 + gswCHIndex ID = 349 + gswFRIndex ID = 350 + gswLIIndex ID = 351 + guIndex ID = 352 + guINIndex ID = 353 + guwIndex ID = 354 + guzIndex ID = 355 + guzKEIndex ID = 356 + gvIndex ID = 357 + gvIMIndex ID = 358 + haIndex ID = 359 + haGHIndex ID = 360 + haNEIndex ID = 361 + haNGIndex ID = 362 + hawIndex ID = 363 + hawUSIndex ID = 364 + heIndex ID = 365 + heILIndex ID = 366 + hiIndex ID = 367 + hiINIndex ID = 368 + hrIndex ID = 369 + hrBAIndex ID = 370 + hrHRIndex ID = 371 + hsbIndex ID = 372 + hsbDEIndex ID = 373 + huIndex ID = 374 + huHUIndex ID = 375 + hyIndex ID = 376 + hyAMIndex ID = 377 + idIndex ID = 378 + idIDIndex ID = 379 + igIndex ID = 380 + igNGIndex ID = 381 + iiIndex ID = 382 + iiCNIndex ID = 383 + inIndex ID = 384 + ioIndex ID = 385 + isIndex ID = 386 + isISIndex ID = 387 + itIndex ID = 388 + itCHIndex ID = 389 + itITIndex ID = 390 + itSMIndex ID = 391 + itVAIndex ID = 392 + iuIndex ID = 393 + iwIndex ID = 394 + jaIndex ID = 395 + jaJPIndex ID = 396 + jboIndex ID = 397 + jgoIndex ID = 398 + jgoCMIndex ID = 399 + jiIndex ID = 400 + jmcIndex ID = 401 + jmcTZIndex ID = 402 + jvIndex ID = 403 + jwIndex ID = 404 + kaIndex ID = 405 + kaGEIndex ID = 406 + kabIndex ID = 407 + kabDZIndex ID = 408 + kajIndex ID = 409 + kamIndex ID = 410 + kamKEIndex ID = 411 + kcgIndex ID = 412 + kdeIndex ID = 413 + kdeTZIndex ID = 414 + keaIndex ID = 415 + keaCVIndex ID = 416 + khqIndex ID = 417 + khqMLIndex ID = 418 + kiIndex ID = 419 + kiKEIndex ID = 420 + kkIndex ID = 421 + kkKZIndex ID = 422 + kkjIndex ID = 423 + kkjCMIndex ID = 424 + klIndex ID = 425 + klGLIndex ID = 426 + klnIndex ID = 427 + klnKEIndex ID = 428 + kmIndex ID = 429 + kmKHIndex ID = 430 + knIndex ID = 431 + knINIndex ID = 432 + koIndex ID = 433 + koKPIndex ID = 434 + koKRIndex ID = 435 + kokIndex ID = 436 + kokINIndex ID = 437 + ksIndex ID = 438 + ksINIndex ID = 439 + ksbIndex ID = 440 + ksbTZIndex ID = 441 + ksfIndex ID = 442 + ksfCMIndex ID = 443 + kshIndex ID = 444 + kshDEIndex ID = 445 + kuIndex ID = 446 + kwIndex ID = 447 + kwGBIndex ID = 448 + kyIndex ID = 449 + kyKGIndex ID = 450 + lagIndex ID = 451 + lagTZIndex ID = 452 + lbIndex ID = 453 + lbLUIndex ID = 454 + lgIndex ID = 455 + lgUGIndex ID = 456 + lktIndex ID = 457 + lktUSIndex ID = 458 + lnIndex ID = 459 + lnAOIndex ID = 460 + lnCDIndex ID = 461 + lnCFIndex ID = 462 + lnCGIndex ID = 463 + loIndex ID = 464 + loLAIndex ID = 465 + lrcIndex ID = 466 + lrcIQIndex ID = 467 + lrcIRIndex ID = 468 + ltIndex ID = 469 + ltLTIndex ID = 470 + luIndex ID = 471 + luCDIndex ID = 472 + luoIndex ID = 473 + luoKEIndex ID = 474 + luyIndex ID = 475 + luyKEIndex ID = 476 + lvIndex ID = 477 + lvLVIndex ID = 478 + masIndex ID = 479 + masKEIndex ID = 480 + masTZIndex ID = 481 + merIndex ID = 482 + merKEIndex ID = 483 + mfeIndex ID = 484 + mfeMUIndex ID = 485 + mgIndex ID = 486 + mgMGIndex ID = 487 + mghIndex ID = 488 + mghMZIndex ID = 489 + mgoIndex ID = 490 + mgoCMIndex ID = 491 + mkIndex ID = 492 + mkMKIndex ID = 493 + mlIndex ID = 494 + mlINIndex ID = 495 + mnIndex ID = 496 + mnMNIndex ID = 497 + moIndex ID = 498 + mrIndex ID = 499 + mrINIndex ID = 500 + msIndex ID = 501 + msBNIndex ID 
= 502 + msMYIndex ID = 503 + msSGIndex ID = 504 + mtIndex ID = 505 + mtMTIndex ID = 506 + muaIndex ID = 507 + muaCMIndex ID = 508 + myIndex ID = 509 + myMMIndex ID = 510 + mznIndex ID = 511 + mznIRIndex ID = 512 + nahIndex ID = 513 + naqIndex ID = 514 + naqNAIndex ID = 515 + nbIndex ID = 516 + nbNOIndex ID = 517 + nbSJIndex ID = 518 + ndIndex ID = 519 + ndZWIndex ID = 520 + ndsIndex ID = 521 + ndsDEIndex ID = 522 + ndsNLIndex ID = 523 + neIndex ID = 524 + neINIndex ID = 525 + neNPIndex ID = 526 + nlIndex ID = 527 + nlAWIndex ID = 528 + nlBEIndex ID = 529 + nlBQIndex ID = 530 + nlCWIndex ID = 531 + nlNLIndex ID = 532 + nlSRIndex ID = 533 + nlSXIndex ID = 534 + nmgIndex ID = 535 + nmgCMIndex ID = 536 + nnIndex ID = 537 + nnNOIndex ID = 538 + nnhIndex ID = 539 + nnhCMIndex ID = 540 + noIndex ID = 541 + nqoIndex ID = 542 + nrIndex ID = 543 + nsoIndex ID = 544 + nusIndex ID = 545 + nusSSIndex ID = 546 + nyIndex ID = 547 + nynIndex ID = 548 + nynUGIndex ID = 549 + omIndex ID = 550 + omETIndex ID = 551 + omKEIndex ID = 552 + orIndex ID = 553 + orINIndex ID = 554 + osIndex ID = 555 + osGEIndex ID = 556 + osRUIndex ID = 557 + paIndex ID = 558 + paArabIndex ID = 559 + paArabPKIndex ID = 560 + paGuruIndex ID = 561 + paGuruINIndex ID = 562 + papIndex ID = 563 + plIndex ID = 564 + plPLIndex ID = 565 + prgIndex ID = 566 + prg001Index ID = 567 + psIndex ID = 568 + psAFIndex ID = 569 + ptIndex ID = 570 + ptAOIndex ID = 571 + ptBRIndex ID = 572 + ptCHIndex ID = 573 + ptCVIndex ID = 574 + ptGQIndex ID = 575 + ptGWIndex ID = 576 + ptLUIndex ID = 577 + ptMOIndex ID = 578 + ptMZIndex ID = 579 + ptPTIndex ID = 580 + ptSTIndex ID = 581 + ptTLIndex ID = 582 + quIndex ID = 583 + quBOIndex ID = 584 + quECIndex ID = 585 + quPEIndex ID = 586 + rmIndex ID = 587 + rmCHIndex ID = 588 + rnIndex ID = 589 + rnBIIndex ID = 590 + roIndex ID = 591 + roMDIndex ID = 592 + roROIndex ID = 593 + rofIndex ID = 594 + rofTZIndex ID = 595 + ruIndex ID = 596 + ruBYIndex ID = 597 + ruKGIndex ID = 598 + ruKZIndex ID = 599 + ruMDIndex ID = 600 + ruRUIndex ID = 601 + ruUAIndex ID = 602 + rwIndex ID = 603 + rwRWIndex ID = 604 + rwkIndex ID = 605 + rwkTZIndex ID = 606 + sahIndex ID = 607 + sahRUIndex ID = 608 + saqIndex ID = 609 + saqKEIndex ID = 610 + sbpIndex ID = 611 + sbpTZIndex ID = 612 + sdIndex ID = 613 + sdPKIndex ID = 614 + sdhIndex ID = 615 + seIndex ID = 616 + seFIIndex ID = 617 + seNOIndex ID = 618 + seSEIndex ID = 619 + sehIndex ID = 620 + sehMZIndex ID = 621 + sesIndex ID = 622 + sesMLIndex ID = 623 + sgIndex ID = 624 + sgCFIndex ID = 625 + shIndex ID = 626 + shiIndex ID = 627 + shiLatnIndex ID = 628 + shiLatnMAIndex ID = 629 + shiTfngIndex ID = 630 + shiTfngMAIndex ID = 631 + siIndex ID = 632 + siLKIndex ID = 633 + skIndex ID = 634 + skSKIndex ID = 635 + slIndex ID = 636 + slSIIndex ID = 637 + smaIndex ID = 638 + smiIndex ID = 639 + smjIndex ID = 640 + smnIndex ID = 641 + smnFIIndex ID = 642 + smsIndex ID = 643 + snIndex ID = 644 + snZWIndex ID = 645 + soIndex ID = 646 + soDJIndex ID = 647 + soETIndex ID = 648 + soKEIndex ID = 649 + soSOIndex ID = 650 + sqIndex ID = 651 + sqALIndex ID = 652 + sqMKIndex ID = 653 + sqXKIndex ID = 654 + srIndex ID = 655 + srCyrlIndex ID = 656 + srCyrlBAIndex ID = 657 + srCyrlMEIndex ID = 658 + srCyrlRSIndex ID = 659 + srCyrlXKIndex ID = 660 + srLatnIndex ID = 661 + srLatnBAIndex ID = 662 + srLatnMEIndex ID = 663 + srLatnRSIndex ID = 664 + srLatnXKIndex ID = 665 + ssIndex ID = 666 + ssyIndex ID = 667 + stIndex ID = 668 + svIndex ID = 669 + svAXIndex ID = 670 + svFIIndex ID = 671 + svSEIndex ID = 
672 + swIndex ID = 673 + swCDIndex ID = 674 + swKEIndex ID = 675 + swTZIndex ID = 676 + swUGIndex ID = 677 + syrIndex ID = 678 + taIndex ID = 679 + taINIndex ID = 680 + taLKIndex ID = 681 + taMYIndex ID = 682 + taSGIndex ID = 683 + teIndex ID = 684 + teINIndex ID = 685 + teoIndex ID = 686 + teoKEIndex ID = 687 + teoUGIndex ID = 688 + tgIndex ID = 689 + tgTJIndex ID = 690 + thIndex ID = 691 + thTHIndex ID = 692 + tiIndex ID = 693 + tiERIndex ID = 694 + tiETIndex ID = 695 + tigIndex ID = 696 + tkIndex ID = 697 + tkTMIndex ID = 698 + tlIndex ID = 699 + tnIndex ID = 700 + toIndex ID = 701 + toTOIndex ID = 702 + trIndex ID = 703 + trCYIndex ID = 704 + trTRIndex ID = 705 + tsIndex ID = 706 + ttIndex ID = 707 + ttRUIndex ID = 708 + twqIndex ID = 709 + twqNEIndex ID = 710 + tzmIndex ID = 711 + tzmMAIndex ID = 712 + ugIndex ID = 713 + ugCNIndex ID = 714 + ukIndex ID = 715 + ukUAIndex ID = 716 + urIndex ID = 717 + urINIndex ID = 718 + urPKIndex ID = 719 + uzIndex ID = 720 + uzArabIndex ID = 721 + uzArabAFIndex ID = 722 + uzCyrlIndex ID = 723 + uzCyrlUZIndex ID = 724 + uzLatnIndex ID = 725 + uzLatnUZIndex ID = 726 + vaiIndex ID = 727 + vaiLatnIndex ID = 728 + vaiLatnLRIndex ID = 729 + vaiVaiiIndex ID = 730 + vaiVaiiLRIndex ID = 731 + veIndex ID = 732 + viIndex ID = 733 + viVNIndex ID = 734 + voIndex ID = 735 + vo001Index ID = 736 + vunIndex ID = 737 + vunTZIndex ID = 738 + waIndex ID = 739 + waeIndex ID = 740 + waeCHIndex ID = 741 + woIndex ID = 742 + woSNIndex ID = 743 + xhIndex ID = 744 + xogIndex ID = 745 + xogUGIndex ID = 746 + yavIndex ID = 747 + yavCMIndex ID = 748 + yiIndex ID = 749 + yi001Index ID = 750 + yoIndex ID = 751 + yoBJIndex ID = 752 + yoNGIndex ID = 753 + yueIndex ID = 754 + yueHansIndex ID = 755 + yueHansCNIndex ID = 756 + yueHantIndex ID = 757 + yueHantHKIndex ID = 758 + zghIndex ID = 759 + zghMAIndex ID = 760 + zhIndex ID = 761 + zhHansIndex ID = 762 + zhHansCNIndex ID = 763 + zhHansHKIndex ID = 764 + zhHansMOIndex ID = 765 + zhHansSGIndex ID = 766 + zhHantIndex ID = 767 + zhHantHKIndex ID = 768 + zhHantMOIndex ID = 769 + zhHantTWIndex ID = 770 + zuIndex ID = 771 + zuZAIndex ID = 772 + caESvalenciaIndex ID = 773 + enUSuvaposixIndex ID = 774 +) + +var coreTags = []language.CompactCoreInfo{ // 773 elements + // Entry 0 - 1F + 0x00000000, 0x01600000, 0x016000d2, 0x01600161, + 0x01c00000, 0x01c00052, 0x02100000, 0x02100080, + 0x02700000, 0x0270006f, 0x03a00000, 0x03a00001, + 0x03a00023, 0x03a00039, 0x03a00062, 0x03a00067, + 0x03a0006b, 0x03a0006c, 0x03a0006d, 0x03a00097, + 0x03a0009b, 0x03a000a1, 0x03a000a8, 0x03a000ac, + 0x03a000b0, 0x03a000b9, 0x03a000ba, 0x03a000c9, + 0x03a000e1, 0x03a000ed, 0x03a000f3, 0x03a00108, + // Entry 20 - 3F + 0x03a0010b, 0x03a00115, 0x03a00117, 0x03a0011c, + 0x03a00120, 0x03a00128, 0x03a0015e, 0x04000000, + 0x04300000, 0x04300099, 0x04400000, 0x0440012f, + 0x04800000, 0x0480006e, 0x05800000, 0x05820000, + 0x05820032, 0x0585a000, 0x0585a032, 0x05e00000, + 0x05e00052, 0x07100000, 0x07100047, 0x07500000, + 0x07500162, 0x07900000, 0x0790012f, 0x07e00000, + 0x07e00038, 0x08200000, 0x0a000000, 0x0a0000c3, + // Entry 40 - 5F + 0x0a500000, 0x0a500035, 0x0a500099, 0x0a900000, + 0x0a900053, 0x0a900099, 0x0b200000, 0x0b200078, + 0x0b500000, 0x0b500099, 0x0b700000, 0x0b720000, + 0x0b720033, 0x0b75a000, 0x0b75a033, 0x0d700000, + 0x0d700022, 0x0d70006e, 0x0d700078, 0x0d70009e, + 0x0db00000, 0x0db00035, 0x0db00099, 0x0dc00000, + 0x0dc00106, 0x0df00000, 0x0df00131, 0x0e500000, + 0x0e500135, 0x0e900000, 0x0e90009b, 0x0e90009c, + // Entry 60 - 7F + 0x0fa00000, 0x0fa0005e, 
0x0fe00000, 0x0fe00106, + 0x10000000, 0x1000007b, 0x10100000, 0x10100063, + 0x10100082, 0x10800000, 0x108000a4, 0x10d00000, + 0x10d0002e, 0x10d00036, 0x10d0004e, 0x10d00060, + 0x10d0009e, 0x10d000b2, 0x10d000b7, 0x11700000, + 0x117000d4, 0x11f00000, 0x11f00060, 0x12400000, + 0x12400052, 0x12800000, 0x12b00000, 0x12b00114, + 0x12d00000, 0x12d00043, 0x12f00000, 0x12f000a4, + // Entry 80 - 9F + 0x13000000, 0x13000080, 0x13000122, 0x13600000, + 0x1360005d, 0x13600087, 0x13900000, 0x13900001, + 0x1390001a, 0x13900025, 0x13900026, 0x1390002d, + 0x1390002e, 0x1390002f, 0x13900034, 0x13900036, + 0x1390003a, 0x1390003d, 0x13900042, 0x13900046, + 0x13900048, 0x13900049, 0x1390004a, 0x1390004e, + 0x13900050, 0x13900052, 0x1390005c, 0x1390005d, + 0x13900060, 0x13900061, 0x13900063, 0x13900064, + // Entry A0 - BF + 0x1390006d, 0x13900072, 0x13900073, 0x13900074, + 0x13900075, 0x1390007b, 0x1390007c, 0x1390007f, + 0x13900080, 0x13900081, 0x13900083, 0x1390008a, + 0x1390008c, 0x1390008d, 0x13900096, 0x13900097, + 0x13900098, 0x13900099, 0x1390009a, 0x1390009f, + 0x139000a0, 0x139000a4, 0x139000a7, 0x139000a9, + 0x139000ad, 0x139000b1, 0x139000b4, 0x139000b5, + 0x139000bf, 0x139000c0, 0x139000c6, 0x139000c7, + // Entry C0 - DF + 0x139000ca, 0x139000cb, 0x139000cc, 0x139000ce, + 0x139000d0, 0x139000d2, 0x139000d5, 0x139000d6, + 0x139000d9, 0x139000dd, 0x139000df, 0x139000e0, + 0x139000e6, 0x139000e7, 0x139000e8, 0x139000eb, + 0x139000ec, 0x139000f0, 0x13900107, 0x13900109, + 0x1390010a, 0x1390010b, 0x1390010c, 0x1390010d, + 0x1390010e, 0x1390010f, 0x13900112, 0x13900117, + 0x1390011b, 0x1390011d, 0x1390011f, 0x13900125, + // Entry E0 - FF + 0x13900129, 0x1390012c, 0x1390012d, 0x1390012f, + 0x13900131, 0x13900133, 0x13900135, 0x13900139, + 0x1390013c, 0x1390013d, 0x1390013f, 0x13900142, + 0x13900161, 0x13900162, 0x13900164, 0x13c00000, + 0x13c00001, 0x13e00000, 0x13e0001f, 0x13e0002c, + 0x13e0003f, 0x13e00041, 0x13e00048, 0x13e00051, + 0x13e00054, 0x13e00056, 0x13e00059, 0x13e00065, + 0x13e00068, 0x13e00069, 0x13e0006e, 0x13e00086, + // Entry 100 - 11F + 0x13e00089, 0x13e0008f, 0x13e00094, 0x13e000cf, + 0x13e000d8, 0x13e000e2, 0x13e000e4, 0x13e000e7, + 0x13e000ec, 0x13e000f1, 0x13e0011a, 0x13e00135, + 0x13e00136, 0x13e0013b, 0x14000000, 0x1400006a, + 0x14500000, 0x1450006e, 0x14600000, 0x14600052, + 0x14800000, 0x14800024, 0x1480009c, 0x14e00000, + 0x14e00052, 0x14e00084, 0x14e000c9, 0x14e00114, + 0x15100000, 0x15100072, 0x15300000, 0x153000e7, + // Entry 120 - 13F + 0x15800000, 0x15800063, 0x15800076, 0x15e00000, + 0x15e00036, 0x15e00037, 0x15e0003a, 0x15e0003b, + 0x15e0003c, 0x15e00049, 0x15e0004b, 0x15e0004c, + 0x15e0004d, 0x15e0004e, 0x15e0004f, 0x15e00052, + 0x15e00062, 0x15e00067, 0x15e00078, 0x15e0007a, + 0x15e0007e, 0x15e00084, 0x15e00085, 0x15e00086, + 0x15e00091, 0x15e000a8, 0x15e000b7, 0x15e000ba, + 0x15e000bb, 0x15e000be, 0x15e000bf, 0x15e000c3, + // Entry 140 - 15F + 0x15e000c8, 0x15e000c9, 0x15e000cc, 0x15e000d3, + 0x15e000d4, 0x15e000e5, 0x15e000ea, 0x15e00102, + 0x15e00107, 0x15e0010a, 0x15e00114, 0x15e0011c, + 0x15e00120, 0x15e00122, 0x15e00128, 0x15e0013f, + 0x15e00140, 0x15e0015f, 0x16900000, 0x1690009e, + 0x16d00000, 0x16d000d9, 0x16e00000, 0x16e00096, + 0x17e00000, 0x17e0007b, 0x19000000, 0x1900006e, + 0x1a300000, 0x1a30004e, 0x1a300078, 0x1a3000b2, + // Entry 160 - 17F + 0x1a400000, 0x1a400099, 0x1a900000, 0x1ab00000, + 0x1ab000a4, 0x1ac00000, 0x1ac00098, 0x1b400000, + 0x1b400080, 0x1b4000d4, 0x1b4000d6, 0x1b800000, + 0x1b800135, 0x1bc00000, 0x1bc00097, 0x1be00000, + 0x1be00099, 
0x1d100000, 0x1d100033, 0x1d100090, + 0x1d200000, 0x1d200060, 0x1d500000, 0x1d500092, + 0x1d700000, 0x1d700028, 0x1e100000, 0x1e100095, + 0x1e700000, 0x1e7000d6, 0x1ea00000, 0x1ea00053, + // Entry 180 - 19F + 0x1f300000, 0x1f500000, 0x1f800000, 0x1f80009d, + 0x1f900000, 0x1f90004e, 0x1f90009e, 0x1f900113, + 0x1f900138, 0x1fa00000, 0x1fb00000, 0x20000000, + 0x200000a2, 0x20300000, 0x20700000, 0x20700052, + 0x20800000, 0x20a00000, 0x20a0012f, 0x20e00000, + 0x20f00000, 0x21000000, 0x2100007d, 0x21200000, + 0x21200067, 0x21600000, 0x21700000, 0x217000a4, + 0x21f00000, 0x22300000, 0x2230012f, 0x22700000, + // Entry 1A0 - 1BF + 0x2270005a, 0x23400000, 0x234000c3, 0x23900000, + 0x239000a4, 0x24200000, 0x242000ae, 0x24400000, + 0x24400052, 0x24500000, 0x24500082, 0x24600000, + 0x246000a4, 0x24a00000, 0x24a000a6, 0x25100000, + 0x25100099, 0x25400000, 0x254000aa, 0x254000ab, + 0x25600000, 0x25600099, 0x26a00000, 0x26a00099, + 0x26b00000, 0x26b0012f, 0x26d00000, 0x26d00052, + 0x26e00000, 0x26e00060, 0x27400000, 0x28100000, + // Entry 1C0 - 1DF + 0x2810007b, 0x28a00000, 0x28a000a5, 0x29100000, + 0x2910012f, 0x29500000, 0x295000b7, 0x2a300000, + 0x2a300131, 0x2af00000, 0x2af00135, 0x2b500000, + 0x2b50002a, 0x2b50004b, 0x2b50004c, 0x2b50004d, + 0x2b800000, 0x2b8000af, 0x2bf00000, 0x2bf0009b, + 0x2bf0009c, 0x2c000000, 0x2c0000b6, 0x2c200000, + 0x2c20004b, 0x2c400000, 0x2c4000a4, 0x2c500000, + 0x2c5000a4, 0x2c700000, 0x2c7000b8, 0x2d100000, + // Entry 1E0 - 1FF + 0x2d1000a4, 0x2d10012f, 0x2e900000, 0x2e9000a4, + 0x2ed00000, 0x2ed000cc, 0x2f100000, 0x2f1000bf, + 0x2f200000, 0x2f2000d1, 0x2f400000, 0x2f400052, + 0x2ff00000, 0x2ff000c2, 0x30400000, 0x30400099, + 0x30b00000, 0x30b000c5, 0x31000000, 0x31b00000, + 0x31b00099, 0x31f00000, 0x31f0003e, 0x31f000d0, + 0x31f0010d, 0x32000000, 0x320000cb, 0x32500000, + 0x32500052, 0x33100000, 0x331000c4, 0x33a00000, + // Entry 200 - 21F + 0x33a0009c, 0x34100000, 0x34500000, 0x345000d2, + 0x34700000, 0x347000da, 0x34700110, 0x34e00000, + 0x34e00164, 0x35000000, 0x35000060, 0x350000d9, + 0x35100000, 0x35100099, 0x351000db, 0x36700000, + 0x36700030, 0x36700036, 0x36700040, 0x3670005b, + 0x367000d9, 0x36700116, 0x3670011b, 0x36800000, + 0x36800052, 0x36a00000, 0x36a000da, 0x36c00000, + 0x36c00052, 0x36f00000, 0x37500000, 0x37600000, + // Entry 220 - 23F + 0x37a00000, 0x38000000, 0x38000117, 0x38700000, + 0x38900000, 0x38900131, 0x39000000, 0x3900006f, + 0x390000a4, 0x39500000, 0x39500099, 0x39800000, + 0x3980007d, 0x39800106, 0x39d00000, 0x39d05000, + 0x39d050e8, 0x39d36000, 0x39d36099, 0x3a100000, + 0x3b300000, 0x3b3000e9, 0x3bd00000, 0x3bd00001, + 0x3be00000, 0x3be00024, 0x3c000000, 0x3c00002a, + 0x3c000041, 0x3c00004e, 0x3c00005a, 0x3c000086, + // Entry 240 - 25F + 0x3c00008b, 0x3c0000b7, 0x3c0000c6, 0x3c0000d1, + 0x3c0000ee, 0x3c000118, 0x3c000126, 0x3c400000, + 0x3c40003f, 0x3c400069, 0x3c4000e4, 0x3d400000, + 0x3d40004e, 0x3d900000, 0x3d90003a, 0x3dc00000, + 0x3dc000bc, 0x3dc00104, 0x3de00000, 0x3de0012f, + 0x3e200000, 0x3e200047, 0x3e2000a5, 0x3e2000ae, + 0x3e2000bc, 0x3e200106, 0x3e200130, 0x3e500000, + 0x3e500107, 0x3e600000, 0x3e60012f, 0x3eb00000, + // Entry 260 - 27F + 0x3eb00106, 0x3ec00000, 0x3ec000a4, 0x3f300000, + 0x3f30012f, 0x3fa00000, 0x3fa000e8, 0x3fc00000, + 0x3fd00000, 0x3fd00072, 0x3fd000da, 0x3fd0010c, + 0x3ff00000, 0x3ff000d1, 0x40100000, 0x401000c3, + 0x40200000, 0x4020004c, 0x40700000, 0x40800000, + 0x4085a000, 0x4085a0ba, 0x408e8000, 0x408e80ba, + 0x40c00000, 0x40c000b3, 0x41200000, 0x41200111, + 0x41600000, 0x4160010f, 0x41c00000, 0x41d00000, 
+ // Entry 280 - 29F + 0x41e00000, 0x41f00000, 0x41f00072, 0x42200000, + 0x42300000, 0x42300164, 0x42900000, 0x42900062, + 0x4290006f, 0x429000a4, 0x42900115, 0x43100000, + 0x43100027, 0x431000c2, 0x4310014d, 0x43200000, + 0x43220000, 0x43220033, 0x432200bd, 0x43220105, + 0x4322014d, 0x4325a000, 0x4325a033, 0x4325a0bd, + 0x4325a105, 0x4325a14d, 0x43700000, 0x43a00000, + 0x43b00000, 0x44400000, 0x44400031, 0x44400072, + // Entry 2A0 - 2BF + 0x4440010c, 0x44500000, 0x4450004b, 0x445000a4, + 0x4450012f, 0x44500131, 0x44e00000, 0x45000000, + 0x45000099, 0x450000b3, 0x450000d0, 0x4500010d, + 0x46100000, 0x46100099, 0x46400000, 0x464000a4, + 0x46400131, 0x46700000, 0x46700124, 0x46b00000, + 0x46b00123, 0x46f00000, 0x46f0006d, 0x46f0006f, + 0x47100000, 0x47600000, 0x47600127, 0x47a00000, + 0x48000000, 0x48200000, 0x48200129, 0x48a00000, + // Entry 2C0 - 2DF + 0x48a0005d, 0x48a0012b, 0x48e00000, 0x49400000, + 0x49400106, 0x4a400000, 0x4a4000d4, 0x4a900000, + 0x4a9000ba, 0x4ac00000, 0x4ac00053, 0x4ae00000, + 0x4ae00130, 0x4b400000, 0x4b400099, 0x4b4000e8, + 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000, + 0x4bc20137, 0x4bc5a000, 0x4bc5a137, 0x4be00000, + 0x4be5a000, 0x4be5a0b4, 0x4bef1000, 0x4bef10b4, + 0x4c000000, 0x4c300000, 0x4c30013e, 0x4c900000, + // Entry 2E0 - 2FF + 0x4c900001, 0x4cc00000, 0x4cc0012f, 0x4ce00000, + 0x4cf00000, 0x4cf0004e, 0x4e500000, 0x4e500114, + 0x4f200000, 0x4fb00000, 0x4fb00131, 0x50900000, + 0x50900052, 0x51200000, 0x51200001, 0x51800000, + 0x5180003b, 0x518000d6, 0x51f00000, 0x51f3b000, + 0x51f3b053, 0x51f3c000, 0x51f3c08d, 0x52800000, + 0x528000ba, 0x52900000, 0x5293b000, 0x5293b053, + 0x5293b08d, 0x5293b0c6, 0x5293b10d, 0x5293c000, + // Entry 300 - 31F + 0x5293c08d, 0x5293c0c6, 0x5293c12e, 0x52f00000, + 0x52f00161, +} // Size: 3116 bytes + +const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix" + +// Total table size 3147 bytes (3KiB); checksum: 6772C83C diff --git a/vendor/golang.org/x/text/internal/language/compact/tags.go b/vendor/golang.org/x/text/internal/language/compact/tags.go new file mode 100644 index 00000000000..ca135d295ae --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/tags.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package compact + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag{language: afIndex, locale: afIndex} + Amharic Tag = Tag{language: amIndex, locale: amIndex} + Arabic Tag = Tag{language: arIndex, locale: arIndex} + ModernStandardArabic Tag = Tag{language: ar001Index, locale: ar001Index} + Azerbaijani Tag = Tag{language: azIndex, locale: azIndex} + Bulgarian Tag = Tag{language: bgIndex, locale: bgIndex} + Bengali Tag = Tag{language: bnIndex, locale: bnIndex} + Catalan Tag = Tag{language: caIndex, locale: caIndex} + Czech Tag = Tag{language: csIndex, locale: csIndex} + Danish Tag = Tag{language: daIndex, locale: daIndex} + German Tag = Tag{language: deIndex, locale: deIndex} + Greek Tag = Tag{language: elIndex, locale: elIndex} + English Tag = Tag{language: enIndex, locale: enIndex} + AmericanEnglish Tag = Tag{language: enUSIndex, locale: enUSIndex} + BritishEnglish Tag = Tag{language: enGBIndex, locale: enGBIndex} + Spanish Tag = Tag{language: esIndex, locale: esIndex} + EuropeanSpanish Tag = Tag{language: esESIndex, locale: esESIndex} + LatinAmericanSpanish Tag = Tag{language: es419Index, locale: es419Index} + Estonian Tag = Tag{language: etIndex, locale: etIndex} + Persian Tag = Tag{language: faIndex, locale: faIndex} + Finnish Tag = Tag{language: fiIndex, locale: fiIndex} + Filipino Tag = Tag{language: filIndex, locale: filIndex} + French Tag = Tag{language: frIndex, locale: frIndex} + CanadianFrench Tag = Tag{language: frCAIndex, locale: frCAIndex} + Gujarati Tag = Tag{language: guIndex, locale: guIndex} + Hebrew Tag = Tag{language: heIndex, locale: heIndex} + Hindi Tag = Tag{language: hiIndex, locale: hiIndex} + Croatian Tag = Tag{language: hrIndex, locale: hrIndex} + Hungarian Tag = Tag{language: huIndex, locale: huIndex} + Armenian Tag = Tag{language: hyIndex, locale: hyIndex} + Indonesian Tag = Tag{language: idIndex, locale: idIndex} + Icelandic Tag = Tag{language: isIndex, locale: isIndex} + Italian Tag = Tag{language: itIndex, locale: itIndex} + Japanese Tag = Tag{language: jaIndex, locale: jaIndex} + Georgian Tag = Tag{language: kaIndex, locale: kaIndex} + Kazakh Tag = Tag{language: kkIndex, locale: kkIndex} + Khmer Tag = Tag{language: kmIndex, locale: kmIndex} + Kannada Tag = Tag{language: knIndex, locale: knIndex} + Korean Tag = Tag{language: koIndex, locale: koIndex} + Kirghiz Tag = Tag{language: kyIndex, locale: kyIndex} + Lao Tag = Tag{language: loIndex, locale: loIndex} + Lithuanian Tag = Tag{language: ltIndex, locale: ltIndex} + Latvian Tag = Tag{language: lvIndex, locale: lvIndex} + Macedonian Tag = Tag{language: mkIndex, locale: mkIndex} + Malayalam Tag = Tag{language: mlIndex, locale: mlIndex} + Mongolian Tag = Tag{language: mnIndex, locale: mnIndex} + Marathi Tag = Tag{language: mrIndex, locale: mrIndex} + Malay Tag = Tag{language: msIndex, locale: msIndex} + Burmese Tag = Tag{language: myIndex, locale: myIndex} + Nepali Tag = Tag{language: neIndex, locale: neIndex} + Dutch Tag = Tag{language: nlIndex, locale: nlIndex} + Norwegian Tag = Tag{language: noIndex, locale: noIndex} + Punjabi Tag = Tag{language: paIndex, locale: paIndex} + Polish Tag = Tag{language: plIndex, locale: plIndex} + Portuguese Tag = Tag{language: ptIndex, locale: ptIndex} + BrazilianPortuguese Tag = Tag{language: ptBRIndex, locale: ptBRIndex} + EuropeanPortuguese Tag = Tag{language: ptPTIndex, locale: ptPTIndex} + Romanian Tag = Tag{language: roIndex, locale: roIndex} + Russian Tag = Tag{language: ruIndex, locale: ruIndex} + Sinhala Tag = Tag{language: siIndex, locale: siIndex} 
+ Slovak Tag = Tag{language: skIndex, locale: skIndex} + Slovenian Tag = Tag{language: slIndex, locale: slIndex} + Albanian Tag = Tag{language: sqIndex, locale: sqIndex} + Serbian Tag = Tag{language: srIndex, locale: srIndex} + SerbianLatin Tag = Tag{language: srLatnIndex, locale: srLatnIndex} + Swedish Tag = Tag{language: svIndex, locale: svIndex} + Swahili Tag = Tag{language: swIndex, locale: swIndex} + Tamil Tag = Tag{language: taIndex, locale: taIndex} + Telugu Tag = Tag{language: teIndex, locale: teIndex} + Thai Tag = Tag{language: thIndex, locale: thIndex} + Turkish Tag = Tag{language: trIndex, locale: trIndex} + Ukrainian Tag = Tag{language: ukIndex, locale: ukIndex} + Urdu Tag = Tag{language: urIndex, locale: urIndex} + Uzbek Tag = Tag{language: uzIndex, locale: uzIndex} + Vietnamese Tag = Tag{language: viIndex, locale: viIndex} + Chinese Tag = Tag{language: zhIndex, locale: zhIndex} + SimplifiedChinese Tag = Tag{language: zhHansIndex, locale: zhHansIndex} + TraditionalChinese Tag = Tag{language: zhHantIndex, locale: zhHantIndex} + Zulu Tag = Tag{language: zuIndex, locale: zuIndex} +) diff --git a/vendor/golang.org/x/text/internal/language/compose.go b/vendor/golang.org/x/text/internal/language/compose.go new file mode 100644 index 00000000000..4ae78e0fa5f --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compose.go @@ -0,0 +1,167 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "sort" + "strings" +) + +// A Builder allows constructing a Tag from individual components. +// Its main user is Compose in the top-level language package. +type Builder struct { + Tag Tag + + private string // the x extension + variants []string + extensions []string +} + +// Make returns a new Tag from the current settings. +func (b *Builder) Make() Tag { + t := b.Tag + + if len(b.extensions) > 0 || len(b.variants) > 0 { + sort.Sort(sortVariants(b.variants)) + sort.Strings(b.extensions) + + if b.private != "" { + b.extensions = append(b.extensions, b.private) + } + n := maxCoreSize + tokenLen(b.variants...) + tokenLen(b.extensions...) + buf := make([]byte, n) + p := t.genCoreBytes(buf) + t.pVariant = byte(p) + p += appendTokens(buf[p:], b.variants...) + t.pExt = uint16(p) + p += appendTokens(buf[p:], b.extensions...) + t.str = string(buf[:p]) + // We may not always need to remake the string, but when or when not + // to do so is rather tricky. + scan := makeScanner(buf[:p]) + t, _ = parse(&scan, "") + return t + + } else if b.private != "" { + t.str = b.private + t.RemakeString() + } + return t +} + +// SetTag copies all the settings from a given Tag. Any previously set values +// are discarded. +func (b *Builder) SetTag(t Tag) { + b.Tag.LangID = t.LangID + b.Tag.RegionID = t.RegionID + b.Tag.ScriptID = t.ScriptID + // TODO: optimize + b.variants = b.variants[:0] + if variants := t.Variants(); variants != "" { + for _, vr := range strings.Split(variants[1:], "-") { + b.variants = append(b.variants, vr) + } + } + b.extensions, b.private = b.extensions[:0], "" + for _, e := range t.Extensions() { + b.AddExt(e) + } +} + +// AddExt adds extension e to the tag. e must be a valid extension as returned +// by Tag.Extension. If the extension already exists, it will be discarded, +// except for a -u extension, where non-existing key-type pairs will added. 
+func (b *Builder) AddExt(e string) { + if e[0] == 'x' { + if b.private == "" { + b.private = e + } + return + } + for i, s := range b.extensions { + if s[0] == e[0] { + if e[0] == 'u' { + b.extensions[i] += e[1:] + } + return + } + } + b.extensions = append(b.extensions, e) +} + +// SetExt sets the extension e to the tag. e must be a valid extension as +// returned by Tag.Extension. If the extension already exists, it will be +// overwritten, except for a -u extension, where the individual key-type pairs +// will be set. +func (b *Builder) SetExt(e string) { + if e[0] == 'x' { + b.private = e + return + } + for i, s := range b.extensions { + if s[0] == e[0] { + if e[0] == 'u' { + b.extensions[i] = e + s[1:] + } else { + b.extensions[i] = e + } + return + } + } + b.extensions = append(b.extensions, e) +} + +// AddVariant adds any number of variants. +func (b *Builder) AddVariant(v ...string) { + for _, v := range v { + if v != "" { + b.variants = append(b.variants, v) + } + } +} + +// ClearVariants removes any variants previously added, including those +// copied from a Tag in SetTag. +func (b *Builder) ClearVariants() { + b.variants = b.variants[:0] +} + +// ClearExtensions removes any extensions previously added, including those +// copied from a Tag in SetTag. +func (b *Builder) ClearExtensions() { + b.private = "" + b.extensions = b.extensions[:0] +} + +func tokenLen(token ...string) (n int) { + for _, t := range token { + n += len(t) + 1 + } + return +} + +func appendTokens(b []byte, token ...string) int { + p := 0 + for _, t := range token { + b[p] = '-' + copy(b[p+1:], t) + p += 1 + len(t) + } + return p +} + +type sortVariants []string + +func (s sortVariants) Len() int { + return len(s) +} + +func (s sortVariants) Swap(i, j int) { + s[j], s[i] = s[i], s[j] +} + +func (s sortVariants) Less(i, j int) bool { + return variantIndex[s[i]] < variantIndex[s[j]] +} diff --git a/vendor/golang.org/x/text/internal/language/coverage.go b/vendor/golang.org/x/text/internal/language/coverage.go new file mode 100644 index 00000000000..9b20b88feb8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/coverage.go @@ -0,0 +1,28 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func BaseLanguages() []Language { + base := make([]Language, 0, NumLanguages) + for i := 0; i < langNoIndexOffset; i++ { + // We included "und" already for the value 0. + if i != nonCanonicalUnd { + base = append(base, Language(i)) + } + } + i := langNoIndexOffset + for _, v := range langNoIndex { + for k := 0; k < 8; k++ { + if v&1 == 1 { + base = append(base, Language(i)) + } + v >>= 1 + i++ + } + } + return base +} diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go new file mode 100644 index 00000000000..6105bc7fadc --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run gen.go gen_common.go -output tables.go + +package language // import "golang.org/x/text/internal/language" + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // maxCoreSize is the maximum size of a BCP 47 tag without variants and + // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes. + maxCoreSize = 12 + + // max99thPercentileSize is a somewhat arbitrary buffer size that presumably + // is large enough to hold at least 99% of the BCP 47 tags. + max99thPercentileSize = 32 + + // maxSimpleUExtensionSize is the maximum size of a -u extension with one + // key-type pair. Equals len("-u-") + key (2) + dash + max value (8). + maxSimpleUExtensionSize = 14 +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. The zero value of Tag is Und. +type Tag struct { + // TODO: the following fields have the form TagTypeID. This name is chosen + // to allow refactoring the public package without conflicting with its + // Base, Script, and Region methods. Once the transition is fully completed + // the ID can be stripped from the name. + + LangID Language + RegionID Region + // TODO: we will soon run out of positions for ScriptID. Idea: instead of + // storing lang, region, and ScriptID codes, store only the compact index and + // have a lookup table from this code to its expansion. This greatly speeds + // up table lookup, speed up common variant cases. + // This will also immediately free up 3 extra bytes. Also, the pVariant + // field can now be moved to the lookup table, as the compact index uniquely + // determines the offset of a possible variant. + ScriptID Script + pVariant byte // offset in str, includes preceding '-' + pExt uint16 // offset of first extension, includes preceding '-' + + // str is the string representation of the Tag. It will only be used if the + // tag has variants or extensions. + str string +} + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + t, _ := Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +// TODO: consider removing +func (t Tag) Raw() (b Language, s Script, r Region) { + return t.LangID, t.ScriptID, t.RegionID +} + +// equalTags compares language, script and region subtags only. +func (t Tag) equalTags(a Tag) bool { + return t.LangID == a.LangID && t.ScriptID == a.ScriptID && t.RegionID == a.RegionID +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + if int(t.pVariant) < len(t.str) { + return false + } + return t.equalTags(Und) +} + +// IsPrivateUse reports whether the Tag consists solely of an IsPrivateUse use +// tag. +func (t Tag) IsPrivateUse() bool { + return t.str != "" && t.pVariant == 0 +} + +// RemakeString is used to update t.str in case lang, script or region changed. +// It is assumed that pExt and pVariant still point to the start of the +// respective parts. 
+func (t *Tag) RemakeString() { + if t.str == "" { + return + } + extra := t.str[t.pVariant:] + if t.pVariant > 0 { + extra = extra[1:] + } + if t.equalTags(Und) && strings.HasPrefix(extra, "x-") { + t.str = extra + t.pVariant = 0 + t.pExt = 0 + return + } + var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases. + b := buf[:t.genCoreBytes(buf[:])] + if extra != "" { + diff := len(b) - int(t.pVariant) + b = append(b, '-') + b = append(b, extra...) + t.pVariant = uint8(int(t.pVariant) + diff) + t.pExt = uint16(int(t.pExt) + diff) + } else { + t.pVariant = uint8(len(b)) + t.pExt = uint16(len(b)) + } + t.str = string(b) +} + +// genCoreBytes writes a string for the base languages, script and region tags +// to the given buffer and returns the number of bytes written. It will never +// write more than maxCoreSize bytes. +func (t *Tag) genCoreBytes(buf []byte) int { + n := t.LangID.StringToBuf(buf[:]) + if t.ScriptID != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.ScriptID.String()) + } + if t.RegionID != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.RegionID.String()) + } + return n +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + if t.str != "" { + return t.str + } + if t.ScriptID == 0 && t.RegionID == 0 { + return t.LangID.String() + } + buf := [maxCoreSize]byte{} + return string(buf[:t.genCoreBytes(buf[:])]) +} + +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + if t.str != "" { + text = append(text, t.str...) + } else if t.ScriptID == 0 && t.RegionID == 0 { + text = append(text, t.LangID.String()...) + } else { + buf := [maxCoreSize]byte{} + text = buf[:t.genCoreBytes(buf[:])] + } + return text, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + tag, err := Parse(string(text)) + *t = tag + return err +} + +// Variants returns the part of the tag holding all variants or the empty string +// if there are no variants defined. +func (t Tag) Variants() string { + if t.pVariant == 0 { + return "" + } + return t.str[t.pVariant:t.pExt] +} + +// VariantOrPrivateUseTags returns variants or private use tags. +func (t Tag) VariantOrPrivateUseTags() string { + if t.pExt > 0 { + return t.str[t.pVariant:t.pExt] + } + return t.str[t.pVariant:] +} + +// HasString reports whether this tag defines more than just the raw +// components. +func (t Tag) HasString() bool { + return t.str != "" +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.str != "" { + // Strip the variants and extensions. 
+ b, s, r := t.Raw() + t = Tag{LangID: b, ScriptID: s, RegionID: r} + if t.RegionID == 0 && t.ScriptID != 0 && t.LangID != 0 { + base, _ := addTags(Tag{LangID: t.LangID}) + if base.ScriptID == t.ScriptID { + return Tag{LangID: t.LangID} + } + } + return t + } + if t.LangID != 0 { + if t.RegionID != 0 { + maxScript := t.ScriptID + if maxScript == 0 { + max, _ := addTags(t) + maxScript = max.ScriptID + } + + for i := range parents { + if Language(parents[i].lang) == t.LangID && Script(parents[i].maxScript) == maxScript { + for _, r := range parents[i].fromRegion { + if Region(r) == t.RegionID { + return Tag{ + LangID: t.LangID, + ScriptID: Script(parents[i].script), + RegionID: Region(parents[i].toRegion), + } + } + } + } + } + + // Strip the script if it is the default one. + base, _ := addTags(Tag{LangID: t.LangID}) + if base.ScriptID != maxScript { + return Tag{LangID: t.LangID, ScriptID: maxScript} + } + return Tag{LangID: t.LangID} + } else if t.ScriptID != 0 { + // The parent for an base-script pair with a non-default script is + // "und" instead of the base language. + base, _ := addTags(Tag{LangID: t.LangID}) + if base.ScriptID != t.ScriptID { + return Und + } + return Tag{LangID: t.LangID} + } + } + return Und +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (ext string, err error) { + defer func() { + if recover() != nil { + ext = "" + err = ErrSyntax + } + }() + + scan := makeScannerString(s) + var end int + if n := len(scan.token); n != 1 { + return "", ErrSyntax + } + scan.toLower(0, len(scan.b)) + end = parseExtension(&scan) + if end != len(s) { + return "", ErrSyntax + } + return string(scan.b), nil +} + +// HasVariants reports whether t has variants. +func (t Tag) HasVariants() bool { + return uint16(t.pVariant) < t.pExt +} + +// HasExtensions reports whether t has extensions. +func (t Tag) HasExtensions() bool { + return int(t.pExt) < len(t.str) +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext string, ok bool) { + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + if ext[0] == x { + return ext, true + } + } + return "", false +} + +// Extensions returns all extensions of t. +func (t Tag) Extensions() []string { + e := []string{} + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + e = append(e, ext) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. 
+func (t Tag) TypeForKey(key string) string { + if _, start, end, _ := t.findTypeForKey(key); end != start { + s := t.str[start:end] + if p := strings.IndexByte(s, '-'); p >= 0 { + s = s[:p] + } + return s + } + return "" +} + +var ( + errPrivateUse = errors.New("cannot set a key on a private use tag") + errInvalidArguments = errors.New("invalid key or type") +) + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + if t.IsPrivateUse() { + return t, errPrivateUse + } + if len(key) != 2 { + return t, errInvalidArguments + } + + // Remove the setting if value is "". + if value == "" { + start, sep, end, _ := t.findTypeForKey(key) + if start != sep { + // Remove a possible empty extension. + switch { + case t.str[start-2] != '-': // has previous elements. + case end == len(t.str), // end of string + end+2 < len(t.str) && t.str[end+2] == '-': // end of extension + start -= 2 + } + if start == int(t.pVariant) && end == len(t.str) { + t.str = "" + t.pVariant, t.pExt = 0, 0 + } else { + t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:]) + } + } + return t, nil + } + + if len(value) < 3 || len(value) > 8 { + return t, errInvalidArguments + } + + var ( + buf [maxCoreSize + maxSimpleUExtensionSize]byte + uStart int // start of the -u extension. + ) + + // Generate the tag string if needed. + if t.str == "" { + uStart = t.genCoreBytes(buf[:]) + buf[uStart] = '-' + uStart++ + } + + // Create new key-type pair and parse it to verify. + b := buf[uStart:] + copy(b, "u-") + copy(b[2:], key) + b[4] = '-' + b = b[:5+copy(b[5:], value)] + scan := makeScanner(b) + if parseExtensions(&scan); scan.err != nil { + return t, scan.err + } + + // Assemble the replacement string. + if t.str == "" { + t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1) + t.str = string(buf[:uStart+len(b)]) + } else { + s := t.str + start, sep, end, hasExt := t.findTypeForKey(key) + if start == sep { + if hasExt { + b = b[2:] + } + t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:]) + } else { + t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:]) + } + } + return t, nil +} + +// findKeyAndType returns the start and end position for the type corresponding +// to key or the point at which to insert the key-value pair if the type +// wasn't found. The hasExt return value reports whether an -u extension was present. +// Note: the extensions are typically very small and are likely to contain +// only one key-type pair. +func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { + p := int(t.pExt) + if len(key) != 2 || p == len(t.str) || p == 0 { + return p, p, p, false + } + s := t.str + + // Find the correct extension. + for p++; s[p] != 'u'; p++ { + if s[p] > 'u' { + p-- + return p, p, p, false + } + if p = nextExtension(s, p); p == len(s) { + return len(s), len(s), len(s), false + } + } + // Proceed to the hyphen following the extension name. + p++ + + // curKey is the key currently being processed. + curKey := "" + + // Iterate over keys until we get the end of a section. 
+ for { + end = p + for p++; p < len(s) && s[p] != '-'; p++ { + } + n := p - end - 1 + if n <= 2 && curKey == key { + if sep < end { + sep++ + } + return start, sep, end, true + } + switch n { + case 0, // invalid string + 1: // next extension + return end, end, end, true + case 2: + // next key + curKey = s[end+1 : p] + if curKey > key { + return end, end, end, true + } + start = end + sep = p + } + } +} + +// ParseBase parses a 2- or 3-letter ISO 639 code. +// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if another error occurred. +func ParseBase(s string) (l Language, err error) { + defer func() { + if recover() != nil { + l = 0 + err = ErrSyntax + } + }() + + if n := len(s); n < 2 || 3 < n { + return 0, ErrSyntax + } + var buf [3]byte + return getLangID(buf[:copy(buf[:], s)]) +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if another error occurred. +func ParseScript(s string) (scr Script, err error) { + defer func() { + if recover() != nil { + scr = 0 + err = ErrSyntax + } + }() + + if len(s) != 4 { + return 0, ErrSyntax + } + var buf [4]byte + return getScriptID(script, buf[:copy(buf[:], s)]) +} + +// EncodeM49 returns the Region for the given UN M.49 code. +// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + return getRegionM49(r) +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if another error occurred. +func ParseRegion(s string) (r Region, err error) { + defer func() { + if recover() != nil { + r = 0 + err = ErrSyntax + } + }() + + if n := len(s); n < 2 || 3 < n { + return 0, ErrSyntax + } + var buf [3]byte + return getRegionID(buf[:copy(buf[:], s)]) +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + if r == 0 || r.IsGroup() || r.IsPrivateUse() && r != _XK { + return false + } + return true +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + if r == 0 { + return false + } + return int(regionInclusion[r]) < len(regionContainment) +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. +func (r Region) Contains(c Region) bool { + if r == c { + return true + } + g := regionInclusion[r] + if g >= nRegionGroups { + return false + } + m := regionContainment[g] + + d := regionInclusion[c] + b := regionInclusionBits[d] + + // A contained country may belong to multiple disjoint groups. Matching any + // of these indicates containment. If the contained region is a group, it + // must strictly be a subset. + if d >= nRegionGroups { + return b&m != 0 + } + return b&^m == 0 +} + +var errNoTLD = errors.New("language: region is not a valid ccTLD") + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized it was obtained from a Tag that was +// obtained using any of the default methods. 
+func (r Region) TLD() (Region, error) { + // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the + // difference between ISO 3166-1 and IANA ccTLD. + if r == _GB { + r = _UK + } + if (r.typ() & ccTLD) == 0 { + return 0, errNoTLD + } + return r, nil +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + if cr := normRegion(r); cr != 0 { + return cr + } + return r +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + ID uint8 + str string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (v Variant, err error) { + defer func() { + if recover() != nil { + v = Variant{} + err = ErrSyntax + } + }() + + s = strings.ToLower(s) + if id, ok := variantIndex[s]; ok { + return Variant{id, s}, nil + } + return Variant{}, NewValueError([]byte(s)) +} + +// String returns the string representation of the variant. +func (v Variant) String() string { + return v.str +} diff --git a/vendor/golang.org/x/text/internal/language/lookup.go b/vendor/golang.org/x/text/internal/language/lookup.go new file mode 100644 index 00000000000..231b4fbdebf --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/lookup.go @@ -0,0 +1,412 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "golang.org/x/text/internal/tag" +) + +// findIndex tries to find the given tag in idx and returns a standardized error +// if it could not be found. +func findIndex(idx tag.Index, key []byte, form string) (index int, err error) { + if !tag.FixCase(form, key) { + return 0, ErrSyntax + } + i := idx.Index(key) + if i == -1 { + return 0, NewValueError(key) + } + return i, nil +} + +func searchUint(imap []uint16, key uint16) int { + return sort.Search(len(imap), func(i int) bool { + return imap[i] >= key + }) +} + +type Language uint16 + +// getLangID returns the langID of s if s is a canonical subtag +// or langUnknown if s is not a canonical subtag. +func getLangID(s []byte) (Language, error) { + if len(s) == 2 { + return getLangISO2(s) + } + return getLangISO3(s) +} + +// TODO language normalization as well as the AliasMaps could be moved to the +// higher level package, but it is a bit tricky to separate the generation. + +func (id Language) Canonicalize() (Language, AliasType) { + return normLang(id) +} + +// normLang returns the mapped langID of id according to mapping m. +func normLang(id Language) (Language, AliasType) { + k := sort.Search(len(AliasMap), func(i int) bool { + return AliasMap[i].From >= uint16(id) + }) + if k < len(AliasMap) && AliasMap[k].From == uint16(id) { + return Language(AliasMap[k].To), AliasTypes[k] + } + return id, AliasTypeUnknown +} + +// getLangISO2 returns the langID for the given 2-letter ISO language code +// or unknownLang if this does not exist. 
+func getLangISO2(s []byte) (Language, error) { + if !tag.FixCase("zz", s) { + return 0, ErrSyntax + } + if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 { + return Language(i), nil + } + return 0, NewValueError(s) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s []byte) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +// getLangISO3 returns the langID for the given 3-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO3(s []byte) (Language, error) { + if tag.FixCase("und", s) { + // first try to match canonical 3-letter entries + for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) { + if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] { + // We treat "und" as special and always translate it to "unspecified". + // Note that ZZ and Zzzz are private use and are not treated as + // unspecified by default. + id := Language(i) + if id == nonCanonicalUnd { + return 0, nil + } + return id, nil + } + } + if i := altLangISO3.Index(s); i != -1 { + return Language(altLangIndex[altLangISO3.Elem(i)[3]]), nil + } + n := strToInt(s) + if langNoIndex[n/8]&(1<<(n%8)) != 0 { + return Language(n) + langNoIndexOffset, nil + } + // Check for non-canonical uses of ISO3. + for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) { + if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] { + return Language(i), nil + } + } + return 0, NewValueError(s) + } + return 0, ErrSyntax +} + +// StringToBuf writes the string to b and returns the number of bytes +// written. cap(b) must be >= 3. +func (id Language) StringToBuf(b []byte) int { + if id >= langNoIndexOffset { + intToStr(uint(id)-langNoIndexOffset, b[:3]) + return 3 + } else if id == 0 { + return copy(b, "und") + } + l := lang[id<<2:] + if l[3] == 0 { + return copy(b, l[:3]) + } + return copy(b, l[:2]) +} + +// String returns the BCP 47 representation of the langID. +// Use b as variable name, instead of id, to ensure the variable +// used is consistent with that of Base in which this type is embedded. +func (b Language) String() string { + if b == 0 { + return "und" + } else if b >= langNoIndexOffset { + b -= langNoIndexOffset + buf := [3]byte{} + intToStr(uint(b), buf[:]) + return string(buf[:]) + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } + return l[:2] +} + +// ISO3 returns the ISO 639-3 language code. +func (b Language) ISO3() string { + if b == 0 || b >= langNoIndexOffset { + return b.String() + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } else if l[2] == 0 { + return altLangISO3.Elem(int(l[3]))[:3] + } + // This allocation will only happen for 3-letter ISO codes + // that are non-canonical BCP 47 language identifiers. + return l[0:1] + l[2:4] +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b Language) IsPrivateUse() bool { + return langPrivateStart <= b && b <= langPrivateEnd +} + +// SuppressScript returns the script marked as SuppressScript in the IANA +// language tag repository, or 0 if there is no such script. 
+func (b Language) SuppressScript() Script {
+	if b < langNoIndexOffset {
+		return Script(suppressScript[b])
+	}
+	return 0
+}
+
+type Region uint16
+
+// getRegionID returns the region id for s if s is a valid 2-letter region code
+// or unknownRegion.
+func getRegionID(s []byte) (Region, error) {
+	if len(s) == 3 {
+		if isAlpha(s[0]) {
+			return getRegionISO3(s)
+		}
+		if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
+			return getRegionM49(int(i))
+		}
+	}
+	return getRegionISO2(s)
+}
+
+// getRegionISO2 returns the regionID for the given 2-letter ISO country code
+// or unknownRegion if this does not exist.
+func getRegionISO2(s []byte) (Region, error) {
+	i, err := findIndex(regionISO, s, "ZZ")
+	if err != nil {
+		return 0, err
+	}
+	return Region(i) + isoRegionOffset, nil
+}
+
+// getRegionISO3 returns the regionID for the given 3-letter ISO country code
+// or unknownRegion if this does not exist.
+func getRegionISO3(s []byte) (Region, error) {
+	if tag.FixCase("ZZZ", s) {
+		for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
+			if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
+				return Region(i) + isoRegionOffset, nil
+			}
+		}
+		for i := 0; i < len(altRegionISO3); i += 3 {
+			if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
+				return Region(altRegionIDs[i/3]), nil
+			}
+		}
+		return 0, NewValueError(s)
+	}
+	return 0, ErrSyntax
+}
+
+func getRegionM49(n int) (Region, error) {
+	if 0 < n && n <= 999 {
+		const (
+			searchBits = 7
+			regionBits = 9
+			regionMask = 1<<regionBits - 1
+		)
+		idx := n >> searchBits
+		buf := fromM49[m49Index[idx]:m49Index[idx+1]]
+		val := uint16(n) << regionBits // we rely on bits shifting out
+		i := sort.Search(len(buf), func(i int) bool {
+			return buf[i] >= val
+		})
+		if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
+			return Region(r & regionMask), nil
+		}
+	}
+	var e ValueError
+	fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
+	return 0, e
+}
+
+// normRegion returns a region if r is deprecated or 0 otherwise.
+// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
+// TODO: consider mapping split up regions to new most populous one (like CLDR).
+func normRegion(r Region) Region {
+	m := regionOldMap
+	k := sort.Search(len(m), func(i int) bool {
+		return m[i].From >= uint16(r)
+	})
+	if k < len(m) && m[k].From == uint16(r) {
+		return Region(m[k].To)
+	}
+	return 0
+}
+
+const (
+	iso3166UserAssigned = 1 << iota
+	ccTLD
+	bcp47Region
+)
+
+func (r Region) typ() byte {
+	return regionTypes[r]
+}
+
+// String returns the BCP 47 representation for the region.
+// It returns "ZZ" for an unspecified region.
+func (r Region) String() string {
+	if r < isoRegionOffset {
+		if r == 0 {
+			return "ZZ"
+		}
+		return fmt.Sprintf("%03d", r.M49())
+	}
+	r -= isoRegionOffset
+	return regionISO.Elem(int(r))[:2]
+}
+
+// ISO3 returns the 3-letter ISO code of r.
+// Note that not all regions have a 3-letter ISO code.
+// In such cases this method returns "ZZZ".
+func (r Region) ISO3() string {
+	if r < isoRegionOffset {
+		return "ZZZ"
+	}
+	r -= isoRegionOffset
+	reg := regionISO.Elem(int(r))
+	switch reg[2] {
+	case 0:
+		return altRegionISO3[reg[3]:][:3]
+	case ' ':
+		return "ZZZ"
+	}
+	return reg[0:1] + reg[2:4]
+}
+
+// M49 returns the UN M.49 encoding of r, or 0 if this encoding
+// is not defined for r.
+func (r Region) M49() int {
+	return int(m49[r])
+}
+
+// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. 
This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r Region) IsPrivateUse() bool { + return r.typ()&iso3166UserAssigned != 0 +} + +type Script uint16 + +// getScriptID returns the script id for string s. It assumes that s +// is of the format [A-Z][a-z]{3}. +func getScriptID(idx tag.Index, s []byte) (Script, error) { + i, err := findIndex(idx, s, "Zzzz") + return Script(i), err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s Script) String() string { + if s == 0 { + return "Zzzz" + } + return script.Elem(int(s)) +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s Script) IsPrivateUse() bool { + return _Qaaa <= s && s <= _Qabx +} + +const ( + maxAltTaglen = len("en-US-POSIX") + maxLen = maxAltTaglen +) + +var ( + // grandfatheredMap holds a mapping from legacy and grandfathered tags to + // their base language or index to more elaborate tag. + grandfatheredMap = map[[maxLen]byte]int16{ + [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban + [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami + [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn + [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak + [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon + [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux + [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo + [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn + [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao + [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay + [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu + [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok + [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL + [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE + [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu + [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan + [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang + + // Grandfathered tags with no modern replacement will be converted as + // follows: + [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish + [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed + [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default + [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian + [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min + + // CLDR-specific tag. 
+ [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root + [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX" + } + + altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102} + + altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix" +) + +func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) { + if v, ok := grandfatheredMap[s]; ok { + if v < 0 { + return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true + } + t.LangID = Language(v) + return t, true + } + return t, false +} diff --git a/vendor/golang.org/x/text/internal/language/match.go b/vendor/golang.org/x/text/internal/language/match.go new file mode 100644 index 00000000000..75a2dbca764 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/match.go @@ -0,0 +1,226 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "errors" + +type scriptRegionFlags uint8 + +const ( + isList = 1 << iota + scriptInFrom + regionInFrom +) + +func (t *Tag) setUndefinedLang(id Language) { + if t.LangID == 0 { + t.LangID = id + } +} + +func (t *Tag) setUndefinedScript(id Script) { + if t.ScriptID == 0 { + t.ScriptID = id + } +} + +func (t *Tag) setUndefinedRegion(id Region) { + if t.RegionID == 0 || t.RegionID.Contains(id) { + t.RegionID = id + } +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// addLikelySubtags sets subtags to their most likely value, given the locale. +// In most cases this means setting fields for unknown values, but in some +// cases it may alter a value. It returns an ErrMissingLikelyTagsData error +// if the given locale cannot be expanded. +func (t Tag) addLikelySubtags() (Tag, error) { + id, err := addTags(t) + if err != nil { + return t, err + } else if id.equalTags(t) { + return t, nil + } + id.RemakeString() + return id, nil +} + +// specializeRegion attempts to specialize a group region. +func specializeRegion(t *Tag) bool { + if i := regionInclusion[t.RegionID]; i < nRegionGroups { + x := likelyRegionGroup[i] + if Language(x.lang) == t.LangID && Script(x.script) == t.ScriptID { + t.RegionID = Region(x.region) + } + return true + } + return false +} + +// Maximize returns a new tag with missing tags filled in. +func (t Tag) Maximize() (Tag, error) { + return addTags(t) +} + +func addTags(t Tag) (Tag, error) { + // We leave private use identifiers alone. + if t.IsPrivateUse() { + return t, nil + } + if t.ScriptID != 0 && t.RegionID != 0 { + if t.LangID != 0 { + // already fully specified + specializeRegion(&t) + return t, nil + } + // Search matches for und-script-region. Note that for these cases + // region will never be a group so there is no need to check for this. + list := likelyRegion[t.RegionID : t.RegionID+1] + if x := list[0]; x.flags&isList != 0 { + list = likelyRegionList[x.lang : x.lang+uint16(x.script)] + } + for _, x := range list { + // Deviating from the spec. See match_test.go for details. + if Script(x.script) == t.ScriptID { + t.setUndefinedLang(Language(x.lang)) + return t, nil + } + } + } + if t.LangID != 0 { + // Search matches for lang-script and lang-region, where lang != und. 
+ if t.LangID < langNoIndexOffset { + x := likelyLang[t.LangID] + if x.flags&isList != 0 { + list := likelyLangList[x.region : x.region+uint16(x.script)] + if t.ScriptID != 0 { + for _, x := range list { + if Script(x.script) == t.ScriptID && x.flags&scriptInFrom != 0 { + t.setUndefinedRegion(Region(x.region)) + return t, nil + } + } + } else if t.RegionID != 0 { + count := 0 + goodScript := true + tt := t + for _, x := range list { + // We visit all entries for which the script was not + // defined, including the ones where the region was not + // defined. This allows for proper disambiguation within + // regions. + if x.flags&scriptInFrom == 0 && t.RegionID.Contains(Region(x.region)) { + tt.RegionID = Region(x.region) + tt.setUndefinedScript(Script(x.script)) + goodScript = goodScript && tt.ScriptID == Script(x.script) + count++ + } + } + if count == 1 { + return tt, nil + } + // Even if we fail to find a unique Region, we might have + // an unambiguous script. + if goodScript { + t.ScriptID = tt.ScriptID + } + } + } + } + } else { + // Search matches for und-script. + if t.ScriptID != 0 { + x := likelyScript[t.ScriptID] + if x.region != 0 { + t.setUndefinedRegion(Region(x.region)) + t.setUndefinedLang(Language(x.lang)) + return t, nil + } + } + // Search matches for und-region. If und-script-region exists, it would + // have been found earlier. + if t.RegionID != 0 { + if i := regionInclusion[t.RegionID]; i < nRegionGroups { + x := likelyRegionGroup[i] + if x.region != 0 { + t.setUndefinedLang(Language(x.lang)) + t.setUndefinedScript(Script(x.script)) + t.RegionID = Region(x.region) + } + } else { + x := likelyRegion[t.RegionID] + if x.flags&isList != 0 { + x = likelyRegionList[x.lang] + } + if x.script != 0 && x.flags != scriptInFrom { + t.setUndefinedLang(Language(x.lang)) + t.setUndefinedScript(Script(x.script)) + return t, nil + } + } + } + } + + // Search matches for lang. + if t.LangID < langNoIndexOffset { + x := likelyLang[t.LangID] + if x.flags&isList != 0 { + x = likelyLangList[x.region] + } + if x.region != 0 { + t.setUndefinedScript(Script(x.script)) + t.setUndefinedRegion(Region(x.region)) + } + specializeRegion(&t) + if t.LangID == 0 { + t.LangID = _en // default language + } + return t, nil + } + return t, ErrMissingLikelyTagsData +} + +func (t *Tag) setTagsFrom(id Tag) { + t.LangID = id.LangID + t.ScriptID = id.ScriptID + t.RegionID = id.RegionID +} + +// minimize removes the region or script subtags from t such that +// t.addLikelySubtags() == t.minimize().addLikelySubtags(). +func (t Tag) minimize() (Tag, error) { + t, err := minimizeTags(t) + if err != nil { + return t, err + } + t.RemakeString() + return t, nil +} + +// minimizeTags mimics the behavior of the ICU 51 C implementation. +func minimizeTags(t Tag) (Tag, error) { + if t.equalTags(Und) { + return t, nil + } + max, err := addTags(t) + if err != nil { + return t, err + } + for _, id := range [...]Tag{ + {LangID: t.LangID}, + {LangID: t.LangID, RegionID: t.RegionID}, + {LangID: t.LangID, ScriptID: t.ScriptID}, + } { + if x, err := addTags(id); err == nil && max.equalTags(x) { + t.setTagsFrom(id) + break + } + } + return t, nil +} diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go new file mode 100644 index 00000000000..aad1e0acf77 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -0,0 +1,608 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "errors" + "fmt" + "sort" + + "golang.org/x/text/internal/tag" +) + +// isAlpha returns true if the byte is not a digit. +// b must be an ASCII letter or digit. +func isAlpha(b byte) bool { + return b > '9' +} + +// isAlphaNum returns true if the string contains only ASCII letters or digits. +func isAlphaNum(s []byte) bool { + for _, c := range s { + if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') { + return false + } + } + return true +} + +// ErrSyntax is returned by any of the parsing functions when the +// input is not well-formed, according to BCP 47. +// TODO: return the position at which the syntax error occurred? +var ErrSyntax = errors.New("language: tag is not well-formed") + +// ErrDuplicateKey is returned when a tag contains the same key twice with +// different values in the -u section. +var ErrDuplicateKey = errors.New("language: different values for same key in -u extension") + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError struct { + v [8]byte +} + +// NewValueError creates a new ValueError. +func NewValueError(tag []byte) ValueError { + var e ValueError + copy(e.v[:], tag) + return e +} + +func (e ValueError) tag() []byte { + n := bytes.IndexByte(e.v[:], 0) + if n == -1 { + n = 8 + } + return e.v[:n] +} + +// Error implements the error interface. +func (e ValueError) Error() string { + return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag()) +} + +// Subtag returns the subtag for which the error occurred. +func (e ValueError) Subtag() string { + return string(e.tag()) +} + +// scanner is used to scan BCP 47 tokens, which are separated by _ or -. +type scanner struct { + b []byte + bytes [max99thPercentileSize]byte + token []byte + start int // start position of the current token + end int // end position of the current token + next int // next point for scan + err error + done bool +} + +func makeScannerString(s string) scanner { + scan := scanner{} + if len(s) <= len(scan.bytes) { + scan.b = scan.bytes[:copy(scan.bytes[:], s)] + } else { + scan.b = []byte(s) + } + scan.init() + return scan +} + +// makeScanner returns a scanner using b as the input buffer. +// b is not copied and may be modified by the scanner routines. +func makeScanner(b []byte) scanner { + scan := scanner{b: b} + scan.init() + return scan +} + +func (s *scanner) init() { + for i, c := range s.b { + if c == '_' { + s.b[i] = '-' + } + } + s.scan() +} + +// restToLower converts the string between start and end to lower case. +func (s *scanner) toLower(start, end int) { + for i := start; i < end; i++ { + c := s.b[i] + if 'A' <= c && c <= 'Z' { + s.b[i] += 'a' - 'A' + } + } +} + +func (s *scanner) setError(e error) { + if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) { + s.err = e + } +} + +// resizeRange shrinks or grows the array at position oldStart such that +// a new string of size newSize can fit between oldStart and oldEnd. +// Sets the scan point to after the resized range. 
+func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { + s.start = oldStart + if end := oldStart + newSize; end != oldEnd { + diff := end - oldEnd + var b []byte + if n := len(s.b) + diff; n > cap(s.b) { + b = make([]byte, n) + copy(b, s.b[:oldStart]) + } else { + b = s.b[:n] + } + copy(b[end:], s.b[oldEnd:]) + s.b = b + s.next = end + (s.next - s.end) + s.end = end + } +} + +// replace replaces the current token with repl. +func (s *scanner) replace(repl string) { + s.resizeRange(s.start, s.end, len(repl)) + copy(s.b[s.start:], repl) +} + +// gobble removes the current token from the input. +// Caller must call scan after calling gobble. +func (s *scanner) gobble(e error) { + s.setError(e) + if s.start == 0 { + s.b = s.b[:+copy(s.b, s.b[s.next:])] + s.end = 0 + } else { + s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] + s.end = s.start - 1 + } + s.next = s.start +} + +// deleteRange removes the given range from s.b before the current token. +func (s *scanner) deleteRange(start, end int) { + s.b = s.b[:start+copy(s.b[start:], s.b[end:])] + diff := end - start + s.next -= diff + s.start -= diff + s.end -= diff +} + +// scan parses the next token of a BCP 47 string. Tokens that are larger +// than 8 characters or include non-alphanumeric characters result in an error +// and are gobbled and removed from the output. +// It returns the end position of the last token consumed. +func (s *scanner) scan() (end int) { + end = s.end + s.token = nil + for s.start = s.next; s.next < len(s.b); { + i := bytes.IndexByte(s.b[s.next:], '-') + if i == -1 { + s.end = len(s.b) + s.next = len(s.b) + i = s.end - s.start + } else { + s.end = s.next + i + s.next = s.end + 1 + } + token := s.b[s.start:s.end] + if i < 1 || i > 8 || !isAlphaNum(token) { + s.gobble(ErrSyntax) + continue + } + s.token = token + return end + } + if n := len(s.b); n > 0 && s.b[n-1] == '-' { + s.setError(ErrSyntax) + s.b = s.b[:len(s.b)-1] + } + s.done = true + return end +} + +// acceptMinSize parses multiple tokens of the given size or greater. +// It returns the end position of the last token consumed. +func (s *scanner) acceptMinSize(min int) (end int) { + end = s.end + s.scan() + for ; len(s.token) >= min; s.scan() { + end = s.end + } + return end +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +func Parse(s string) (t Tag, err error) { + // TODO: consider supporting old-style locale key-value pairs. + if s == "" { + return Und, ErrSyntax + } + defer func() { + if recover() != nil { + t = Und + err = ErrSyntax + return + } + }() + if len(s) <= maxAltTaglen { + b := [maxAltTaglen]byte{} + for i, c := range s { + // Generating invalid UTF-8 is okay as it won't match. 
+			if 'A' <= c && c <= 'Z' {
+				c += 'a' - 'A'
+			} else if c == '_' {
+				c = '-'
+			}
+			b[i] = byte(c)
+		}
+		if t, ok := grandfathered(b); ok {
+			return t, nil
+		}
+	}
+	scan := makeScannerString(s)
+	return parse(&scan, s)
+}
+
+func parse(scan *scanner, s string) (t Tag, err error) {
+	t = Und
+	var end int
+	if n := len(scan.token); n <= 1 {
+		scan.toLower(0, len(scan.b))
+		if n == 0 || scan.token[0] != 'x' {
+			return t, ErrSyntax
+		}
+		end = parseExtensions(scan)
+	} else if n >= 4 {
+		return Und, ErrSyntax
+	} else { // the usual case
+		t, end = parseTag(scan, true)
+		if n := len(scan.token); n == 1 {
+			t.pExt = uint16(end)
+			end = parseExtensions(scan)
+		} else if end < len(scan.b) {
+			scan.setError(ErrSyntax)
+			scan.b = scan.b[:end]
+		}
+	}
+	if int(t.pVariant) < len(scan.b) {
+		if end < len(s) {
+			s = s[:end]
+		}
+		if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
+			t.str = s
+		} else {
+			t.str = string(scan.b)
+		}
+	} else {
+		t.pVariant, t.pExt = 0, 0
+	}
+	return t, scan.err
+}
+
+// parseTag parses language, script, region and variants.
+// It returns a Tag and the end position in the input that was parsed.
+// If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>.
+func parseTag(scan *scanner, doNorm bool) (t Tag, end int) {
+	var e error
+	// TODO: set an error if an unknown lang, script or region is encountered.
+	t.LangID, e = getLangID(scan.token)
+	scan.setError(e)
+	scan.replace(t.LangID.String())
+	langStart := scan.start
+	end = scan.scan()
+	for len(scan.token) == 3 && isAlpha(scan.token[0]) {
+		// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
+		// to a tag of the form <extlang>.
+		if doNorm {
+			lang, e := getLangID(scan.token)
+			if lang != 0 {
+				t.LangID = lang
+				langStr := lang.String()
+				copy(scan.b[langStart:], langStr)
+				scan.b[langStart+len(langStr)] = '-'
+				scan.start = langStart + len(langStr) + 1
+			}
+			scan.gobble(e)
+		}
+		end = scan.scan()
+	}
+	if len(scan.token) == 4 && isAlpha(scan.token[0]) {
+		t.ScriptID, e = getScriptID(script, scan.token)
+		if t.ScriptID == 0 {
+			scan.gobble(e)
+		}
+		end = scan.scan()
+	}
+	if n := len(scan.token); n >= 2 && n <= 3 {
+		t.RegionID, e = getRegionID(scan.token)
+		if t.RegionID == 0 {
+			scan.gobble(e)
+		} else {
+			scan.replace(t.RegionID.String())
+		}
+		end = scan.scan()
+	}
+	scan.toLower(scan.start, len(scan.b))
+	t.pVariant = byte(end)
+	end = parseVariants(scan, end, t)
+	t.pExt = uint16(end)
+	return t, end
+}
+
+var separator = []byte{'-'}
+
+// parseVariants scans tokens as long as each token is a valid variant string.
+// Duplicate variants are removed.
+func parseVariants(scan *scanner, end int, t Tag) int {
+	start := scan.start
+	varIDBuf := [4]uint8{}
+	variantBuf := [4][]byte{}
+	varID := varIDBuf[:0]
+	variant := variantBuf[:0]
+	last := -1
+	needSort := false
+	for ; len(scan.token) >= 4; scan.scan() {
+		// TODO: measure the impact of needing this conversion and redesign
+		// the data structure if there is an issue.
+		v, ok := variantIndex[string(scan.token)]
+		if !ok {
+			// unknown variant
+			// TODO: allow user-defined variants?
+			scan.gobble(NewValueError(scan.token))
+			continue
+		}
+		varID = append(varID, v)
+		variant = append(variant, scan.token)
+		if !needSort {
+			if last < int(v) {
+				last = int(v)
+			} else {
+				needSort = true
+				// There is no legal combinations of more than 7 variants
+				// (and this is by no means a useful sequence). 
+ const maxVariants = 8 + if len(varID) > maxVariants { + break + } + } + } + end = scan.end + } + if needSort { + sort.Sort(variantsSort{varID, variant}) + k, l := 0, -1 + for i, v := range varID { + w := int(v) + if l == w { + // Remove duplicates. + continue + } + varID[k] = varID[i] + variant[k] = variant[i] + k++ + l = w + } + if str := bytes.Join(variant[:k], separator); len(str) == 0 { + end = start - 1 + } else { + scan.resizeRange(start, end, len(str)) + copy(scan.b[scan.start:], str) + end = scan.end + } + } + return end +} + +type variantsSort struct { + i []uint8 + v [][]byte +} + +func (s variantsSort) Len() int { + return len(s.i) +} + +func (s variantsSort) Swap(i, j int) { + s.i[i], s.i[j] = s.i[j], s.i[i] + s.v[i], s.v[j] = s.v[j], s.v[i] +} + +func (s variantsSort) Less(i, j int) bool { + return s.i[i] < s.i[j] +} + +type bytesSort struct { + b [][]byte + n int // first n bytes to compare +} + +func (b bytesSort) Len() int { + return len(b.b) +} + +func (b bytesSort) Swap(i, j int) { + b.b[i], b.b[j] = b.b[j], b.b[i] +} + +func (b bytesSort) Less(i, j int) bool { + for k := 0; k < b.n; k++ { + if b.b[i][k] == b.b[j][k] { + continue + } + return b.b[i][k] < b.b[j][k] + } + return false +} + +// parseExtensions parses and normalizes the extensions in the buffer. +// It returns the last position of scan.b that is part of any extension. +// It also trims scan.b to remove excess parts accordingly. +func parseExtensions(scan *scanner) int { + start := scan.start + exts := [][]byte{} + private := []byte{} + end := scan.end + for len(scan.token) == 1 { + extStart := scan.start + ext := scan.token[0] + end = parseExtension(scan) + extension := scan.b[extStart:end] + if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { + scan.setError(ErrSyntax) + end = extStart + continue + } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { + scan.b = scan.b[:end] + return end + } else if ext == 'x' { + private = extension + break + } + exts = append(exts, extension) + } + sort.Sort(bytesSort{exts, 1}) + if len(private) > 0 { + exts = append(exts, private) + } + scan.b = scan.b[:start] + if len(exts) > 0 { + scan.b = append(scan.b, bytes.Join(exts, separator)...) + } else if start > 0 { + // Strip trailing '-'. + scan.b = scan.b[:start-1] + } + return end +} + +// parseExtension parses a single extension and returns the position of +// the extension end. +func parseExtension(scan *scanner) int { + start, end := scan.start, scan.end + switch scan.token[0] { + case 'u': // https://www.ietf.org/rfc/rfc6067.txt + attrStart := end + scan.scan() + for last := []byte{}; len(scan.token) > 2; scan.scan() { + if bytes.Compare(scan.token, last) != -1 { + // Attributes are unsorted. Start over from scratch. + p := attrStart + 1 + scan.next = p + attrs := [][]byte{} + for scan.scan(); len(scan.token) > 2; scan.scan() { + attrs = append(attrs, scan.token) + end = scan.end + } + sort.Sort(bytesSort{attrs, 3}) + copy(scan.b[p:], bytes.Join(attrs, separator)) + break + } + last = scan.token + end = scan.end + } + // Scan key-type sequences. A key is of length 2 and may be followed + // by 0 or more "type" subtags from 3 to the maximum of 8 letters. 
+		var last, key []byte
+		for attrEnd := end; len(scan.token) == 2; last = key {
+			key = scan.token
+			end = scan.end
+			for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
+				end = scan.end
+			}
+			// TODO: check key value validity
+			if bytes.Compare(key, last) != 1 || scan.err != nil {
+				// We have an invalid key or the keys are not sorted.
+				// Start scanning keys from scratch and reorder.
+				p := attrEnd + 1
+				scan.next = p
+				keys := [][]byte{}
+				for scan.scan(); len(scan.token) == 2; {
+					keyStart := scan.start
+					end = scan.end
+					for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
+						end = scan.end
+					}
+					keys = append(keys, scan.b[keyStart:end])
+				}
+				sort.Stable(bytesSort{keys, 2})
+				if n := len(keys); n > 0 {
+					k := 0
+					for i := 1; i < n; i++ {
+						if !bytes.Equal(keys[k][:2], keys[i][:2]) {
+							k++
+							keys[k] = keys[i]
+						} else if !bytes.Equal(keys[k], keys[i]) {
+							scan.setError(ErrDuplicateKey)
+						}
+					}
+					keys = keys[:k+1]
+				}
+				reordered := bytes.Join(keys, separator)
+				if e := p + len(reordered); e < end {
+					scan.deleteRange(e, end)
+					end = e
+				}
+				copy(scan.b[p:], reordered)
+				break
+			}
+		}
+	case 't': // https://www.ietf.org/rfc/rfc6497.txt
+		scan.scan()
+		if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
+			_, end = parseTag(scan, false)
+			scan.toLower(start, end)
+		}
+		for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
+			end = scan.acceptMinSize(3)
+		}
+	case 'x':
+		end = scan.acceptMinSize(1)
+	default:
+		end = scan.acceptMinSize(2)
+	}
+	return end
+}
+
+// getExtension returns the name, body and end position of the extension.
+func getExtension(s string, p int) (end int, ext string) {
+	if s[p] == '-' {
+		p++
+	}
+	if s[p] == 'x' {
+		return len(s), s[p:]
+	}
+	end = nextExtension(s, p)
+	return end, s[p:end]
+}
+
+// nextExtension finds the next extension within the string, searching
+// for the -<char>- pattern from position p.
+// In the vast majority of cases, language tags will have at most
+// one extension and extensions tend to be small.
+func nextExtension(s string, p int) int {
+	for n := len(s) - 3; p < n; {
+		if s[p] == '-' {
+			if s[p+2] == '-' {
+				return p
+			}
+			p += 3
+		} else {
+			p++
+		}
+	}
+	return len(s)
+}
diff --git a/vendor/golang.org/x/text/internal/language/tables.go b/vendor/golang.org/x/text/internal/language/tables.go
new file mode 100644
index 00000000000..fb6b58378bd
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/tables.go
@@ -0,0 +1,3472 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package language
+
+import "golang.org/x/text/internal/tag"
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived. 
+const CLDRVersion = "32" + +const NumLanguages = 8752 + +const NumScripts = 258 + +const NumRegions = 357 + +type FromTo struct { + From uint16 + To uint16 +} + +const nonCanonicalUnd = 1201 +const ( + _af = 22 + _am = 39 + _ar = 58 + _az = 88 + _bg = 126 + _bn = 165 + _ca = 215 + _cs = 250 + _da = 257 + _de = 269 + _el = 310 + _en = 313 + _es = 318 + _et = 320 + _fa = 328 + _fi = 337 + _fil = 339 + _fr = 350 + _gu = 420 + _he = 444 + _hi = 446 + _hr = 465 + _hu = 469 + _hy = 471 + _id = 481 + _is = 504 + _it = 505 + _ja = 512 + _ka = 528 + _kk = 578 + _km = 586 + _kn = 593 + _ko = 596 + _ky = 650 + _lo = 696 + _lt = 704 + _lv = 711 + _mk = 767 + _ml = 772 + _mn = 779 + _mo = 784 + _mr = 795 + _ms = 799 + _mul = 806 + _my = 817 + _nb = 839 + _ne = 849 + _nl = 871 + _no = 879 + _pa = 925 + _pl = 947 + _pt = 960 + _ro = 988 + _ru = 994 + _sh = 1031 + _si = 1036 + _sk = 1042 + _sl = 1046 + _sq = 1073 + _sr = 1074 + _sv = 1092 + _sw = 1093 + _ta = 1104 + _te = 1121 + _th = 1131 + _tl = 1146 + _tn = 1152 + _tr = 1162 + _uk = 1198 + _ur = 1204 + _uz = 1212 + _vi = 1219 + _zh = 1321 + _zu = 1327 + _jbo = 515 + _ami = 1650 + _bnn = 2357 + _hak = 438 + _tlh = 14467 + _lb = 661 + _nv = 899 + _pwn = 12055 + _tao = 14188 + _tay = 14198 + _tsu = 14662 + _nn = 874 + _sfb = 13629 + _vgt = 15701 + _sgg = 13660 + _cmn = 3007 + _nan = 835 + _hsn = 467 +) + +const langPrivateStart = 0x2f72 + +const langPrivateEnd = 0x3179 + +// lang holds an alphabetically sorted list of ISO-639 language identifiers. +// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +// For 2-byte language identifiers, the two successive bytes have the following meaning: +// - if the first letter of the 2- and 3-letter ISO codes are the same: +// the second and third letter of the 3-letter ISO code. +// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// +// For 3-byte language identifiers the 4th byte is 0. 
+const lang tag.Index = "" + // Size: 5324 bytes + "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abq\x00abr\x00abt\x00aby\x00a" + + "cd\x00ace\x00ach\x00ada\x00ade\x00adj\x00ady\x00adz\x00aeveaeb\x00aey" + + "\x00affragc\x00agd\x00agg\x00agm\x00ago\x00agq\x00aha\x00ahl\x00aho\x00a" + + "jg\x00akkaakk\x00ala\x00ali\x00aln\x00alt\x00ammhamm\x00amn\x00amo\x00am" + + "p\x00anrganc\x00ank\x00ann\x00any\x00aoj\x00aom\x00aoz\x00apc\x00apd\x00" + + "ape\x00apr\x00aps\x00apz\x00arraarc\x00arh\x00arn\x00aro\x00arq\x00ars" + + "\x00ary\x00arz\x00assmasa\x00ase\x00asg\x00aso\x00ast\x00ata\x00atg\x00a" + + "tj\x00auy\x00avvaavl\x00avn\x00avt\x00avu\x00awa\x00awb\x00awo\x00awx" + + "\x00ayymayb\x00azzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bav\x00bax\x00" + + "bba\x00bbb\x00bbc\x00bbd\x00bbj\x00bbp\x00bbr\x00bcf\x00bch\x00bci\x00bc" + + "m\x00bcn\x00bco\x00bcq\x00bcu\x00bdd\x00beelbef\x00beh\x00bej\x00bem\x00" + + "bet\x00bew\x00bex\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc\x00bgn" + + "\x00bgx\x00bhihbhb\x00bhg\x00bhi\x00bhk\x00bhl\x00bho\x00bhy\x00biisbib" + + "\x00big\x00bik\x00bim\x00bin\x00bio\x00biq\x00bjh\x00bji\x00bjj\x00bjn" + + "\x00bjo\x00bjr\x00bjt\x00bjz\x00bkc\x00bkm\x00bkq\x00bku\x00bkv\x00blt" + + "\x00bmambmh\x00bmk\x00bmq\x00bmu\x00bnenbng\x00bnm\x00bnp\x00boodboj\x00" + + "bom\x00bon\x00bpy\x00bqc\x00bqi\x00bqp\x00bqv\x00brrebra\x00brh\x00brx" + + "\x00brz\x00bsosbsj\x00bsq\x00bss\x00bst\x00bto\x00btt\x00btv\x00bua\x00b" + + "uc\x00bud\x00bug\x00buk\x00bum\x00buo\x00bus\x00buu\x00bvb\x00bwd\x00bwr" + + "\x00bxh\x00bye\x00byn\x00byr\x00bys\x00byv\x00byx\x00bza\x00bze\x00bzf" + + "\x00bzh\x00bzw\x00caatcan\x00cbj\x00cch\x00ccp\x00ceheceb\x00cfa\x00cgg" + + "\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00cjv\x00ckb\x00c" + + "kl\x00cko\x00cky\x00cla\x00cme\x00cmg\x00cooscop\x00cps\x00crrecrh\x00cr" + + "j\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd\x00cuhucvhvcyymda" + + "andad\x00daf\x00dag\x00dah\x00dak\x00dar\x00dav\x00dbd\x00dbq\x00dcc\x00" + + "ddn\x00deeuded\x00den\x00dga\x00dgh\x00dgi\x00dgl\x00dgr\x00dgz\x00dia" + + "\x00dje\x00dnj\x00dob\x00doi\x00dop\x00dow\x00dri\x00drs\x00dsb\x00dtm" + + "\x00dtp\x00dts\x00dty\x00dua\x00duc\x00dud\x00dug\x00dvivdva\x00dww\x00d" + + "yo\x00dyu\x00dzzodzg\x00ebu\x00eeweefi\x00egl\x00egy\x00eka\x00eky\x00el" + + "llema\x00emi\x00enngenn\x00enq\x00eopoeri\x00es\x00\x05esu\x00etstetr" + + "\x00ett\x00etu\x00etx\x00euusewo\x00ext\x00faasfaa\x00fab\x00fag\x00fai" + + "\x00fan\x00ffulffi\x00ffm\x00fiinfia\x00fil\x00fit\x00fjijflr\x00fmp\x00" + + "foaofod\x00fon\x00for\x00fpe\x00fqs\x00frrafrc\x00frp\x00frr\x00frs\x00f" + + "ub\x00fud\x00fue\x00fuf\x00fuh\x00fuq\x00fur\x00fuv\x00fuy\x00fvr\x00fyr" + + "ygalegaa\x00gaf\x00gag\x00gah\x00gaj\x00gam\x00gan\x00gaw\x00gay\x00gba" + + "\x00gbf\x00gbm\x00gby\x00gbz\x00gcr\x00gdlagde\x00gdn\x00gdr\x00geb\x00g" + + "ej\x00gel\x00gez\x00gfk\x00ggn\x00ghs\x00gil\x00gim\x00gjk\x00gjn\x00gju" + + "\x00gkn\x00gkp\x00gllgglk\x00gmm\x00gmv\x00gnrngnd\x00gng\x00god\x00gof" + + "\x00goi\x00gom\x00gon\x00gor\x00gos\x00got\x00grb\x00grc\x00grt\x00grw" + + "\x00gsw\x00guujgub\x00guc\x00gud\x00gur\x00guw\x00gux\x00guz\x00gvlvgvf" + + "\x00gvr\x00gvs\x00gwc\x00gwi\x00gwt\x00gyi\x00haauhag\x00hak\x00ham\x00h" + + "aw\x00haz\x00hbb\x00hdy\x00heebhhy\x00hiinhia\x00hif\x00hig\x00hih\x00hi" + + "l\x00hla\x00hlu\x00hmd\x00hmt\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homo" + + "hoc\x00hoj\x00hot\x00hrrvhsb\x00hsn\x00htathuunhui\x00hyyehzerianaian" + + 
"\x00iar\x00iba\x00ibb\x00iby\x00ica\x00ich\x00idndidd\x00idi\x00idu\x00i" + + "eleife\x00igboigb\x00ige\x00iiiiijj\x00ikpkikk\x00ikt\x00ikw\x00ikx\x00i" + + "lo\x00imo\x00inndinh\x00iodoiou\x00iri\x00isslittaiukuiw\x00\x03iwm\x00i" + + "ws\x00izh\x00izi\x00japnjab\x00jam\x00jbo\x00jbu\x00jen\x00jgk\x00jgo" + + "\x00ji\x00\x06jib\x00jmc\x00jml\x00jra\x00jut\x00jvavjwavkaatkaa\x00kab" + + "\x00kac\x00kad\x00kai\x00kaj\x00kam\x00kao\x00kbd\x00kbm\x00kbp\x00kbq" + + "\x00kbx\x00kby\x00kcg\x00kck\x00kcl\x00kct\x00kde\x00kdh\x00kdl\x00kdt" + + "\x00kea\x00ken\x00kez\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgf\x00kgp\x00k" + + "ha\x00khb\x00khn\x00khq\x00khs\x00kht\x00khw\x00khz\x00kiikkij\x00kiu" + + "\x00kiw\x00kjuakjd\x00kjg\x00kjs\x00kjy\x00kkazkkc\x00kkj\x00klalkln\x00" + + "klq\x00klt\x00klx\x00kmhmkmb\x00kmh\x00kmo\x00kms\x00kmu\x00kmw\x00knank" + + "nf\x00knp\x00koorkoi\x00kok\x00kol\x00kos\x00koz\x00kpe\x00kpf\x00kpo" + + "\x00kpr\x00kpx\x00kqb\x00kqf\x00kqs\x00kqy\x00kraukrc\x00kri\x00krj\x00k" + + "rl\x00krs\x00kru\x00ksasksb\x00ksd\x00ksf\x00ksh\x00ksj\x00ksr\x00ktb" + + "\x00ktm\x00kto\x00kuurkub\x00kud\x00kue\x00kuj\x00kum\x00kun\x00kup\x00k" + + "us\x00kvomkvg\x00kvr\x00kvx\x00kw\x00\x01kwj\x00kwo\x00kxa\x00kxc\x00kxm" + + "\x00kxp\x00kxw\x00kxz\x00kyirkye\x00kyx\x00kzr\x00laatlab\x00lad\x00lag" + + "\x00lah\x00laj\x00las\x00lbtzlbe\x00lbu\x00lbw\x00lcm\x00lcp\x00ldb\x00l" + + "ed\x00lee\x00lem\x00lep\x00leq\x00leu\x00lez\x00lguglgg\x00liimlia\x00li" + + "d\x00lif\x00lig\x00lih\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lle\x00lln" + + "\x00lmn\x00lmo\x00lmp\x00lninlns\x00lnu\x00loaoloj\x00lok\x00lol\x00lor" + + "\x00los\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy\x00luz\x00lvav" + + "lwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00man\x00mas\x00ma" + + "w\x00maz\x00mbh\x00mbo\x00mbq\x00mbu\x00mbw\x00mci\x00mcp\x00mcq\x00mcr" + + "\x00mcu\x00mda\x00mde\x00mdf\x00mdh\x00mdj\x00mdr\x00mdx\x00med\x00mee" + + "\x00mek\x00men\x00mer\x00met\x00meu\x00mfa\x00mfe\x00mfn\x00mfo\x00mfq" + + "\x00mglgmgh\x00mgl\x00mgo\x00mgp\x00mgy\x00mhahmhi\x00mhl\x00mirimif\x00" + + "min\x00mis\x00miw\x00mkkdmki\x00mkl\x00mkp\x00mkw\x00mlalmle\x00mlp\x00m" + + "ls\x00mmo\x00mmu\x00mmx\x00mnonmna\x00mnf\x00mni\x00mnw\x00moolmoa\x00mo" + + "e\x00moh\x00mos\x00mox\x00mpp\x00mps\x00mpt\x00mpx\x00mql\x00mrarmrd\x00" + + "mrj\x00mro\x00mssamtltmtc\x00mtf\x00mti\x00mtr\x00mua\x00mul\x00mur\x00m" + + "us\x00mva\x00mvn\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00mxm\x00myyamyk" + + "\x00mym\x00myv\x00myw\x00myx\x00myz\x00mzk\x00mzm\x00mzn\x00mzp\x00mzw" + + "\x00mzz\x00naaunac\x00naf\x00nah\x00nak\x00nan\x00nap\x00naq\x00nas\x00n" + + "bobnca\x00nce\x00ncf\x00nch\x00nco\x00ncu\x00nddendc\x00nds\x00neepneb" + + "\x00new\x00nex\x00nfr\x00ngdonga\x00ngb\x00ngl\x00nhb\x00nhe\x00nhw\x00n" + + "if\x00nii\x00nij\x00nin\x00niu\x00niy\x00niz\x00njo\x00nkg\x00nko\x00nll" + + "dnmg\x00nmz\x00nnnonnf\x00nnh\x00nnk\x00nnm\x00noornod\x00noe\x00non\x00" + + "nop\x00nou\x00nqo\x00nrblnrb\x00nsk\x00nsn\x00nso\x00nss\x00ntm\x00ntr" + + "\x00nui\x00nup\x00nus\x00nuv\x00nux\x00nvavnwb\x00nxq\x00nxr\x00nyyanym" + + "\x00nyn\x00nzi\x00occiogc\x00ojjiokr\x00okv\x00omrmong\x00onn\x00ons\x00" + + "opm\x00orrioro\x00oru\x00osssosa\x00ota\x00otk\x00ozm\x00paanpag\x00pal" + + "\x00pam\x00pap\x00pau\x00pbi\x00pcd\x00pcm\x00pdc\x00pdt\x00ped\x00peo" + + "\x00pex\x00pfl\x00phl\x00phn\x00pilipil\x00pip\x00pka\x00pko\x00plolpla" + + "\x00pms\x00png\x00pnn\x00pnt\x00pon\x00ppo\x00pra\x00prd\x00prg\x00psusp" + + 
"ss\x00ptorptp\x00puu\x00pwa\x00quuequc\x00qug\x00rai\x00raj\x00rao\x00rc" + + "f\x00rej\x00rel\x00res\x00rgn\x00rhg\x00ria\x00rif\x00rjs\x00rkt\x00rmoh" + + "rmf\x00rmo\x00rmt\x00rmu\x00rnunrna\x00rng\x00roonrob\x00rof\x00roo\x00r" + + "ro\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00rwo\x00ryu\x00saansaf" + + "\x00sah\x00saq\x00sas\x00sat\x00sav\x00saz\x00sba\x00sbe\x00sbp\x00scrds" + + "ck\x00scl\x00scn\x00sco\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00se" + + "i\x00ses\x00sgagsga\x00sgs\x00sgw\x00sgz\x00sh\x00\x02shi\x00shk\x00shn" + + "\x00shu\x00siinsid\x00sig\x00sil\x00sim\x00sjr\x00sklkskc\x00skr\x00sks" + + "\x00sllvsld\x00sli\x00sll\x00sly\x00smmosma\x00smi\x00smj\x00smn\x00smp" + + "\x00smq\x00sms\x00snnasnc\x00snk\x00snp\x00snx\x00sny\x00soomsok\x00soq" + + "\x00sou\x00soy\x00spd\x00spl\x00sps\x00sqqisrrpsrb\x00srn\x00srr\x00srx" + + "\x00ssswssd\x00ssg\x00ssy\x00stotstk\x00stq\x00suunsua\x00sue\x00suk\x00" + + "sur\x00sus\x00svweswwaswb\x00swc\x00swg\x00swp\x00swv\x00sxn\x00sxw\x00s" + + "yl\x00syr\x00szl\x00taamtaj\x00tal\x00tan\x00taq\x00tbc\x00tbd\x00tbf" + + "\x00tbg\x00tbo\x00tbw\x00tbz\x00tci\x00tcy\x00tdd\x00tdg\x00tdh\x00teelt" + + "ed\x00tem\x00teo\x00tet\x00tfi\x00tggktgc\x00tgo\x00tgu\x00thhathl\x00th" + + "q\x00thr\x00tiirtif\x00tig\x00tik\x00tim\x00tio\x00tiv\x00tkuktkl\x00tkr" + + "\x00tkt\x00tlgltlf\x00tlx\x00tly\x00tmh\x00tmy\x00tnsntnh\x00toontof\x00" + + "tog\x00toq\x00tpi\x00tpm\x00tpz\x00tqo\x00trurtru\x00trv\x00trw\x00tssot" + + "sd\x00tsf\x00tsg\x00tsj\x00tsw\x00ttatttd\x00tte\x00ttj\x00ttr\x00tts" + + "\x00ttt\x00tuh\x00tul\x00tum\x00tuq\x00tvd\x00tvl\x00tvu\x00twwitwh\x00t" + + "wq\x00txg\x00tyahtya\x00tyv\x00tzm\x00ubu\x00udm\x00ugiguga\x00ukkruli" + + "\x00umb\x00und\x00unr\x00unx\x00urrduri\x00urt\x00urw\x00usa\x00utr\x00u" + + "vh\x00uvl\x00uzzbvag\x00vai\x00van\x00veenvec\x00vep\x00viievic\x00viv" + + "\x00vls\x00vmf\x00vmw\x00voolvot\x00vro\x00vun\x00vut\x00walnwae\x00waj" + + "\x00wal\x00wan\x00war\x00wbp\x00wbq\x00wbr\x00wci\x00wer\x00wgi\x00whg" + + "\x00wib\x00wiu\x00wiv\x00wja\x00wji\x00wls\x00wmo\x00wnc\x00wni\x00wnu" + + "\x00woolwob\x00wos\x00wrs\x00wsk\x00wtm\x00wuu\x00wuv\x00wwa\x00xav\x00x" + + "bi\x00xcr\x00xes\x00xhhoxla\x00xlc\x00xld\x00xmf\x00xmn\x00xmr\x00xna" + + "\x00xnr\x00xog\x00xon\x00xpr\x00xrb\x00xsa\x00xsi\x00xsm\x00xsr\x00xwe" + + "\x00yam\x00yao\x00yap\x00yas\x00yat\x00yav\x00yay\x00yaz\x00yba\x00ybb" + + "\x00yby\x00yer\x00ygr\x00ygw\x00yiidyko\x00yle\x00ylg\x00yll\x00yml\x00y" + + "ooryon\x00yrb\x00yre\x00yrl\x00yss\x00yua\x00yue\x00yuj\x00yut\x00yuw" + + "\x00zahazag\x00zbl\x00zdj\x00zea\x00zgh\x00zhhozhx\x00zia\x00zlm\x00zmi" + + "\x00zne\x00zuulzxx\x00zza\x00\xff\xff\xff\xff" + +const langNoIndexOffset = 1330 + +// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +// in lookup tables. The language ids for these language codes are derived directly +// from the letters and are not consecutive. 
+// Size: 2197 bytes, 2197 elements +var langNoIndex = [2197]uint8{ + // Entry 0 - 3F + 0xff, 0xf8, 0xed, 0xfe, 0xeb, 0xd3, 0x3b, 0xd2, + 0xfb, 0xbf, 0x7a, 0xfa, 0x37, 0x1d, 0x3c, 0x57, + 0x6e, 0x97, 0x73, 0x38, 0xfb, 0xea, 0xbf, 0x70, + 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x62, + 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77, + 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2, + 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xbc, 0x0a, 0x6a, + 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff, + // Entry 40 - 7F + 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0, + 0x5d, 0xe3, 0x97, 0x14, 0x07, 0x20, 0xdd, 0xed, + 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0x35, + 0x7c, 0x5f, 0xff, 0x5f, 0x8e, 0x6e, 0xdf, 0xff, + 0xff, 0xff, 0x55, 0x7c, 0xd3, 0xfd, 0xbf, 0xb5, + 0x7b, 0xdf, 0x7f, 0xf7, 0xca, 0xfe, 0xdb, 0xa3, + 0xa8, 0xff, 0x1f, 0x67, 0x7d, 0xeb, 0xef, 0xce, + 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf, + // Entry 80 - BF + 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x6f, 0xff, 0xff, + 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7, + 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba, + 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff, + 0xfd, 0xdf, 0xfb, 0xfe, 0x9d, 0xb4, 0xd3, 0xff, + 0xef, 0xff, 0xdf, 0xf7, 0x7f, 0xb7, 0xfd, 0xd5, + 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c, + 0x08, 0x21, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80, + // Entry C0 - FF + 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96, + 0x1b, 0x14, 0x08, 0xf3, 0x2b, 0xe7, 0x17, 0x56, + 0x05, 0x7d, 0x0e, 0x1c, 0x37, 0x7b, 0xf3, 0xef, + 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, + 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x7b, 0x35, + 0x3e, 0xc7, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00, + 0xb0, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d, + // Entry 100 - 13F + 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64, + 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00, + 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3, + 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x41, 0x0c, + 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc7, 0x67, 0x5f, + 0x56, 0x99, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00, + 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56, + 0x90, 0x69, 0x01, 0x2c, 0x96, 0x69, 0x20, 0xfb, + // Entry 140 - 17F + 0xff, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x16, + 0x03, 0x00, 0x00, 0xb0, 0x14, 0x03, 0x50, 0x06, + 0x0a, 0x00, 0x01, 0x00, 0x00, 0x10, 0x11, 0x09, + 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x04, + 0x08, 0x00, 0x00, 0x05, 0x00, 0x80, 0x28, 0x04, + 0x00, 0x00, 0x40, 0xd5, 0x2d, 0x00, 0x64, 0x35, + 0x24, 0x52, 0xf4, 0xd5, 0xbf, 0x62, 0xc9, 0x03, + // Entry 180 - 1BF + 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98, + 0x21, 0x18, 0x81, 0x00, 0x00, 0x01, 0x40, 0x82, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0x80, 0xea, + 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x03, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x20, 0x04, 0xa6, 0x00, 0x04, 0x00, 0x00, + 0x81, 0x50, 0x00, 0x00, 0x00, 0x11, 0x84, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x55, + 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x40, + 0x30, 0x83, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1e, 0xcd, 0xbf, 0x7a, 0xbf, + // Entry 200 - 23F + 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27, + 0xed, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5, + 
0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe1, 0xdf, + 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x81, 0xe3, + 0x92, 0x54, 0xdb, 0x28, 0xd3, 0x5f, 0xfe, 0x6d, + 0x79, 0xed, 0x1c, 0x7d, 0x04, 0x08, 0x00, 0x01, + 0x21, 0x12, 0x64, 0x5f, 0xdd, 0x0e, 0x85, 0x4f, + 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54, + // Entry 240 - 27F + 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00, + 0x20, 0x7b, 0x78, 0x02, 0x07, 0x84, 0x00, 0xf0, + 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00, + 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, + 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00, + 0x91, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff, + 0x7b, 0x7f, 0x70, 0x00, 0x05, 0x9b, 0xdd, 0x66, + // Entry 280 - 2BF + 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, + 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51, + 0xe2, 0xef, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x60, + 0xe7, 0x48, 0x00, 0x81, 0x20, 0xc0, 0x05, 0x80, + 0x03, 0x00, 0x00, 0x00, 0x8c, 0x50, 0x40, 0x04, + 0x84, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20, + // Entry 2C0 - 2FF + 0x02, 0x50, 0x80, 0x11, 0x00, 0x91, 0x6c, 0xe2, + 0x50, 0x27, 0x1d, 0x11, 0x29, 0x06, 0x59, 0xe9, + 0x33, 0x08, 0x00, 0x20, 0x04, 0x40, 0x10, 0x00, + 0x00, 0x00, 0x50, 0x44, 0x92, 0x49, 0xd6, 0x5d, + 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, + 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01, + 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x00, 0x08, + 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x8d, 0x12, 0x00, + // Entry 300 - 33F + 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0, + 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, + 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80, + 0x00, 0x01, 0xd0, 0x16, 0x40, 0x00, 0x10, 0xb0, + 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, + 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80, + 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00, + // Entry 340 - 37F + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, + 0x00, 0x10, 0x00, 0x00, 0x00, 0xf0, 0x84, 0xe3, + 0xdd, 0xbf, 0xf9, 0xf9, 0x3b, 0x7f, 0x7f, 0xdb, + 0xfd, 0xfc, 0xfe, 0xdf, 0xff, 0xfd, 0xff, 0xf6, + 0xfb, 0xfc, 0xf7, 0x1f, 0xff, 0xb3, 0x6c, 0xff, + 0xd9, 0xad, 0xdf, 0xfe, 0xef, 0xba, 0xdf, 0xff, + 0xff, 0xff, 0xb7, 0xdd, 0x7d, 0xbf, 0xab, 0x7f, + 0xfd, 0xfd, 0xdf, 0x2f, 0x9c, 0xdf, 0xf3, 0x6f, + // Entry 380 - 3BF + 0xdf, 0xdd, 0xff, 0xfb, 0xee, 0xd2, 0xab, 0x5f, + 0xd5, 0xdf, 0x7f, 0xff, 0xeb, 0xff, 0xe4, 0x4d, + 0xf9, 0xff, 0xfe, 0xf7, 0xfd, 0xdf, 0xfb, 0xbf, + 0xee, 0xdb, 0x6f, 0xef, 0xff, 0x7f, 0xff, 0xff, + 0xf7, 0x5f, 0xd3, 0x3b, 0xfd, 0xd9, 0xdf, 0xeb, + 0xbc, 0x08, 0x05, 0x24, 0xff, 0x07, 0x70, 0xfe, + 0xe6, 0x5e, 0x00, 0x08, 0x00, 0x83, 0x3d, 0x1b, + 0x06, 0xe6, 0x72, 0x60, 0xd1, 0x3c, 0x7f, 0x44, + // Entry 3C0 - 3FF + 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57, + 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7, + 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x00, + 0x40, 0x54, 0x9f, 0x8a, 0xdb, 0xf9, 0x2e, 0x11, + 0x86, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x40, 0x01, + 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x40, 0x00, 0x10, + 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, + 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe, + // Entry 400 - 43F + 0x53, 0x6f, 0xdf, 0xe7, 0xdb, 0x65, 0xbb, 0x7f, + 0xfa, 0xff, 0x77, 0xf3, 0xef, 0xbf, 0xfd, 0xf7, + 0xdf, 0xdf, 0x9b, 0x7f, 0xff, 0xff, 0x7f, 0x6f, + 0xf7, 0xfb, 0xeb, 0xdf, 0xbc, 0xff, 0xbf, 0x6b, + 0x7b, 0xfb, 0xff, 0xce, 0x76, 0xbd, 0xf7, 0xf7, + 0xdf, 0xdc, 0xf7, 0xf7, 0xff, 0xdf, 
0xf3, 0xfe, + 0xef, 0xff, 0xff, 0xff, 0xb6, 0x7f, 0x7f, 0xde, + 0xf7, 0xb9, 0xeb, 0x77, 0xff, 0xfb, 0xbf, 0xdf, + // Entry 440 - 47F + 0xfd, 0xfe, 0xfb, 0xff, 0xfe, 0xeb, 0x1f, 0x7d, + 0x2f, 0xfd, 0xb6, 0xb5, 0xa5, 0xfc, 0xff, 0xfd, + 0x7f, 0x4e, 0xbf, 0x8f, 0xae, 0xff, 0xee, 0xdf, + 0x7f, 0xf7, 0x73, 0x02, 0x02, 0x04, 0xfc, 0xf7, + 0xff, 0xb7, 0xd7, 0xef, 0xfe, 0xcd, 0xf5, 0xce, + 0xe2, 0x8e, 0xe7, 0xbf, 0xb7, 0xff, 0x56, 0xfd, + 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff, + 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x06, 0xc4, + // Entry 480 - 4BF + 0x93, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb, + 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20, + 0x14, 0x00, 0x55, 0x51, 0x82, 0x65, 0xf5, 0x41, + 0xe2, 0xff, 0xfc, 0xdf, 0x02, 0x05, 0xc5, 0x05, + 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x05, + 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00, + 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xf1, + // Entry 4C0 - 4FF + 0xfd, 0x47, 0x69, 0x06, 0x95, 0x06, 0x57, 0xed, + 0xfb, 0x4d, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40, + 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, + 0xb8, 0x4f, 0x10, 0x8e, 0x89, 0x46, 0xde, 0xf7, + 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, + 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d, + 0xbe, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41, + // Entry 500 - 53F + 0x30, 0xff, 0x79, 0x72, 0x04, 0x00, 0x00, 0x49, + 0x2d, 0x14, 0x27, 0x57, 0xed, 0xf1, 0x3f, 0xe7, + 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xf8, + 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xe7, 0xf7, + 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7e, 0x10, + 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9, + 0x5b, 0x05, 0x86, 0xed, 0xf5, 0x77, 0xbd, 0x3c, + 0x00, 0x00, 0x00, 0x42, 0x71, 0x42, 0x00, 0x40, + // Entry 540 - 57F + 0x00, 0x00, 0x01, 0x43, 0x19, 0x00, 0x08, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // Entry 580 - 5BF + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d, + 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0xce, 0xfb, 0xbf, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x30, 0x15, 0xa3, 0x10, 0x00, 0x00, 0x00, + 0x11, 0x04, 0x16, 0x00, 0x00, 0x02, 0x00, 0x81, + 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40, + // Entry 5C0 - 5FF + 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0x3e, 0x02, + 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02, + 0x19, 0x00, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, + 0x31, 0x00, 0x00, 0x00, 0x01, 0x18, 0x02, 0x20, + 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, + 0x00, 0x1f, 0xdf, 0xd2, 0xb9, 0xff, 0xfd, 0x3f, + 0x1f, 0x98, 0xcf, 0x9c, 0xff, 0xaf, 0x5f, 0xfe, + // Entry 600 - 63F + 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xd9, + 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1, + 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7, + 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd, + 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x9f, + 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe, + 0xbe, 0x5f, 0x46, 0x5b, 0xe9, 0x5f, 0x50, 0x18, + 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f, + // Entry 640 - 67F + 0x75, 0xc4, 0x7d, 0x81, 0x92, 0xf5, 0x57, 0x6c, + 
0xff, 0xe4, 0xef, 0x6f, 0xff, 0xfc, 0xdd, 0xde, + 0xfc, 0xfd, 0x76, 0x5f, 0x7a, 0x3f, 0x00, 0x98, + 0x02, 0xfb, 0xa3, 0xef, 0xf3, 0xd6, 0xf2, 0xff, + 0xb9, 0xda, 0x7d, 0xd0, 0x3e, 0x15, 0x7b, 0xb4, + 0xf5, 0x3e, 0xff, 0xff, 0xf1, 0xf7, 0xff, 0xe7, + 0x5f, 0xff, 0xff, 0x9e, 0xdb, 0xf6, 0xd7, 0xb9, + 0xef, 0x27, 0x80, 0xbb, 0xc5, 0xff, 0xff, 0xe3, + // Entry 680 - 6BF + 0x97, 0x9d, 0xbf, 0x9f, 0xf7, 0xc7, 0xfd, 0x37, + 0xce, 0x7f, 0x04, 0x1d, 0x73, 0x7f, 0xf8, 0xda, + 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x79, 0xa0, + 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08, + 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x01, 0x06, + 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, + 0x04, 0x00, 0x10, 0xdc, 0x58, 0xd7, 0x0d, 0x0f, + // Entry 6C0 - 6FF + 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd5, 0x42, 0x08, + 0x40, 0x00, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, + 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x48, 0x41, + 0x24, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xab, + 0x6d, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00, + // Entry 700 - 73F + 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x80, 0x86, 0xc2, 0x00, 0x00, 0x00, 0x00, 0x01, + 0xff, 0x18, 0x02, 0x00, 0x02, 0xf0, 0xfd, 0x79, + 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, + 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 740 - 77F + 0x00, 0x00, 0x00, 0xef, 0xd5, 0xfd, 0xcf, 0x7e, + 0xb0, 0x11, 0x00, 0x00, 0x00, 0x92, 0x01, 0x44, + 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04, + 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a, + 0x01, 0x00, 0x00, 0xb0, 0x80, 0x20, 0x55, 0x75, + 0x97, 0x7c, 0xdf, 0x31, 0xcc, 0x68, 0xd1, 0x03, + 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60, + // Entry 780 - 7BF + 0x03, 0x68, 0x01, 0x10, 0x8b, 0x38, 0x8a, 0x01, + 0x00, 0x00, 0x20, 0x00, 0x24, 0x44, 0x00, 0x00, + 0x10, 0x03, 0x11, 0x02, 0x01, 0x00, 0x00, 0xf0, + 0xf5, 0xff, 0xd5, 0x97, 0xbc, 0x70, 0xd6, 0x78, + 0x78, 0x15, 0x50, 0x01, 0xa4, 0x84, 0xa9, 0x41, + 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x00, + 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, + 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed, + // Entry 7C0 - 7FF + 0xdd, 0xbf, 0xf2, 0x5d, 0xc7, 0x0c, 0xd5, 0x42, + 0xfc, 0xff, 0xf7, 0x1f, 0x00, 0x80, 0x40, 0x56, + 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff, + 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d, + 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80, + 0x10, 0x20, 0x24, 0x00, 0xff, 0x2f, 0xd3, 0x60, + 0xfe, 0x01, 0x02, 0x88, 0x0a, 0x40, 0x16, 0x01, + 0x01, 0x15, 0x2b, 0x3c, 0x01, 0x00, 0x00, 0x10, + // Entry 800 - 83F + 0x90, 0x49, 0x41, 0x02, 0x02, 0x01, 0xe1, 0xbf, + 0xbf, 0x03, 0x00, 0x00, 0x10, 0xd4, 0xa3, 0xd1, + 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3, + 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80, + 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84, + 0x2f, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93, + 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, + 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, + // Entry 840 - 87F + 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x16, 0x81, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, + 0x84, 0x00, 0x21, 0xc0, 0x23, 0x24, 0x00, 0x00, + 0x00, 0xcb, 0xe4, 0x3a, 0x46, 0x88, 
0x14, 0xf1, + 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50, + 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, + 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, + // Entry 880 - 8BF + 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24, + 0x0a, 0x00, 0x80, 0x00, 0x00, +} + +// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +// to 2-letter language codes that cannot be derived using the method described above. +// Each 3-letter code is followed by its 1-byte langID. +const altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff" + +// altLangIndex is used to convert indexes in altLangISO3 to langIDs. +// Size: 12 bytes, 6 elements +var altLangIndex = [6]uint16{ + 0x0281, 0x0407, 0x01fb, 0x03e5, 0x013e, 0x0208, +} + +// AliasMap maps langIDs to their suggested replacements. +// Size: 716 bytes, 179 elements +var AliasMap = [179]FromTo{ + 0: {From: 0x82, To: 0x88}, + 1: {From: 0x187, To: 0x1ae}, + 2: {From: 0x1f3, To: 0x1e1}, + 3: {From: 0x1fb, To: 0x1bc}, + 4: {From: 0x208, To: 0x512}, + 5: {From: 0x20f, To: 0x20e}, + 6: {From: 0x310, To: 0x3dc}, + 7: {From: 0x347, To: 0x36f}, + 8: {From: 0x407, To: 0x432}, + 9: {From: 0x47a, To: 0x153}, + 10: {From: 0x490, To: 0x451}, + 11: {From: 0x4a2, To: 0x21}, + 12: {From: 0x53e, To: 0x544}, + 13: {From: 0x58f, To: 0x12d}, + 14: {From: 0x630, To: 0x1eb1}, + 15: {From: 0x651, To: 0x431}, + 16: {From: 0x662, To: 0x431}, + 17: {From: 0x6ed, To: 0x3a}, + 18: {From: 0x6f8, To: 0x1d7}, + 19: {From: 0x709, To: 0x3625}, + 20: {From: 0x73e, To: 0x21a1}, + 21: {From: 0x7b3, To: 0x56}, + 22: {From: 0x7b9, To: 0x299b}, + 23: {From: 0x7c5, To: 0x58}, + 24: {From: 0x7e6, To: 0x145}, + 25: {From: 0x80c, To: 0x5a}, + 26: {From: 0x815, To: 0x8d}, + 27: {From: 0x87e, To: 0x810}, + 28: {From: 0x8a8, To: 0x8b7}, + 29: {From: 0x8c3, To: 0xee3}, + 30: {From: 0x8fa, To: 0x1dc}, + 31: {From: 0x9ef, To: 0x331}, + 32: {From: 0xa36, To: 0x2c5}, + 33: {From: 0xa3d, To: 0xbf}, + 34: {From: 0xabe, To: 0x3322}, + 35: {From: 0xb38, To: 0x529}, + 36: {From: 0xb75, To: 0x265a}, + 37: {From: 0xb7e, To: 0xbc3}, + 38: {From: 0xb9b, To: 0x44e}, + 39: {From: 0xbbc, To: 0x4229}, + 40: {From: 0xbbf, To: 0x529}, + 41: {From: 0xbfe, To: 0x2da7}, + 42: {From: 0xc2e, To: 0x3181}, + 43: {From: 0xcb9, To: 0xf3}, + 44: {From: 0xd08, To: 0xfa}, + 45: {From: 0xdc8, To: 0x11a}, + 46: {From: 0xdd7, To: 0x32d}, + 47: {From: 0xdf8, To: 0xdfb}, + 48: {From: 0xdfe, To: 0x531}, + 49: {From: 0xe01, To: 0xdf3}, + 50: {From: 0xedf, To: 0x205a}, + 51: {From: 0xee9, To: 0x222e}, + 52: {From: 0xeee, To: 0x2e9a}, + 53: {From: 0xf39, To: 0x367}, + 54: {From: 0x10d0, To: 0x140}, + 55: {From: 0x1104, To: 0x2d0}, + 56: {From: 0x11a0, To: 0x1ec}, + 57: {From: 0x1279, To: 0x21}, + 58: {From: 0x1424, To: 0x15e}, + 59: {From: 0x1470, To: 0x14e}, + 60: {From: 0x151f, To: 0xd9b}, + 61: {From: 0x1523, To: 0x390}, + 62: {From: 0x1532, To: 0x19f}, + 63: {From: 0x1580, To: 0x210}, + 64: {From: 0x1583, To: 0x10d}, + 65: {From: 0x15a3, To: 0x3caf}, + 66: {From: 0x1630, To: 0x222e}, + 67: {From: 0x166a, To: 0x19b}, + 68: {From: 0x16c8, To: 0x136}, + 69: {From: 0x1700, To: 0x29f8}, + 70: {From: 0x1718, To: 0x194}, + 71: {From: 0x1727, To: 0xf3f}, + 72: {From: 0x177a, To: 0x178}, + 73: {From: 0x1809, To: 0x17b6}, + 74: {From: 0x1816, To: 0x18f3}, + 75: {From: 0x188a, To: 0x436}, + 76: {From: 0x1979, To: 0x1d01}, + 77: {From: 0x1a74, To: 0x2bb0}, + 78: {From: 0x1a8a, To: 0x1f8}, + 79: {From: 0x1b5a, To: 0x1fa}, + 
80: {From: 0x1b86, To: 0x1515}, + 81: {From: 0x1d64, To: 0x2c9b}, + 82: {From: 0x2038, To: 0x37b1}, + 83: {From: 0x203d, To: 0x20dd}, + 84: {From: 0x205a, To: 0x30b}, + 85: {From: 0x20e3, To: 0x274}, + 86: {From: 0x20ee, To: 0x263}, + 87: {From: 0x20f2, To: 0x22d}, + 88: {From: 0x20f9, To: 0x256}, + 89: {From: 0x210f, To: 0x21eb}, + 90: {From: 0x2135, To: 0x27d}, + 91: {From: 0x2160, To: 0x913}, + 92: {From: 0x2199, To: 0x121}, + 93: {From: 0x21ce, To: 0x1561}, + 94: {From: 0x21e6, To: 0x504}, + 95: {From: 0x21f4, To: 0x49f}, + 96: {From: 0x21fb, To: 0x269}, + 97: {From: 0x222d, To: 0x121}, + 98: {From: 0x2237, To: 0x121}, + 99: {From: 0x2262, To: 0x92a}, + 100: {From: 0x2316, To: 0x3226}, + 101: {From: 0x236a, To: 0x2835}, + 102: {From: 0x2382, To: 0x3365}, + 103: {From: 0x2472, To: 0x2c7}, + 104: {From: 0x24e4, To: 0x2ff}, + 105: {From: 0x24f0, To: 0x2fa}, + 106: {From: 0x24fa, To: 0x31f}, + 107: {From: 0x2550, To: 0xb5b}, + 108: {From: 0x25a9, To: 0xe2}, + 109: {From: 0x263e, To: 0x2d0}, + 110: {From: 0x26c9, To: 0x26b4}, + 111: {From: 0x26f9, To: 0x3c8}, + 112: {From: 0x2727, To: 0x3caf}, + 113: {From: 0x2755, To: 0x6a4}, + 114: {From: 0x2765, To: 0x26b4}, + 115: {From: 0x2789, To: 0x4358}, + 116: {From: 0x27c9, To: 0x2001}, + 117: {From: 0x28ea, To: 0x27b1}, + 118: {From: 0x28ef, To: 0x2837}, + 119: {From: 0x2914, To: 0x351}, + 120: {From: 0x2986, To: 0x2da7}, + 121: {From: 0x29f0, To: 0x96b}, + 122: {From: 0x2b1a, To: 0x38d}, + 123: {From: 0x2bfc, To: 0x395}, + 124: {From: 0x2c3f, To: 0x3caf}, + 125: {From: 0x2ce1, To: 0x2201}, + 126: {From: 0x2cfc, To: 0x3be}, + 127: {From: 0x2d13, To: 0x597}, + 128: {From: 0x2d47, To: 0x148}, + 129: {From: 0x2d48, To: 0x148}, + 130: {From: 0x2dff, To: 0x2f1}, + 131: {From: 0x2e08, To: 0x19cc}, + 132: {From: 0x2e1a, To: 0x2d95}, + 133: {From: 0x2e21, To: 0x292}, + 134: {From: 0x2e54, To: 0x7d}, + 135: {From: 0x2e65, To: 0x2282}, + 136: {From: 0x2ea0, To: 0x2e9b}, + 137: {From: 0x2eef, To: 0x2ed7}, + 138: {From: 0x3193, To: 0x3c4}, + 139: {From: 0x3366, To: 0x338e}, + 140: {From: 0x342a, To: 0x3dc}, + 141: {From: 0x34ee, To: 0x18d0}, + 142: {From: 0x35c8, To: 0x2c9b}, + 143: {From: 0x35e6, To: 0x412}, + 144: {From: 0x3658, To: 0x246}, + 145: {From: 0x3676, To: 0x3f4}, + 146: {From: 0x36fd, To: 0x445}, + 147: {From: 0x37c0, To: 0x121}, + 148: {From: 0x3816, To: 0x38f2}, + 149: {From: 0x382a, To: 0x2b48}, + 150: {From: 0x382b, To: 0x2c9b}, + 151: {From: 0x382f, To: 0xa9}, + 152: {From: 0x3832, To: 0x3228}, + 153: {From: 0x386c, To: 0x39a6}, + 154: {From: 0x3892, To: 0x3fc0}, + 155: {From: 0x38a5, To: 0x39d7}, + 156: {From: 0x38b4, To: 0x1fa4}, + 157: {From: 0x38b5, To: 0x2e9a}, + 158: {From: 0x395c, To: 0x47e}, + 159: {From: 0x3b4e, To: 0xd91}, + 160: {From: 0x3b78, To: 0x137}, + 161: {From: 0x3c99, To: 0x4bc}, + 162: {From: 0x3fbd, To: 0x100}, + 163: {From: 0x4208, To: 0xa91}, + 164: {From: 0x42be, To: 0x573}, + 165: {From: 0x42f9, To: 0x3f60}, + 166: {From: 0x4378, To: 0x25a}, + 167: {From: 0x43b8, To: 0xe6c}, + 168: {From: 0x43cd, To: 0x10f}, + 169: {From: 0x44af, To: 0x3322}, + 170: {From: 0x44e3, To: 0x512}, + 171: {From: 0x45ca, To: 0x2409}, + 172: {From: 0x45dd, To: 0x26dc}, + 173: {From: 0x4610, To: 0x48ae}, + 174: {From: 0x46ae, To: 0x46a0}, + 175: {From: 0x473e, To: 0x4745}, + 176: {From: 0x4817, To: 0x3503}, + 177: {From: 0x4916, To: 0x31f}, + 178: {From: 0x49a7, To: 0x523}, +} + +// Size: 179 bytes, 179 elements +var AliasTypes = [179]AliasType{ + // Entry 0 - 3F + 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 1, 2, + 1, 1, 2, 0, 0, 1, 0, 1, 2, 
1, 1, 0, 0, 0, 0, 2, + 1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, + 1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1, 1, 2, + // Entry 40 - 7F + 2, 0, 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0, 2, 1, + 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0, 1, 1, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, + // Entry 80 - BF + 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 0, 2, + 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 1, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, + 0, 1, 1, +} + +const ( + _Latn = 90 + _Hani = 57 + _Hans = 59 + _Hant = 60 + _Qaaa = 147 + _Qaai = 155 + _Qabx = 196 + _Zinh = 252 + _Zyyy = 257 + _Zzzz = 258 +) + +// script is an alphabetically sorted list of ISO 15924 codes. The index +// of the script in the string, divided by 4, is the internal scriptID. +const script tag.Index = "" + // Size: 1040 bytes + "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + + "BrahBraiBugiBuhdCakmCansCariChamCherChrsCirtCoptCpmnCprtCyrlCyrsDevaDiak" + + "DogrDsrtDuplEgydEgyhEgypElbaElymEthiGeokGeorGlagGongGonmGothGranGrekGujr" + + "GuruHanbHangHaniHanoHansHantHatrHebrHiraHluwHmngHmnpHrktHungIndsItalJamo" + + "JavaJpanJurcKaliKanaKharKhmrKhojKitlKitsKndaKoreKpelKthiLanaLaooLatfLatg" + + "LatnLekeLepcLimbLinaLinbLisuLomaLyciLydiMahjMakaMandManiMarcMayaMedfMend" + + "MercMeroMlymModiMongMoonMrooMteiMultMymrNandNarbNbatNewaNkdbNkgbNkooNshu" + + "OgamOlckOrkhOryaOsgeOsmaOugrPalmPaucPcunPelmPermPhagPhliPhlpPhlvPhnxPiqd" + + "PlrdPrtiPsinQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaamQaanQaao" + + "QaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabeQabfQabg" + + "QabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabwQabxRanj" + + "RjngRohgRoroRunrSamrSaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogdSogoSora" + + "SoyoSundSyloSyrcSyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglg" + + "ThaaThaiTibtTirhTnsaTotoUgarVaiiVispVithWaraWchoWoleXpeoXsuxYeziYiiiZanb" + + "ZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff\xff" + +// suppressScript is an index from langID to the dominant script for that language, +// if it exists. If a script is given, it should be suppressed from the language tag. 
+// Size: 1330 bytes, 1330 elements +var suppressScript = [1330]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry C0 - FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x5a, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xea, 0x00, 0x00, 0x00, 0x00, 0xec, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x5a, 0x00, + // Entry 140 - 17F + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x5a, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x5a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x35, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x22, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x5a, 0x00, 0x5a, 0x5a, 0x00, 0x08, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x5a, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x2e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x20, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x53, 0x00, 0x22, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x6e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x5a, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, + // Entry 340 - 37F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x5a, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x5a, 0x00, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 380 - 3BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, + // Entry 3C0 - 3FF + 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x5a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 400 - 43F + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + // Entry 440 - 47F + 0x00, 0x00, 0x00, 0x00, 0x5a, 0x5a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xe3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xe6, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xeb, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, + // Entry 480 - 4BF + 0x5a, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 4C0 - 4FF + 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 500 - 53F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, + 0x00, 0x00, +} + +const ( + _001 = 1 + _419 = 31 + _BR = 65 + _CA = 73 + _ES = 110 + _GB = 123 + _MD = 188 + _PT = 238 + _UK = 306 + _US = 309 + _ZZ = 357 + _XA = 323 + _XC = 325 + _XK = 333 +) + +// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +// the UN.M49 codes used for groups.) +const isoRegionOffset = 32 + +// regionTypes defines the status of a region for various standards. 
+// Size: 358 bytes, 358 elements +var regionTypes = [358]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x04, + 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, + 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, + 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, + 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 80 - BF + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x00, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry C0 - FF + 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, 0x06, + 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, + 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + // Entry 100 - 13F + 0x05, 0x05, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x02, 0x06, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 140 - 17F + 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x06, 0x06, + 0x04, 0x06, 0x06, 0x04, 0x06, 0x05, +} + +// regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +// Each 2-letter codes is followed by two bytes with the following meaning: +// - [A-Z}{2}: the first letter of the 2-letter code plus these two +// letters form the 3-letter ISO code. +// - 0, n: index into altRegionISO3. 
+const regionISO tag.Index = "" + // Size: 1308 bytes + "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + + "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + + "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" + + "CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADOOMDY" + + "HYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03EZ FIINFJJIFKLKFMSMFORO" + + "FQ\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQNQGR" + + "RCGS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERLILSR" + + "IMMNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM\x00" + + "\x09KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSOLTTU" + + "LUUXLVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNPMQTQ" + + "MRRTMSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLDNOOR" + + "NPPLNQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM\x00" + + "\x12PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSSQTTT" + + "QU\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLBSCYC" + + "SDDNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXMSYYR" + + "SZWZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTTTOTV" + + "UVTWWNTZZAUAKRUGGAUK UMMIUN USSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVNNMVU" + + "UTWFLFWKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXNNNXO" + + "OOXPPPXQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUGZAAF" + + "ZMMBZRARZWWEZZZZ\xff\xff\xff\xff" + +// altRegionISO3 holds a list of 3-letter region codes that cannot be +// mapped to 2-letter codes using the default algorithm. This is a short list. +const altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN" + +// altRegionIDs holds a list of regionIDs the positions of which match those +// of the 3-letter ISO codes in altRegionISO3. +// Size: 22 bytes, 11 elements +var altRegionIDs = [11]uint16{ + 0x0057, 0x0070, 0x0088, 0x00a8, 0x00aa, 0x00ad, 0x00ea, 0x0105, + 0x0121, 0x015f, 0x00dc, +} + +// Size: 80 bytes, 20 elements +var regionOldMap = [20]FromTo{ + 0: {From: 0x44, To: 0xc4}, + 1: {From: 0x58, To: 0xa7}, + 2: {From: 0x5f, To: 0x60}, + 3: {From: 0x66, To: 0x3b}, + 4: {From: 0x79, To: 0x78}, + 5: {From: 0x93, To: 0x37}, + 6: {From: 0xa3, To: 0x133}, + 7: {From: 0xc1, To: 0x133}, + 8: {From: 0xd7, To: 0x13f}, + 9: {From: 0xdc, To: 0x2b}, + 10: {From: 0xef, To: 0x133}, + 11: {From: 0xf2, To: 0xe2}, + 12: {From: 0xfc, To: 0x70}, + 13: {From: 0x103, To: 0x164}, + 14: {From: 0x12a, To: 0x126}, + 15: {From: 0x132, To: 0x7b}, + 16: {From: 0x13a, To: 0x13e}, + 17: {From: 0x141, To: 0x133}, + 18: {From: 0x15d, To: 0x15e}, + 19: {From: 0x163, To: 0x4b}, +} + +// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +// codes indicating collections of regions. 
+// Size: 716 bytes, 358 elements +var m49 = [358]int16{ + // Entry 0 - 3F + 0, 1, 2, 3, 5, 9, 11, 13, + 14, 15, 17, 18, 19, 21, 29, 30, + 34, 35, 39, 53, 54, 57, 61, 142, + 143, 145, 150, 151, 154, 155, 202, 419, + 958, 0, 20, 784, 4, 28, 660, 8, + 51, 530, 24, 10, 32, 16, 40, 36, + 533, 248, 31, 70, 52, 50, 56, 854, + 100, 48, 108, 204, 652, 60, 96, 68, + // Entry 40 - 7F + 535, 76, 44, 64, 104, 74, 72, 112, + 84, 124, 166, 180, 140, 178, 756, 384, + 184, 152, 120, 156, 170, 0, 188, 891, + 296, 192, 132, 531, 162, 196, 203, 278, + 276, 0, 262, 208, 212, 214, 204, 12, + 0, 218, 233, 818, 732, 232, 724, 231, + 967, 0, 246, 242, 238, 583, 234, 0, + 250, 249, 266, 826, 308, 268, 254, 831, + // Entry 80 - BF + 288, 292, 304, 270, 324, 312, 226, 300, + 239, 320, 316, 624, 328, 344, 334, 340, + 191, 332, 348, 854, 0, 360, 372, 376, + 833, 356, 86, 368, 364, 352, 380, 832, + 388, 400, 392, 581, 404, 417, 116, 296, + 174, 659, 408, 410, 414, 136, 398, 418, + 422, 662, 438, 144, 430, 426, 440, 442, + 428, 434, 504, 492, 498, 499, 663, 450, + // Entry C0 - FF + 584, 581, 807, 466, 104, 496, 446, 580, + 474, 478, 500, 470, 480, 462, 454, 484, + 458, 508, 516, 540, 562, 574, 566, 548, + 558, 528, 578, 524, 10, 520, 536, 570, + 554, 512, 591, 0, 604, 258, 598, 608, + 586, 616, 666, 612, 630, 275, 620, 581, + 585, 600, 591, 634, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + // Entry 100 - 13F + 971, 972, 638, 716, 642, 688, 643, 646, + 682, 90, 690, 729, 752, 702, 654, 705, + 744, 703, 694, 674, 686, 706, 740, 728, + 678, 810, 222, 534, 760, 748, 0, 796, + 148, 260, 768, 764, 762, 772, 626, 795, + 788, 776, 626, 792, 780, 798, 158, 834, + 804, 800, 826, 581, 0, 840, 858, 860, + 336, 670, 704, 862, 92, 850, 704, 548, + // Entry 140 - 17F + 876, 581, 882, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 720, 887, 175, + 891, 710, 894, 180, 716, 999, +} + +// m49Index gives indexes into fromM49 based on the three most significant bits +// of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in +// +// fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +// +// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +// The region code is stored in the 9 lsb of the indexed value. +// Size: 18 bytes, 9 elements +var m49Index = [9]int16{ + 0, 59, 108, 143, 181, 220, 259, 291, + 333, +} + +// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details. 
+// Size: 666 bytes, 333 elements +var fromM49 = [333]uint16{ + // Entry 0 - 3F + 0x0201, 0x0402, 0x0603, 0x0824, 0x0a04, 0x1027, 0x1205, 0x142b, + 0x1606, 0x1867, 0x1a07, 0x1c08, 0x1e09, 0x202d, 0x220a, 0x240b, + 0x260c, 0x2822, 0x2a0d, 0x302a, 0x3825, 0x3a0e, 0x3c0f, 0x3e32, + 0x402c, 0x4410, 0x4611, 0x482f, 0x4e12, 0x502e, 0x5842, 0x6039, + 0x6435, 0x6628, 0x6834, 0x6a13, 0x6c14, 0x7036, 0x7215, 0x783d, + 0x7a16, 0x8043, 0x883f, 0x8c33, 0x9046, 0x9445, 0x9841, 0xa848, + 0xac9a, 0xb509, 0xb93c, 0xc03e, 0xc838, 0xd0c4, 0xd83a, 0xe047, + 0xe8a6, 0xf052, 0xf849, 0x085a, 0x10ad, 0x184c, 0x1c17, 0x1e18, + // Entry 40 - 7F + 0x20b3, 0x2219, 0x2920, 0x2c1a, 0x2e1b, 0x3051, 0x341c, 0x361d, + 0x3853, 0x3d2e, 0x445c, 0x4c4a, 0x5454, 0x5ca8, 0x5f5f, 0x644d, + 0x684b, 0x7050, 0x7856, 0x7e90, 0x8059, 0x885d, 0x941e, 0x965e, + 0x983b, 0xa063, 0xa864, 0xac65, 0xb469, 0xbd1a, 0xc486, 0xcc6f, + 0xce6f, 0xd06d, 0xd26a, 0xd476, 0xdc74, 0xde88, 0xe473, 0xec72, + 0xf031, 0xf279, 0xf478, 0xfc7e, 0x04e5, 0x0921, 0x0c62, 0x147a, + 0x187d, 0x1c83, 0x26ed, 0x2860, 0x2c5f, 0x3060, 0x4080, 0x4881, + 0x50a7, 0x5887, 0x6082, 0x687c, 0x7085, 0x788a, 0x8089, 0x8884, + // Entry 80 - BF + 0x908c, 0x9891, 0x9c8e, 0xa138, 0xa88f, 0xb08d, 0xb892, 0xc09d, + 0xc899, 0xd095, 0xd89c, 0xe09b, 0xe896, 0xf097, 0xf89e, 0x004f, + 0x08a0, 0x10a2, 0x1cae, 0x20a1, 0x28a4, 0x30aa, 0x34ab, 0x3cac, + 0x42a5, 0x44af, 0x461f, 0x4cb0, 0x54b5, 0x58b8, 0x5cb4, 0x64b9, + 0x6cb2, 0x70b6, 0x74b7, 0x7cc6, 0x84bf, 0x8cce, 0x94d0, 0x9ccd, + 0xa4c3, 0xaccb, 0xb4c8, 0xbcc9, 0xc0cc, 0xc8cf, 0xd8bb, 0xe0c5, + 0xe4bc, 0xe6bd, 0xe8ca, 0xf0ba, 0xf8d1, 0x00e1, 0x08d2, 0x10dd, + 0x18db, 0x20d9, 0x2429, 0x265b, 0x2a30, 0x2d1b, 0x2e40, 0x30de, + // Entry C0 - FF + 0x38d3, 0x493f, 0x54e0, 0x5cd8, 0x64d4, 0x6cd6, 0x74df, 0x7cd5, + 0x84da, 0x88c7, 0x8b33, 0x8e75, 0x90c0, 0x92f0, 0x94e8, 0x9ee2, + 0xace6, 0xb0f1, 0xb8e4, 0xc0e7, 0xc8eb, 0xd0e9, 0xd8ee, 0xe08b, + 0xe526, 0xecec, 0xf4f3, 0xfd02, 0x0504, 0x0706, 0x0d07, 0x183c, + 0x1d0e, 0x26a9, 0x2826, 0x2cb1, 0x2ebe, 0x34ea, 0x3d39, 0x4513, + 0x4d18, 0x5508, 0x5d14, 0x6105, 0x650a, 0x6d12, 0x7d0d, 0x7f11, + 0x813e, 0x830f, 0x8515, 0x8d61, 0x9964, 0xa15d, 0xa86e, 0xb117, + 0xb30b, 0xb86c, 0xc10b, 0xc916, 0xd110, 0xd91d, 0xe10c, 0xe84e, + // Entry 100 - 13F + 0xf11c, 0xf524, 0xf923, 0x0122, 0x0925, 0x1129, 0x192c, 0x2023, + 0x2928, 0x312b, 0x3727, 0x391f, 0x3d2d, 0x4131, 0x4930, 0x4ec2, + 0x5519, 0x646b, 0x747b, 0x7e7f, 0x809f, 0x8298, 0x852f, 0x9135, + 0xa53d, 0xac37, 0xb536, 0xb937, 0xbd3b, 0xd940, 0xe542, 0xed5e, + 0xef5e, 0xf657, 0xfd62, 0x7c20, 0x7ef4, 0x80f5, 0x82f6, 0x84f7, + 0x86f8, 0x88f9, 0x8afa, 0x8cfb, 0x8e70, 0x90fd, 0x92fe, 0x94ff, + 0x9700, 0x9901, 0x9b43, 0x9d44, 0x9f45, 0xa146, 0xa347, 0xa548, + 0xa749, 0xa94a, 0xab4b, 0xad4c, 0xaf4d, 0xb14e, 0xb34f, 0xb550, + // Entry 140 - 17F + 0xb751, 0xb952, 0xbb53, 0xbd54, 0xbf55, 0xc156, 0xc357, 0xc558, + 0xc759, 0xc95a, 0xcb5b, 0xcd5c, 0xcf65, +} + +// Size: 2014 bytes +var variantIndex = map[string]uint8{ + "1606nict": 0x0, + "1694acad": 0x1, + "1901": 0x2, + "1959acad": 0x3, + "1994": 0x61, + "1996": 0x4, + "abl1943": 0x5, + "akuapem": 0x6, + "alalc97": 0x63, + "aluku": 0x7, + "ao1990": 0x8, + "aranes": 0x9, + "arevela": 0xa, + "arevmda": 0xb, + "arkaika": 0xc, + "asante": 0xd, + "auvern": 0xe, + "baku1926": 0xf, + "balanka": 0x10, + "barla": 0x11, + "basiceng": 0x12, + "bauddha": 0x13, + "biscayan": 0x14, + "biske": 0x5c, + "bohoric": 0x15, + "boont": 0x16, + "bornholm": 0x17, + "cisaup": 0x18, + "colb1945": 0x19, + "cornu": 0x1a, + "creiss": 0x1b, + 
"dajnko": 0x1c, + "ekavsk": 0x1d, + "emodeng": 0x1e, + "fonipa": 0x64, + "fonkirsh": 0x65, + "fonnapa": 0x66, + "fonupa": 0x67, + "fonxsamp": 0x68, + "gascon": 0x1f, + "grclass": 0x20, + "grital": 0x21, + "grmistr": 0x22, + "hepburn": 0x23, + "heploc": 0x62, + "hognorsk": 0x24, + "hsistemo": 0x25, + "ijekavsk": 0x26, + "itihasa": 0x27, + "ivanchov": 0x28, + "jauer": 0x29, + "jyutping": 0x2a, + "kkcor": 0x2b, + "kociewie": 0x2c, + "kscor": 0x2d, + "laukika": 0x2e, + "lemosin": 0x2f, + "lengadoc": 0x30, + "lipaw": 0x5d, + "luna1918": 0x31, + "metelko": 0x32, + "monoton": 0x33, + "ndyuka": 0x34, + "nedis": 0x35, + "newfound": 0x36, + "nicard": 0x37, + "njiva": 0x5e, + "nulik": 0x38, + "osojs": 0x5f, + "oxendict": 0x39, + "pahawh2": 0x3a, + "pahawh3": 0x3b, + "pahawh4": 0x3c, + "pamaka": 0x3d, + "peano": 0x3e, + "petr1708": 0x3f, + "pinyin": 0x40, + "polyton": 0x41, + "provenc": 0x42, + "puter": 0x43, + "rigik": 0x44, + "rozaj": 0x45, + "rumgr": 0x46, + "scotland": 0x47, + "scouse": 0x48, + "simple": 0x69, + "solba": 0x60, + "sotav": 0x49, + "spanglis": 0x4a, + "surmiran": 0x4b, + "sursilv": 0x4c, + "sutsilv": 0x4d, + "tarask": 0x4e, + "tongyong": 0x4f, + "tunumiit": 0x50, + "uccor": 0x51, + "ucrcor": 0x52, + "ulster": 0x53, + "unifon": 0x54, + "vaidika": 0x55, + "valencia": 0x56, + "vallader": 0x57, + "vecdruka": 0x58, + "vivaraup": 0x59, + "wadegile": 0x5a, + "xsistemo": 0x5b, +} + +// variantNumSpecialized is the number of specialized variants in variants. +const variantNumSpecialized = 99 + +// nRegionGroups is the number of region groups. +const nRegionGroups = 33 + +type likelyLangRegion struct { + lang uint16 + region uint16 +} + +// likelyScript is a lookup table, indexed by scriptID, for the most likely +// languages and regions given a script. 
+// Size: 1040 bytes, 260 elements +var likelyScript = [260]likelyLangRegion{ + 1: {lang: 0x14e, region: 0x84}, + 3: {lang: 0x2a2, region: 0x106}, + 4: {lang: 0x1f, region: 0x99}, + 5: {lang: 0x3a, region: 0x6b}, + 7: {lang: 0x3b, region: 0x9c}, + 8: {lang: 0x1d7, region: 0x28}, + 9: {lang: 0x13, region: 0x9c}, + 10: {lang: 0x5b, region: 0x95}, + 11: {lang: 0x60, region: 0x52}, + 12: {lang: 0xb9, region: 0xb4}, + 13: {lang: 0x63, region: 0x95}, + 14: {lang: 0xa5, region: 0x35}, + 15: {lang: 0x3e9, region: 0x99}, + 17: {lang: 0x529, region: 0x12e}, + 18: {lang: 0x3b1, region: 0x99}, + 19: {lang: 0x15e, region: 0x78}, + 20: {lang: 0xc2, region: 0x95}, + 21: {lang: 0x9d, region: 0xe7}, + 22: {lang: 0xdb, region: 0x35}, + 23: {lang: 0xf3, region: 0x49}, + 24: {lang: 0x4f0, region: 0x12b}, + 25: {lang: 0xe7, region: 0x13e}, + 26: {lang: 0xe5, region: 0x135}, + 29: {lang: 0xf1, region: 0x6b}, + 31: {lang: 0x1a0, region: 0x5d}, + 32: {lang: 0x3e2, region: 0x106}, + 34: {lang: 0x1be, region: 0x99}, + 38: {lang: 0x15e, region: 0x78}, + 41: {lang: 0x133, region: 0x6b}, + 42: {lang: 0x431, region: 0x27}, + 44: {lang: 0x27, region: 0x6f}, + 46: {lang: 0x210, region: 0x7d}, + 47: {lang: 0xfe, region: 0x38}, + 49: {lang: 0x19b, region: 0x99}, + 50: {lang: 0x19e, region: 0x130}, + 51: {lang: 0x3e9, region: 0x99}, + 52: {lang: 0x136, region: 0x87}, + 53: {lang: 0x1a4, region: 0x99}, + 54: {lang: 0x39d, region: 0x99}, + 55: {lang: 0x529, region: 0x12e}, + 56: {lang: 0x254, region: 0xab}, + 57: {lang: 0x529, region: 0x53}, + 58: {lang: 0x1cb, region: 0xe7}, + 59: {lang: 0x529, region: 0x53}, + 60: {lang: 0x529, region: 0x12e}, + 61: {lang: 0x2fd, region: 0x9b}, + 62: {lang: 0x1bc, region: 0x97}, + 63: {lang: 0x200, region: 0xa2}, + 64: {lang: 0x1c5, region: 0x12b}, + 65: {lang: 0x1ca, region: 0xaf}, + 68: {lang: 0x1d5, region: 0x92}, + 70: {lang: 0x142, region: 0x9e}, + 71: {lang: 0x254, region: 0xab}, + 72: {lang: 0x20e, region: 0x95}, + 73: {lang: 0x200, region: 0xa2}, + 75: {lang: 0x135, region: 0xc4}, + 76: {lang: 0x200, region: 0xa2}, + 77: {lang: 0x3bb, region: 0xe8}, + 78: {lang: 0x24a, region: 0xa6}, + 79: {lang: 0x3fa, region: 0x99}, + 82: {lang: 0x251, region: 0x99}, + 83: {lang: 0x254, region: 0xab}, + 85: {lang: 0x88, region: 0x99}, + 86: {lang: 0x370, region: 0x123}, + 87: {lang: 0x2b8, region: 0xaf}, + 92: {lang: 0x29f, region: 0x99}, + 93: {lang: 0x2a8, region: 0x99}, + 94: {lang: 0x28f, region: 0x87}, + 95: {lang: 0x1a0, region: 0x87}, + 96: {lang: 0x2ac, region: 0x53}, + 98: {lang: 0x4f4, region: 0x12b}, + 99: {lang: 0x4f5, region: 0x12b}, + 100: {lang: 0x1be, region: 0x99}, + 102: {lang: 0x337, region: 0x9c}, + 103: {lang: 0x4f7, region: 0x53}, + 104: {lang: 0xa9, region: 0x53}, + 107: {lang: 0x2e8, region: 0x112}, + 108: {lang: 0x4f8, region: 0x10b}, + 109: {lang: 0x4f8, region: 0x10b}, + 110: {lang: 0x304, region: 0x99}, + 111: {lang: 0x31b, region: 0x99}, + 112: {lang: 0x30b, region: 0x53}, + 114: {lang: 0x31e, region: 0x35}, + 115: {lang: 0x30e, region: 0x99}, + 116: {lang: 0x414, region: 0xe8}, + 117: {lang: 0x331, region: 0xc4}, + 119: {lang: 0x4f9, region: 0x108}, + 120: {lang: 0x3b, region: 0xa1}, + 121: {lang: 0x353, region: 0xdb}, + 124: {lang: 0x2d0, region: 0x84}, + 125: {lang: 0x52a, region: 0x53}, + 126: {lang: 0x403, region: 0x96}, + 127: {lang: 0x3ee, region: 0x99}, + 128: {lang: 0x39b, region: 0xc5}, + 129: {lang: 0x395, region: 0x99}, + 130: {lang: 0x399, region: 0x135}, + 131: {lang: 0x429, region: 0x115}, + 133: {lang: 0x3b, region: 0x11c}, + 134: {lang: 0xfd, region: 
0xc4}, + 137: {lang: 0x27d, region: 0x106}, + 138: {lang: 0x2c9, region: 0x53}, + 139: {lang: 0x39f, region: 0x9c}, + 140: {lang: 0x39f, region: 0x53}, + 142: {lang: 0x3ad, region: 0xb0}, + 144: {lang: 0x1c6, region: 0x53}, + 145: {lang: 0x4fd, region: 0x9c}, + 198: {lang: 0x3cb, region: 0x95}, + 201: {lang: 0x372, region: 0x10c}, + 202: {lang: 0x420, region: 0x97}, + 204: {lang: 0x4ff, region: 0x15e}, + 205: {lang: 0x3f0, region: 0x99}, + 206: {lang: 0x45, region: 0x135}, + 207: {lang: 0x139, region: 0x7b}, + 208: {lang: 0x3e9, region: 0x99}, + 210: {lang: 0x3e9, region: 0x99}, + 211: {lang: 0x3fa, region: 0x99}, + 212: {lang: 0x40c, region: 0xb3}, + 215: {lang: 0x433, region: 0x99}, + 216: {lang: 0xef, region: 0xc5}, + 217: {lang: 0x43e, region: 0x95}, + 218: {lang: 0x44d, region: 0x35}, + 219: {lang: 0x44e, region: 0x9b}, + 223: {lang: 0x45a, region: 0xe7}, + 224: {lang: 0x11a, region: 0x99}, + 225: {lang: 0x45e, region: 0x53}, + 226: {lang: 0x232, region: 0x53}, + 227: {lang: 0x450, region: 0x99}, + 228: {lang: 0x4a5, region: 0x53}, + 229: {lang: 0x9f, region: 0x13e}, + 230: {lang: 0x461, region: 0x99}, + 232: {lang: 0x528, region: 0xba}, + 233: {lang: 0x153, region: 0xe7}, + 234: {lang: 0x128, region: 0xcd}, + 235: {lang: 0x46b, region: 0x123}, + 236: {lang: 0xa9, region: 0x53}, + 237: {lang: 0x2ce, region: 0x99}, + 240: {lang: 0x4ad, region: 0x11c}, + 241: {lang: 0x4be, region: 0xb4}, + 244: {lang: 0x1ce, region: 0x99}, + 247: {lang: 0x3a9, region: 0x9c}, + 248: {lang: 0x22, region: 0x9b}, + 250: {lang: 0x1ea, region: 0x53}, + 251: {lang: 0xef, region: 0xc5}, +} + +type likelyScriptRegion struct { + region uint16 + script uint16 + flags uint8 +} + +// likelyLang is a lookup table, indexed by langID, for the most likely +// scripts and regions given incomplete information. If more entries exist for a +// given language, region and script are the index and size respectively +// of the list in likelyLangList. 
+// Size: 7980 bytes, 1330 elements +var likelyLang = [1330]likelyScriptRegion{ + 0: {region: 0x135, script: 0x5a, flags: 0x0}, + 1: {region: 0x6f, script: 0x5a, flags: 0x0}, + 2: {region: 0x165, script: 0x5a, flags: 0x0}, + 3: {region: 0x165, script: 0x5a, flags: 0x0}, + 4: {region: 0x165, script: 0x5a, flags: 0x0}, + 5: {region: 0x7d, script: 0x20, flags: 0x0}, + 6: {region: 0x165, script: 0x5a, flags: 0x0}, + 7: {region: 0x165, script: 0x20, flags: 0x0}, + 8: {region: 0x80, script: 0x5a, flags: 0x0}, + 9: {region: 0x165, script: 0x5a, flags: 0x0}, + 10: {region: 0x165, script: 0x5a, flags: 0x0}, + 11: {region: 0x165, script: 0x5a, flags: 0x0}, + 12: {region: 0x95, script: 0x5a, flags: 0x0}, + 13: {region: 0x131, script: 0x5a, flags: 0x0}, + 14: {region: 0x80, script: 0x5a, flags: 0x0}, + 15: {region: 0x165, script: 0x5a, flags: 0x0}, + 16: {region: 0x165, script: 0x5a, flags: 0x0}, + 17: {region: 0x106, script: 0x20, flags: 0x0}, + 18: {region: 0x165, script: 0x5a, flags: 0x0}, + 19: {region: 0x9c, script: 0x9, flags: 0x0}, + 20: {region: 0x128, script: 0x5, flags: 0x0}, + 21: {region: 0x165, script: 0x5a, flags: 0x0}, + 22: {region: 0x161, script: 0x5a, flags: 0x0}, + 23: {region: 0x165, script: 0x5a, flags: 0x0}, + 24: {region: 0x165, script: 0x5a, flags: 0x0}, + 25: {region: 0x165, script: 0x5a, flags: 0x0}, + 26: {region: 0x165, script: 0x5a, flags: 0x0}, + 27: {region: 0x165, script: 0x5a, flags: 0x0}, + 28: {region: 0x52, script: 0x5a, flags: 0x0}, + 29: {region: 0x165, script: 0x5a, flags: 0x0}, + 30: {region: 0x165, script: 0x5a, flags: 0x0}, + 31: {region: 0x99, script: 0x4, flags: 0x0}, + 32: {region: 0x165, script: 0x5a, flags: 0x0}, + 33: {region: 0x80, script: 0x5a, flags: 0x0}, + 34: {region: 0x9b, script: 0xf8, flags: 0x0}, + 35: {region: 0x165, script: 0x5a, flags: 0x0}, + 36: {region: 0x165, script: 0x5a, flags: 0x0}, + 37: {region: 0x14d, script: 0x5a, flags: 0x0}, + 38: {region: 0x106, script: 0x20, flags: 0x0}, + 39: {region: 0x6f, script: 0x2c, flags: 0x0}, + 40: {region: 0x165, script: 0x5a, flags: 0x0}, + 41: {region: 0x165, script: 0x5a, flags: 0x0}, + 42: {region: 0xd6, script: 0x5a, flags: 0x0}, + 43: {region: 0x165, script: 0x5a, flags: 0x0}, + 45: {region: 0x165, script: 0x5a, flags: 0x0}, + 46: {region: 0x165, script: 0x5a, flags: 0x0}, + 47: {region: 0x165, script: 0x5a, flags: 0x0}, + 48: {region: 0x165, script: 0x5a, flags: 0x0}, + 49: {region: 0x165, script: 0x5a, flags: 0x0}, + 50: {region: 0x165, script: 0x5a, flags: 0x0}, + 51: {region: 0x95, script: 0x5a, flags: 0x0}, + 52: {region: 0x165, script: 0x5, flags: 0x0}, + 53: {region: 0x122, script: 0x5, flags: 0x0}, + 54: {region: 0x165, script: 0x5a, flags: 0x0}, + 55: {region: 0x165, script: 0x5a, flags: 0x0}, + 56: {region: 0x165, script: 0x5a, flags: 0x0}, + 57: {region: 0x165, script: 0x5a, flags: 0x0}, + 58: {region: 0x6b, script: 0x5, flags: 0x0}, + 59: {region: 0x0, script: 0x3, flags: 0x1}, + 60: {region: 0x165, script: 0x5a, flags: 0x0}, + 61: {region: 0x51, script: 0x5a, flags: 0x0}, + 62: {region: 0x3f, script: 0x5a, flags: 0x0}, + 63: {region: 0x67, script: 0x5, flags: 0x0}, + 65: {region: 0xba, script: 0x5, flags: 0x0}, + 66: {region: 0x6b, script: 0x5, flags: 0x0}, + 67: {region: 0x99, script: 0xe, flags: 0x0}, + 68: {region: 0x12f, script: 0x5a, flags: 0x0}, + 69: {region: 0x135, script: 0xce, flags: 0x0}, + 70: {region: 0x165, script: 0x5a, flags: 0x0}, + 71: {region: 0x165, script: 0x5a, flags: 0x0}, + 72: {region: 0x6e, script: 0x5a, flags: 0x0}, + 73: {region: 0x165, script: 0x5a, 
flags: 0x0}, + 74: {region: 0x165, script: 0x5a, flags: 0x0}, + 75: {region: 0x49, script: 0x5a, flags: 0x0}, + 76: {region: 0x165, script: 0x5a, flags: 0x0}, + 77: {region: 0x106, script: 0x20, flags: 0x0}, + 78: {region: 0x165, script: 0x5, flags: 0x0}, + 79: {region: 0x165, script: 0x5a, flags: 0x0}, + 80: {region: 0x165, script: 0x5a, flags: 0x0}, + 81: {region: 0x165, script: 0x5a, flags: 0x0}, + 82: {region: 0x99, script: 0x22, flags: 0x0}, + 83: {region: 0x165, script: 0x5a, flags: 0x0}, + 84: {region: 0x165, script: 0x5a, flags: 0x0}, + 85: {region: 0x165, script: 0x5a, flags: 0x0}, + 86: {region: 0x3f, script: 0x5a, flags: 0x0}, + 87: {region: 0x165, script: 0x5a, flags: 0x0}, + 88: {region: 0x3, script: 0x5, flags: 0x1}, + 89: {region: 0x106, script: 0x20, flags: 0x0}, + 90: {region: 0xe8, script: 0x5, flags: 0x0}, + 91: {region: 0x95, script: 0x5a, flags: 0x0}, + 92: {region: 0xdb, script: 0x22, flags: 0x0}, + 93: {region: 0x2e, script: 0x5a, flags: 0x0}, + 94: {region: 0x52, script: 0x5a, flags: 0x0}, + 95: {region: 0x165, script: 0x5a, flags: 0x0}, + 96: {region: 0x52, script: 0xb, flags: 0x0}, + 97: {region: 0x165, script: 0x5a, flags: 0x0}, + 98: {region: 0x165, script: 0x5a, flags: 0x0}, + 99: {region: 0x95, script: 0x5a, flags: 0x0}, + 100: {region: 0x165, script: 0x5a, flags: 0x0}, + 101: {region: 0x52, script: 0x5a, flags: 0x0}, + 102: {region: 0x165, script: 0x5a, flags: 0x0}, + 103: {region: 0x165, script: 0x5a, flags: 0x0}, + 104: {region: 0x165, script: 0x5a, flags: 0x0}, + 105: {region: 0x165, script: 0x5a, flags: 0x0}, + 106: {region: 0x4f, script: 0x5a, flags: 0x0}, + 107: {region: 0x165, script: 0x5a, flags: 0x0}, + 108: {region: 0x165, script: 0x5a, flags: 0x0}, + 109: {region: 0x165, script: 0x5a, flags: 0x0}, + 110: {region: 0x165, script: 0x2c, flags: 0x0}, + 111: {region: 0x165, script: 0x5a, flags: 0x0}, + 112: {region: 0x165, script: 0x5a, flags: 0x0}, + 113: {region: 0x47, script: 0x20, flags: 0x0}, + 114: {region: 0x165, script: 0x5a, flags: 0x0}, + 115: {region: 0x165, script: 0x5a, flags: 0x0}, + 116: {region: 0x10b, script: 0x5, flags: 0x0}, + 117: {region: 0x162, script: 0x5a, flags: 0x0}, + 118: {region: 0x165, script: 0x5a, flags: 0x0}, + 119: {region: 0x95, script: 0x5a, flags: 0x0}, + 120: {region: 0x165, script: 0x5a, flags: 0x0}, + 121: {region: 0x12f, script: 0x5a, flags: 0x0}, + 122: {region: 0x52, script: 0x5a, flags: 0x0}, + 123: {region: 0x99, script: 0xe3, flags: 0x0}, + 124: {region: 0xe8, script: 0x5, flags: 0x0}, + 125: {region: 0x99, script: 0x22, flags: 0x0}, + 126: {region: 0x38, script: 0x20, flags: 0x0}, + 127: {region: 0x99, script: 0x22, flags: 0x0}, + 128: {region: 0xe8, script: 0x5, flags: 0x0}, + 129: {region: 0x12b, script: 0x34, flags: 0x0}, + 131: {region: 0x99, script: 0x22, flags: 0x0}, + 132: {region: 0x165, script: 0x5a, flags: 0x0}, + 133: {region: 0x99, script: 0x22, flags: 0x0}, + 134: {region: 0xe7, script: 0x5a, flags: 0x0}, + 135: {region: 0x165, script: 0x5a, flags: 0x0}, + 136: {region: 0x99, script: 0x22, flags: 0x0}, + 137: {region: 0x165, script: 0x5a, flags: 0x0}, + 138: {region: 0x13f, script: 0x5a, flags: 0x0}, + 139: {region: 0x165, script: 0x5a, flags: 0x0}, + 140: {region: 0x165, script: 0x5a, flags: 0x0}, + 141: {region: 0xe7, script: 0x5a, flags: 0x0}, + 142: {region: 0x165, script: 0x5a, flags: 0x0}, + 143: {region: 0xd6, script: 0x5a, flags: 0x0}, + 144: {region: 0x165, script: 0x5a, flags: 0x0}, + 145: {region: 0x165, script: 0x5a, flags: 0x0}, + 146: {region: 0x165, script: 0x5a, flags: 0x0}, + 
147: {region: 0x165, script: 0x2c, flags: 0x0}, + 148: {region: 0x99, script: 0x22, flags: 0x0}, + 149: {region: 0x95, script: 0x5a, flags: 0x0}, + 150: {region: 0x165, script: 0x5a, flags: 0x0}, + 151: {region: 0x165, script: 0x5a, flags: 0x0}, + 152: {region: 0x114, script: 0x5a, flags: 0x0}, + 153: {region: 0x165, script: 0x5a, flags: 0x0}, + 154: {region: 0x165, script: 0x5a, flags: 0x0}, + 155: {region: 0x52, script: 0x5a, flags: 0x0}, + 156: {region: 0x165, script: 0x5a, flags: 0x0}, + 157: {region: 0xe7, script: 0x5a, flags: 0x0}, + 158: {region: 0x165, script: 0x5a, flags: 0x0}, + 159: {region: 0x13e, script: 0xe5, flags: 0x0}, + 160: {region: 0xc3, script: 0x5a, flags: 0x0}, + 161: {region: 0x165, script: 0x5a, flags: 0x0}, + 162: {region: 0x165, script: 0x5a, flags: 0x0}, + 163: {region: 0xc3, script: 0x5a, flags: 0x0}, + 164: {region: 0x165, script: 0x5a, flags: 0x0}, + 165: {region: 0x35, script: 0xe, flags: 0x0}, + 166: {region: 0x165, script: 0x5a, flags: 0x0}, + 167: {region: 0x165, script: 0x5a, flags: 0x0}, + 168: {region: 0x165, script: 0x5a, flags: 0x0}, + 169: {region: 0x53, script: 0xec, flags: 0x0}, + 170: {region: 0x165, script: 0x5a, flags: 0x0}, + 171: {region: 0x165, script: 0x5a, flags: 0x0}, + 172: {region: 0x165, script: 0x5a, flags: 0x0}, + 173: {region: 0x99, script: 0xe, flags: 0x0}, + 174: {region: 0x165, script: 0x5a, flags: 0x0}, + 175: {region: 0x9c, script: 0x5, flags: 0x0}, + 176: {region: 0x165, script: 0x5a, flags: 0x0}, + 177: {region: 0x4f, script: 0x5a, flags: 0x0}, + 178: {region: 0x78, script: 0x5a, flags: 0x0}, + 179: {region: 0x99, script: 0x22, flags: 0x0}, + 180: {region: 0xe8, script: 0x5, flags: 0x0}, + 181: {region: 0x99, script: 0x22, flags: 0x0}, + 182: {region: 0x165, script: 0x5a, flags: 0x0}, + 183: {region: 0x33, script: 0x5a, flags: 0x0}, + 184: {region: 0x165, script: 0x5a, flags: 0x0}, + 185: {region: 0xb4, script: 0xc, flags: 0x0}, + 186: {region: 0x52, script: 0x5a, flags: 0x0}, + 187: {region: 0x165, script: 0x2c, flags: 0x0}, + 188: {region: 0xe7, script: 0x5a, flags: 0x0}, + 189: {region: 0x165, script: 0x5a, flags: 0x0}, + 190: {region: 0xe8, script: 0x22, flags: 0x0}, + 191: {region: 0x106, script: 0x20, flags: 0x0}, + 192: {region: 0x15f, script: 0x5a, flags: 0x0}, + 193: {region: 0x165, script: 0x5a, flags: 0x0}, + 194: {region: 0x95, script: 0x5a, flags: 0x0}, + 195: {region: 0x165, script: 0x5a, flags: 0x0}, + 196: {region: 0x52, script: 0x5a, flags: 0x0}, + 197: {region: 0x165, script: 0x5a, flags: 0x0}, + 198: {region: 0x165, script: 0x5a, flags: 0x0}, + 199: {region: 0x165, script: 0x5a, flags: 0x0}, + 200: {region: 0x86, script: 0x5a, flags: 0x0}, + 201: {region: 0x165, script: 0x5a, flags: 0x0}, + 202: {region: 0x165, script: 0x5a, flags: 0x0}, + 203: {region: 0x165, script: 0x5a, flags: 0x0}, + 204: {region: 0x165, script: 0x5a, flags: 0x0}, + 205: {region: 0x6d, script: 0x2c, flags: 0x0}, + 206: {region: 0x165, script: 0x5a, flags: 0x0}, + 207: {region: 0x165, script: 0x5a, flags: 0x0}, + 208: {region: 0x52, script: 0x5a, flags: 0x0}, + 209: {region: 0x165, script: 0x5a, flags: 0x0}, + 210: {region: 0x165, script: 0x5a, flags: 0x0}, + 211: {region: 0xc3, script: 0x5a, flags: 0x0}, + 212: {region: 0x165, script: 0x5a, flags: 0x0}, + 213: {region: 0x165, script: 0x5a, flags: 0x0}, + 214: {region: 0x165, script: 0x5a, flags: 0x0}, + 215: {region: 0x6e, script: 0x5a, flags: 0x0}, + 216: {region: 0x165, script: 0x5a, flags: 0x0}, + 217: {region: 0x165, script: 0x5a, flags: 0x0}, + 218: {region: 0xd6, script: 0x5a, 
flags: 0x0}, + 219: {region: 0x35, script: 0x16, flags: 0x0}, + 220: {region: 0x106, script: 0x20, flags: 0x0}, + 221: {region: 0xe7, script: 0x5a, flags: 0x0}, + 222: {region: 0x165, script: 0x5a, flags: 0x0}, + 223: {region: 0x131, script: 0x5a, flags: 0x0}, + 224: {region: 0x8a, script: 0x5a, flags: 0x0}, + 225: {region: 0x75, script: 0x5a, flags: 0x0}, + 226: {region: 0x106, script: 0x20, flags: 0x0}, + 227: {region: 0x135, script: 0x5a, flags: 0x0}, + 228: {region: 0x49, script: 0x5a, flags: 0x0}, + 229: {region: 0x135, script: 0x1a, flags: 0x0}, + 230: {region: 0xa6, script: 0x5, flags: 0x0}, + 231: {region: 0x13e, script: 0x19, flags: 0x0}, + 232: {region: 0x165, script: 0x5a, flags: 0x0}, + 233: {region: 0x9b, script: 0x5, flags: 0x0}, + 234: {region: 0x165, script: 0x5a, flags: 0x0}, + 235: {region: 0x165, script: 0x5a, flags: 0x0}, + 236: {region: 0x165, script: 0x5a, flags: 0x0}, + 237: {region: 0x165, script: 0x5a, flags: 0x0}, + 238: {region: 0x165, script: 0x5a, flags: 0x0}, + 239: {region: 0xc5, script: 0xd8, flags: 0x0}, + 240: {region: 0x78, script: 0x5a, flags: 0x0}, + 241: {region: 0x6b, script: 0x1d, flags: 0x0}, + 242: {region: 0xe7, script: 0x5a, flags: 0x0}, + 243: {region: 0x49, script: 0x17, flags: 0x0}, + 244: {region: 0x130, script: 0x20, flags: 0x0}, + 245: {region: 0x49, script: 0x17, flags: 0x0}, + 246: {region: 0x49, script: 0x17, flags: 0x0}, + 247: {region: 0x49, script: 0x17, flags: 0x0}, + 248: {region: 0x49, script: 0x17, flags: 0x0}, + 249: {region: 0x10a, script: 0x5a, flags: 0x0}, + 250: {region: 0x5e, script: 0x5a, flags: 0x0}, + 251: {region: 0xe9, script: 0x5a, flags: 0x0}, + 252: {region: 0x49, script: 0x17, flags: 0x0}, + 253: {region: 0xc4, script: 0x86, flags: 0x0}, + 254: {region: 0x8, script: 0x2, flags: 0x1}, + 255: {region: 0x106, script: 0x20, flags: 0x0}, + 256: {region: 0x7b, script: 0x5a, flags: 0x0}, + 257: {region: 0x63, script: 0x5a, flags: 0x0}, + 258: {region: 0x165, script: 0x5a, flags: 0x0}, + 259: {region: 0x165, script: 0x5a, flags: 0x0}, + 260: {region: 0x165, script: 0x5a, flags: 0x0}, + 261: {region: 0x165, script: 0x5a, flags: 0x0}, + 262: {region: 0x135, script: 0x5a, flags: 0x0}, + 263: {region: 0x106, script: 0x20, flags: 0x0}, + 264: {region: 0xa4, script: 0x5a, flags: 0x0}, + 265: {region: 0x165, script: 0x5a, flags: 0x0}, + 266: {region: 0x165, script: 0x5a, flags: 0x0}, + 267: {region: 0x99, script: 0x5, flags: 0x0}, + 268: {region: 0x165, script: 0x5a, flags: 0x0}, + 269: {region: 0x60, script: 0x5a, flags: 0x0}, + 270: {region: 0x165, script: 0x5a, flags: 0x0}, + 271: {region: 0x49, script: 0x5a, flags: 0x0}, + 272: {region: 0x165, script: 0x5a, flags: 0x0}, + 273: {region: 0x165, script: 0x5a, flags: 0x0}, + 274: {region: 0x165, script: 0x5a, flags: 0x0}, + 275: {region: 0x165, script: 0x5, flags: 0x0}, + 276: {region: 0x49, script: 0x5a, flags: 0x0}, + 277: {region: 0x165, script: 0x5a, flags: 0x0}, + 278: {region: 0x165, script: 0x5a, flags: 0x0}, + 279: {region: 0xd4, script: 0x5a, flags: 0x0}, + 280: {region: 0x4f, script: 0x5a, flags: 0x0}, + 281: {region: 0x165, script: 0x5a, flags: 0x0}, + 282: {region: 0x99, script: 0x5, flags: 0x0}, + 283: {region: 0x165, script: 0x5a, flags: 0x0}, + 284: {region: 0x165, script: 0x5a, flags: 0x0}, + 285: {region: 0x165, script: 0x5a, flags: 0x0}, + 286: {region: 0x165, script: 0x2c, flags: 0x0}, + 287: {region: 0x60, script: 0x5a, flags: 0x0}, + 288: {region: 0xc3, script: 0x5a, flags: 0x0}, + 289: {region: 0xd0, script: 0x5a, flags: 0x0}, + 290: {region: 0x165, script: 
0x5a, flags: 0x0}, + 291: {region: 0xdb, script: 0x22, flags: 0x0}, + 292: {region: 0x52, script: 0x5a, flags: 0x0}, + 293: {region: 0x165, script: 0x5a, flags: 0x0}, + 294: {region: 0x165, script: 0x5a, flags: 0x0}, + 295: {region: 0x165, script: 0x5a, flags: 0x0}, + 296: {region: 0xcd, script: 0xea, flags: 0x0}, + 297: {region: 0x165, script: 0x5a, flags: 0x0}, + 298: {region: 0x165, script: 0x5a, flags: 0x0}, + 299: {region: 0x114, script: 0x5a, flags: 0x0}, + 300: {region: 0x37, script: 0x5a, flags: 0x0}, + 301: {region: 0x43, script: 0xec, flags: 0x0}, + 302: {region: 0x165, script: 0x5a, flags: 0x0}, + 303: {region: 0xa4, script: 0x5a, flags: 0x0}, + 304: {region: 0x80, script: 0x5a, flags: 0x0}, + 305: {region: 0xd6, script: 0x5a, flags: 0x0}, + 306: {region: 0x9e, script: 0x5a, flags: 0x0}, + 307: {region: 0x6b, script: 0x29, flags: 0x0}, + 308: {region: 0x165, script: 0x5a, flags: 0x0}, + 309: {region: 0xc4, script: 0x4b, flags: 0x0}, + 310: {region: 0x87, script: 0x34, flags: 0x0}, + 311: {region: 0x165, script: 0x5a, flags: 0x0}, + 312: {region: 0x165, script: 0x5a, flags: 0x0}, + 313: {region: 0xa, script: 0x2, flags: 0x1}, + 314: {region: 0x165, script: 0x5a, flags: 0x0}, + 315: {region: 0x165, script: 0x5a, flags: 0x0}, + 316: {region: 0x1, script: 0x5a, flags: 0x0}, + 317: {region: 0x165, script: 0x5a, flags: 0x0}, + 318: {region: 0x6e, script: 0x5a, flags: 0x0}, + 319: {region: 0x135, script: 0x5a, flags: 0x0}, + 320: {region: 0x6a, script: 0x5a, flags: 0x0}, + 321: {region: 0x165, script: 0x5a, flags: 0x0}, + 322: {region: 0x9e, script: 0x46, flags: 0x0}, + 323: {region: 0x165, script: 0x5a, flags: 0x0}, + 324: {region: 0x165, script: 0x5a, flags: 0x0}, + 325: {region: 0x6e, script: 0x5a, flags: 0x0}, + 326: {region: 0x52, script: 0x5a, flags: 0x0}, + 327: {region: 0x6e, script: 0x5a, flags: 0x0}, + 328: {region: 0x9c, script: 0x5, flags: 0x0}, + 329: {region: 0x165, script: 0x5a, flags: 0x0}, + 330: {region: 0x165, script: 0x5a, flags: 0x0}, + 331: {region: 0x165, script: 0x5a, flags: 0x0}, + 332: {region: 0x165, script: 0x5a, flags: 0x0}, + 333: {region: 0x86, script: 0x5a, flags: 0x0}, + 334: {region: 0xc, script: 0x2, flags: 0x1}, + 335: {region: 0x165, script: 0x5a, flags: 0x0}, + 336: {region: 0xc3, script: 0x5a, flags: 0x0}, + 337: {region: 0x72, script: 0x5a, flags: 0x0}, + 338: {region: 0x10b, script: 0x5, flags: 0x0}, + 339: {region: 0xe7, script: 0x5a, flags: 0x0}, + 340: {region: 0x10c, script: 0x5a, flags: 0x0}, + 341: {region: 0x73, script: 0x5a, flags: 0x0}, + 342: {region: 0x165, script: 0x5a, flags: 0x0}, + 343: {region: 0x165, script: 0x5a, flags: 0x0}, + 344: {region: 0x76, script: 0x5a, flags: 0x0}, + 345: {region: 0x165, script: 0x5a, flags: 0x0}, + 346: {region: 0x3b, script: 0x5a, flags: 0x0}, + 347: {region: 0x165, script: 0x5a, flags: 0x0}, + 348: {region: 0x165, script: 0x5a, flags: 0x0}, + 349: {region: 0x165, script: 0x5a, flags: 0x0}, + 350: {region: 0x78, script: 0x5a, flags: 0x0}, + 351: {region: 0x135, script: 0x5a, flags: 0x0}, + 352: {region: 0x78, script: 0x5a, flags: 0x0}, + 353: {region: 0x60, script: 0x5a, flags: 0x0}, + 354: {region: 0x60, script: 0x5a, flags: 0x0}, + 355: {region: 0x52, script: 0x5, flags: 0x0}, + 356: {region: 0x140, script: 0x5a, flags: 0x0}, + 357: {region: 0x165, script: 0x5a, flags: 0x0}, + 358: {region: 0x84, script: 0x5a, flags: 0x0}, + 359: {region: 0x165, script: 0x5a, flags: 0x0}, + 360: {region: 0xd4, script: 0x5a, flags: 0x0}, + 361: {region: 0x9e, script: 0x5a, flags: 0x0}, + 362: {region: 0xd6, script: 
0x5a, flags: 0x0}, + 363: {region: 0x165, script: 0x5a, flags: 0x0}, + 364: {region: 0x10b, script: 0x5a, flags: 0x0}, + 365: {region: 0xd9, script: 0x5a, flags: 0x0}, + 366: {region: 0x96, script: 0x5a, flags: 0x0}, + 367: {region: 0x80, script: 0x5a, flags: 0x0}, + 368: {region: 0x165, script: 0x5a, flags: 0x0}, + 369: {region: 0xbc, script: 0x5a, flags: 0x0}, + 370: {region: 0x165, script: 0x5a, flags: 0x0}, + 371: {region: 0x165, script: 0x5a, flags: 0x0}, + 372: {region: 0x165, script: 0x5a, flags: 0x0}, + 373: {region: 0x53, script: 0x3b, flags: 0x0}, + 374: {region: 0x165, script: 0x5a, flags: 0x0}, + 375: {region: 0x95, script: 0x5a, flags: 0x0}, + 376: {region: 0x165, script: 0x5a, flags: 0x0}, + 377: {region: 0x165, script: 0x5a, flags: 0x0}, + 378: {region: 0x99, script: 0x22, flags: 0x0}, + 379: {region: 0x165, script: 0x5a, flags: 0x0}, + 380: {region: 0x9c, script: 0x5, flags: 0x0}, + 381: {region: 0x7e, script: 0x5a, flags: 0x0}, + 382: {region: 0x7b, script: 0x5a, flags: 0x0}, + 383: {region: 0x165, script: 0x5a, flags: 0x0}, + 384: {region: 0x165, script: 0x5a, flags: 0x0}, + 385: {region: 0x165, script: 0x5a, flags: 0x0}, + 386: {region: 0x165, script: 0x5a, flags: 0x0}, + 387: {region: 0x165, script: 0x5a, flags: 0x0}, + 388: {region: 0x165, script: 0x5a, flags: 0x0}, + 389: {region: 0x6f, script: 0x2c, flags: 0x0}, + 390: {region: 0x165, script: 0x5a, flags: 0x0}, + 391: {region: 0xdb, script: 0x22, flags: 0x0}, + 392: {region: 0x165, script: 0x5a, flags: 0x0}, + 393: {region: 0xa7, script: 0x5a, flags: 0x0}, + 394: {region: 0x165, script: 0x5a, flags: 0x0}, + 395: {region: 0xe8, script: 0x5, flags: 0x0}, + 396: {region: 0x165, script: 0x5a, flags: 0x0}, + 397: {region: 0xe8, script: 0x5, flags: 0x0}, + 398: {region: 0x165, script: 0x5a, flags: 0x0}, + 399: {region: 0x165, script: 0x5a, flags: 0x0}, + 400: {region: 0x6e, script: 0x5a, flags: 0x0}, + 401: {region: 0x9c, script: 0x5, flags: 0x0}, + 402: {region: 0x165, script: 0x5a, flags: 0x0}, + 403: {region: 0x165, script: 0x2c, flags: 0x0}, + 404: {region: 0xf1, script: 0x5a, flags: 0x0}, + 405: {region: 0x165, script: 0x5a, flags: 0x0}, + 406: {region: 0x165, script: 0x5a, flags: 0x0}, + 407: {region: 0x165, script: 0x5a, flags: 0x0}, + 408: {region: 0x165, script: 0x2c, flags: 0x0}, + 409: {region: 0x165, script: 0x5a, flags: 0x0}, + 410: {region: 0x99, script: 0x22, flags: 0x0}, + 411: {region: 0x99, script: 0xe6, flags: 0x0}, + 412: {region: 0x95, script: 0x5a, flags: 0x0}, + 413: {region: 0xd9, script: 0x5a, flags: 0x0}, + 414: {region: 0x130, script: 0x32, flags: 0x0}, + 415: {region: 0x165, script: 0x5a, flags: 0x0}, + 416: {region: 0xe, script: 0x2, flags: 0x1}, + 417: {region: 0x99, script: 0xe, flags: 0x0}, + 418: {region: 0x165, script: 0x5a, flags: 0x0}, + 419: {region: 0x4e, script: 0x5a, flags: 0x0}, + 420: {region: 0x99, script: 0x35, flags: 0x0}, + 421: {region: 0x41, script: 0x5a, flags: 0x0}, + 422: {region: 0x54, script: 0x5a, flags: 0x0}, + 423: {region: 0x165, script: 0x5a, flags: 0x0}, + 424: {region: 0x80, script: 0x5a, flags: 0x0}, + 425: {region: 0x165, script: 0x5a, flags: 0x0}, + 426: {region: 0x165, script: 0x5a, flags: 0x0}, + 427: {region: 0xa4, script: 0x5a, flags: 0x0}, + 428: {region: 0x98, script: 0x5a, flags: 0x0}, + 429: {region: 0x165, script: 0x5a, flags: 0x0}, + 430: {region: 0xdb, script: 0x22, flags: 0x0}, + 431: {region: 0x165, script: 0x5a, flags: 0x0}, + 432: {region: 0x165, script: 0x5, flags: 0x0}, + 433: {region: 0x49, script: 0x5a, flags: 0x0}, + 434: {region: 0x165, 
script: 0x5, flags: 0x0}, + 435: {region: 0x165, script: 0x5a, flags: 0x0}, + 436: {region: 0x10, script: 0x3, flags: 0x1}, + 437: {region: 0x165, script: 0x5a, flags: 0x0}, + 438: {region: 0x53, script: 0x3b, flags: 0x0}, + 439: {region: 0x165, script: 0x5a, flags: 0x0}, + 440: {region: 0x135, script: 0x5a, flags: 0x0}, + 441: {region: 0x24, script: 0x5, flags: 0x0}, + 442: {region: 0x165, script: 0x5a, flags: 0x0}, + 443: {region: 0x165, script: 0x2c, flags: 0x0}, + 444: {region: 0x97, script: 0x3e, flags: 0x0}, + 445: {region: 0x165, script: 0x5a, flags: 0x0}, + 446: {region: 0x99, script: 0x22, flags: 0x0}, + 447: {region: 0x165, script: 0x5a, flags: 0x0}, + 448: {region: 0x73, script: 0x5a, flags: 0x0}, + 449: {region: 0x165, script: 0x5a, flags: 0x0}, + 450: {region: 0x165, script: 0x5a, flags: 0x0}, + 451: {region: 0xe7, script: 0x5a, flags: 0x0}, + 452: {region: 0x165, script: 0x5a, flags: 0x0}, + 453: {region: 0x12b, script: 0x40, flags: 0x0}, + 454: {region: 0x53, script: 0x90, flags: 0x0}, + 455: {region: 0x165, script: 0x5a, flags: 0x0}, + 456: {region: 0xe8, script: 0x5, flags: 0x0}, + 457: {region: 0x99, script: 0x22, flags: 0x0}, + 458: {region: 0xaf, script: 0x41, flags: 0x0}, + 459: {region: 0xe7, script: 0x5a, flags: 0x0}, + 460: {region: 0xe8, script: 0x5, flags: 0x0}, + 461: {region: 0xe6, script: 0x5a, flags: 0x0}, + 462: {region: 0x99, script: 0x22, flags: 0x0}, + 463: {region: 0x99, script: 0x22, flags: 0x0}, + 464: {region: 0x165, script: 0x5a, flags: 0x0}, + 465: {region: 0x90, script: 0x5a, flags: 0x0}, + 466: {region: 0x60, script: 0x5a, flags: 0x0}, + 467: {region: 0x53, script: 0x3b, flags: 0x0}, + 468: {region: 0x91, script: 0x5a, flags: 0x0}, + 469: {region: 0x92, script: 0x5a, flags: 0x0}, + 470: {region: 0x165, script: 0x5a, flags: 0x0}, + 471: {region: 0x28, script: 0x8, flags: 0x0}, + 472: {region: 0xd2, script: 0x5a, flags: 0x0}, + 473: {region: 0x78, script: 0x5a, flags: 0x0}, + 474: {region: 0x165, script: 0x5a, flags: 0x0}, + 475: {region: 0x165, script: 0x5a, flags: 0x0}, + 476: {region: 0xd0, script: 0x5a, flags: 0x0}, + 477: {region: 0xd6, script: 0x5a, flags: 0x0}, + 478: {region: 0x165, script: 0x5a, flags: 0x0}, + 479: {region: 0x165, script: 0x5a, flags: 0x0}, + 480: {region: 0x165, script: 0x5a, flags: 0x0}, + 481: {region: 0x95, script: 0x5a, flags: 0x0}, + 482: {region: 0x165, script: 0x5a, flags: 0x0}, + 483: {region: 0x165, script: 0x5a, flags: 0x0}, + 484: {region: 0x165, script: 0x5a, flags: 0x0}, + 486: {region: 0x122, script: 0x5a, flags: 0x0}, + 487: {region: 0xd6, script: 0x5a, flags: 0x0}, + 488: {region: 0x165, script: 0x5a, flags: 0x0}, + 489: {region: 0x165, script: 0x5a, flags: 0x0}, + 490: {region: 0x53, script: 0xfa, flags: 0x0}, + 491: {region: 0x165, script: 0x5a, flags: 0x0}, + 492: {region: 0x135, script: 0x5a, flags: 0x0}, + 493: {region: 0x165, script: 0x5a, flags: 0x0}, + 494: {region: 0x49, script: 0x5a, flags: 0x0}, + 495: {region: 0x165, script: 0x5a, flags: 0x0}, + 496: {region: 0x165, script: 0x5a, flags: 0x0}, + 497: {region: 0xe7, script: 0x5a, flags: 0x0}, + 498: {region: 0x165, script: 0x5a, flags: 0x0}, + 499: {region: 0x95, script: 0x5a, flags: 0x0}, + 500: {region: 0x106, script: 0x20, flags: 0x0}, + 501: {region: 0x1, script: 0x5a, flags: 0x0}, + 502: {region: 0x165, script: 0x5a, flags: 0x0}, + 503: {region: 0x165, script: 0x5a, flags: 0x0}, + 504: {region: 0x9d, script: 0x5a, flags: 0x0}, + 505: {region: 0x9e, script: 0x5a, flags: 0x0}, + 506: {region: 0x49, script: 0x17, flags: 0x0}, + 507: {region: 
0x97, script: 0x3e, flags: 0x0}, + 508: {region: 0x165, script: 0x5a, flags: 0x0}, + 509: {region: 0x165, script: 0x5a, flags: 0x0}, + 510: {region: 0x106, script: 0x5a, flags: 0x0}, + 511: {region: 0x165, script: 0x5a, flags: 0x0}, + 512: {region: 0xa2, script: 0x49, flags: 0x0}, + 513: {region: 0x165, script: 0x5a, flags: 0x0}, + 514: {region: 0xa0, script: 0x5a, flags: 0x0}, + 515: {region: 0x1, script: 0x5a, flags: 0x0}, + 516: {region: 0x165, script: 0x5a, flags: 0x0}, + 517: {region: 0x165, script: 0x5a, flags: 0x0}, + 518: {region: 0x165, script: 0x5a, flags: 0x0}, + 519: {region: 0x52, script: 0x5a, flags: 0x0}, + 520: {region: 0x130, script: 0x3e, flags: 0x0}, + 521: {region: 0x165, script: 0x5a, flags: 0x0}, + 522: {region: 0x12f, script: 0x5a, flags: 0x0}, + 523: {region: 0xdb, script: 0x22, flags: 0x0}, + 524: {region: 0x165, script: 0x5a, flags: 0x0}, + 525: {region: 0x63, script: 0x5a, flags: 0x0}, + 526: {region: 0x95, script: 0x5a, flags: 0x0}, + 527: {region: 0x95, script: 0x5a, flags: 0x0}, + 528: {region: 0x7d, script: 0x2e, flags: 0x0}, + 529: {region: 0x137, script: 0x20, flags: 0x0}, + 530: {region: 0x67, script: 0x5a, flags: 0x0}, + 531: {region: 0xc4, script: 0x5a, flags: 0x0}, + 532: {region: 0x165, script: 0x5a, flags: 0x0}, + 533: {region: 0x165, script: 0x5a, flags: 0x0}, + 534: {region: 0xd6, script: 0x5a, flags: 0x0}, + 535: {region: 0xa4, script: 0x5a, flags: 0x0}, + 536: {region: 0xc3, script: 0x5a, flags: 0x0}, + 537: {region: 0x106, script: 0x20, flags: 0x0}, + 538: {region: 0x165, script: 0x5a, flags: 0x0}, + 539: {region: 0x165, script: 0x5a, flags: 0x0}, + 540: {region: 0x165, script: 0x5a, flags: 0x0}, + 541: {region: 0x165, script: 0x5a, flags: 0x0}, + 542: {region: 0xd4, script: 0x5, flags: 0x0}, + 543: {region: 0xd6, script: 0x5a, flags: 0x0}, + 544: {region: 0x164, script: 0x5a, flags: 0x0}, + 545: {region: 0x165, script: 0x5a, flags: 0x0}, + 546: {region: 0x165, script: 0x5a, flags: 0x0}, + 547: {region: 0x12f, script: 0x5a, flags: 0x0}, + 548: {region: 0x122, script: 0x5, flags: 0x0}, + 549: {region: 0x165, script: 0x5a, flags: 0x0}, + 550: {region: 0x123, script: 0xeb, flags: 0x0}, + 551: {region: 0x5a, script: 0x5a, flags: 0x0}, + 552: {region: 0x52, script: 0x5a, flags: 0x0}, + 553: {region: 0x165, script: 0x5a, flags: 0x0}, + 554: {region: 0x4f, script: 0x5a, flags: 0x0}, + 555: {region: 0x99, script: 0x22, flags: 0x0}, + 556: {region: 0x99, script: 0x22, flags: 0x0}, + 557: {region: 0x4b, script: 0x5a, flags: 0x0}, + 558: {region: 0x95, script: 0x5a, flags: 0x0}, + 559: {region: 0x165, script: 0x5a, flags: 0x0}, + 560: {region: 0x41, script: 0x5a, flags: 0x0}, + 561: {region: 0x99, script: 0x5a, flags: 0x0}, + 562: {region: 0x53, script: 0xe2, flags: 0x0}, + 563: {region: 0x99, script: 0x22, flags: 0x0}, + 564: {region: 0xc3, script: 0x5a, flags: 0x0}, + 565: {region: 0x165, script: 0x5a, flags: 0x0}, + 566: {region: 0x99, script: 0x75, flags: 0x0}, + 567: {region: 0xe8, script: 0x5, flags: 0x0}, + 568: {region: 0x165, script: 0x5a, flags: 0x0}, + 569: {region: 0xa4, script: 0x5a, flags: 0x0}, + 570: {region: 0x165, script: 0x5a, flags: 0x0}, + 571: {region: 0x12b, script: 0x5a, flags: 0x0}, + 572: {region: 0x165, script: 0x5a, flags: 0x0}, + 573: {region: 0xd2, script: 0x5a, flags: 0x0}, + 574: {region: 0x165, script: 0x5a, flags: 0x0}, + 575: {region: 0xaf, script: 0x57, flags: 0x0}, + 576: {region: 0x165, script: 0x5a, flags: 0x0}, + 577: {region: 0x165, script: 0x5a, flags: 0x0}, + 578: {region: 0x13, script: 0x6, flags: 0x1}, + 579: 
{region: 0x165, script: 0x5a, flags: 0x0}, + 580: {region: 0x52, script: 0x5a, flags: 0x0}, + 581: {region: 0x82, script: 0x5a, flags: 0x0}, + 582: {region: 0xa4, script: 0x5a, flags: 0x0}, + 583: {region: 0x165, script: 0x5a, flags: 0x0}, + 584: {region: 0x165, script: 0x5a, flags: 0x0}, + 585: {region: 0x165, script: 0x5a, flags: 0x0}, + 586: {region: 0xa6, script: 0x4e, flags: 0x0}, + 587: {region: 0x2a, script: 0x5a, flags: 0x0}, + 588: {region: 0x165, script: 0x5a, flags: 0x0}, + 589: {region: 0x165, script: 0x5a, flags: 0x0}, + 590: {region: 0x165, script: 0x5a, flags: 0x0}, + 591: {region: 0x165, script: 0x5a, flags: 0x0}, + 592: {region: 0x165, script: 0x5a, flags: 0x0}, + 593: {region: 0x99, script: 0x52, flags: 0x0}, + 594: {region: 0x8b, script: 0x5a, flags: 0x0}, + 595: {region: 0x165, script: 0x5a, flags: 0x0}, + 596: {region: 0xab, script: 0x53, flags: 0x0}, + 597: {region: 0x106, script: 0x20, flags: 0x0}, + 598: {region: 0x99, script: 0x22, flags: 0x0}, + 599: {region: 0x165, script: 0x5a, flags: 0x0}, + 600: {region: 0x75, script: 0x5a, flags: 0x0}, + 601: {region: 0x165, script: 0x5a, flags: 0x0}, + 602: {region: 0xb4, script: 0x5a, flags: 0x0}, + 603: {region: 0x165, script: 0x5a, flags: 0x0}, + 604: {region: 0x165, script: 0x5a, flags: 0x0}, + 605: {region: 0x165, script: 0x5a, flags: 0x0}, + 606: {region: 0x165, script: 0x5a, flags: 0x0}, + 607: {region: 0x165, script: 0x5a, flags: 0x0}, + 608: {region: 0x165, script: 0x5a, flags: 0x0}, + 609: {region: 0x165, script: 0x5a, flags: 0x0}, + 610: {region: 0x165, script: 0x2c, flags: 0x0}, + 611: {region: 0x165, script: 0x5a, flags: 0x0}, + 612: {region: 0x106, script: 0x20, flags: 0x0}, + 613: {region: 0x112, script: 0x5a, flags: 0x0}, + 614: {region: 0xe7, script: 0x5a, flags: 0x0}, + 615: {region: 0x106, script: 0x5a, flags: 0x0}, + 616: {region: 0x165, script: 0x5a, flags: 0x0}, + 617: {region: 0x99, script: 0x22, flags: 0x0}, + 618: {region: 0x99, script: 0x5, flags: 0x0}, + 619: {region: 0x12f, script: 0x5a, flags: 0x0}, + 620: {region: 0x165, script: 0x5a, flags: 0x0}, + 621: {region: 0x52, script: 0x5a, flags: 0x0}, + 622: {region: 0x60, script: 0x5a, flags: 0x0}, + 623: {region: 0x165, script: 0x5a, flags: 0x0}, + 624: {region: 0x165, script: 0x5a, flags: 0x0}, + 625: {region: 0x165, script: 0x2c, flags: 0x0}, + 626: {region: 0x165, script: 0x5a, flags: 0x0}, + 627: {region: 0x165, script: 0x5a, flags: 0x0}, + 628: {region: 0x19, script: 0x3, flags: 0x1}, + 629: {region: 0x165, script: 0x5a, flags: 0x0}, + 630: {region: 0x165, script: 0x5a, flags: 0x0}, + 631: {region: 0x165, script: 0x5a, flags: 0x0}, + 632: {region: 0x165, script: 0x5a, flags: 0x0}, + 633: {region: 0x106, script: 0x20, flags: 0x0}, + 634: {region: 0x165, script: 0x5a, flags: 0x0}, + 635: {region: 0x165, script: 0x5a, flags: 0x0}, + 636: {region: 0x165, script: 0x5a, flags: 0x0}, + 637: {region: 0x106, script: 0x20, flags: 0x0}, + 638: {region: 0x165, script: 0x5a, flags: 0x0}, + 639: {region: 0x95, script: 0x5a, flags: 0x0}, + 640: {region: 0xe8, script: 0x5, flags: 0x0}, + 641: {region: 0x7b, script: 0x5a, flags: 0x0}, + 642: {region: 0x165, script: 0x5a, flags: 0x0}, + 643: {region: 0x165, script: 0x5a, flags: 0x0}, + 644: {region: 0x165, script: 0x5a, flags: 0x0}, + 645: {region: 0x165, script: 0x2c, flags: 0x0}, + 646: {region: 0x123, script: 0xeb, flags: 0x0}, + 647: {region: 0xe8, script: 0x5, flags: 0x0}, + 648: {region: 0x165, script: 0x5a, flags: 0x0}, + 649: {region: 0x165, script: 0x5a, flags: 0x0}, + 650: {region: 0x1c, script: 0x5, 
flags: 0x1}, + 651: {region: 0x165, script: 0x5a, flags: 0x0}, + 652: {region: 0x165, script: 0x5a, flags: 0x0}, + 653: {region: 0x165, script: 0x5a, flags: 0x0}, + 654: {region: 0x138, script: 0x5a, flags: 0x0}, + 655: {region: 0x87, script: 0x5e, flags: 0x0}, + 656: {region: 0x97, script: 0x3e, flags: 0x0}, + 657: {region: 0x12f, script: 0x5a, flags: 0x0}, + 658: {region: 0xe8, script: 0x5, flags: 0x0}, + 659: {region: 0x131, script: 0x5a, flags: 0x0}, + 660: {region: 0x165, script: 0x5a, flags: 0x0}, + 661: {region: 0xb7, script: 0x5a, flags: 0x0}, + 662: {region: 0x106, script: 0x20, flags: 0x0}, + 663: {region: 0x165, script: 0x5a, flags: 0x0}, + 664: {region: 0x95, script: 0x5a, flags: 0x0}, + 665: {region: 0x165, script: 0x5a, flags: 0x0}, + 666: {region: 0x53, script: 0xeb, flags: 0x0}, + 667: {region: 0x165, script: 0x5a, flags: 0x0}, + 668: {region: 0x165, script: 0x5a, flags: 0x0}, + 669: {region: 0x165, script: 0x5a, flags: 0x0}, + 670: {region: 0x165, script: 0x5a, flags: 0x0}, + 671: {region: 0x99, script: 0x5c, flags: 0x0}, + 672: {region: 0x165, script: 0x5a, flags: 0x0}, + 673: {region: 0x165, script: 0x5a, flags: 0x0}, + 674: {region: 0x106, script: 0x20, flags: 0x0}, + 675: {region: 0x131, script: 0x5a, flags: 0x0}, + 676: {region: 0x165, script: 0x5a, flags: 0x0}, + 677: {region: 0xd9, script: 0x5a, flags: 0x0}, + 678: {region: 0x165, script: 0x5a, flags: 0x0}, + 679: {region: 0x165, script: 0x5a, flags: 0x0}, + 680: {region: 0x21, script: 0x2, flags: 0x1}, + 681: {region: 0x165, script: 0x5a, flags: 0x0}, + 682: {region: 0x165, script: 0x5a, flags: 0x0}, + 683: {region: 0x9e, script: 0x5a, flags: 0x0}, + 684: {region: 0x53, script: 0x60, flags: 0x0}, + 685: {region: 0x95, script: 0x5a, flags: 0x0}, + 686: {region: 0x9c, script: 0x5, flags: 0x0}, + 687: {region: 0x135, script: 0x5a, flags: 0x0}, + 688: {region: 0x165, script: 0x5a, flags: 0x0}, + 689: {region: 0x165, script: 0x5a, flags: 0x0}, + 690: {region: 0x99, script: 0xe6, flags: 0x0}, + 691: {region: 0x9e, script: 0x5a, flags: 0x0}, + 692: {region: 0x165, script: 0x5a, flags: 0x0}, + 693: {region: 0x4b, script: 0x5a, flags: 0x0}, + 694: {region: 0x165, script: 0x5a, flags: 0x0}, + 695: {region: 0x165, script: 0x5a, flags: 0x0}, + 696: {region: 0xaf, script: 0x57, flags: 0x0}, + 697: {region: 0x165, script: 0x5a, flags: 0x0}, + 698: {region: 0x165, script: 0x5a, flags: 0x0}, + 699: {region: 0x4b, script: 0x5a, flags: 0x0}, + 700: {region: 0x165, script: 0x5a, flags: 0x0}, + 701: {region: 0x165, script: 0x5a, flags: 0x0}, + 702: {region: 0x162, script: 0x5a, flags: 0x0}, + 703: {region: 0x9c, script: 0x5, flags: 0x0}, + 704: {region: 0xb6, script: 0x5a, flags: 0x0}, + 705: {region: 0xb8, script: 0x5a, flags: 0x0}, + 706: {region: 0x4b, script: 0x5a, flags: 0x0}, + 707: {region: 0x4b, script: 0x5a, flags: 0x0}, + 708: {region: 0xa4, script: 0x5a, flags: 0x0}, + 709: {region: 0xa4, script: 0x5a, flags: 0x0}, + 710: {region: 0x9c, script: 0x5, flags: 0x0}, + 711: {region: 0xb8, script: 0x5a, flags: 0x0}, + 712: {region: 0x123, script: 0xeb, flags: 0x0}, + 713: {region: 0x53, script: 0x3b, flags: 0x0}, + 714: {region: 0x12b, script: 0x5a, flags: 0x0}, + 715: {region: 0x95, script: 0x5a, flags: 0x0}, + 716: {region: 0x52, script: 0x5a, flags: 0x0}, + 717: {region: 0x99, script: 0x22, flags: 0x0}, + 718: {region: 0x99, script: 0x22, flags: 0x0}, + 719: {region: 0x95, script: 0x5a, flags: 0x0}, + 720: {region: 0x23, script: 0x3, flags: 0x1}, + 721: {region: 0xa4, script: 0x5a, flags: 0x0}, + 722: {region: 0x165, script: 
0x5a, flags: 0x0}, + 723: {region: 0xcf, script: 0x5a, flags: 0x0}, + 724: {region: 0x165, script: 0x5a, flags: 0x0}, + 725: {region: 0x165, script: 0x5a, flags: 0x0}, + 726: {region: 0x165, script: 0x5a, flags: 0x0}, + 727: {region: 0x165, script: 0x5a, flags: 0x0}, + 728: {region: 0x165, script: 0x5a, flags: 0x0}, + 729: {region: 0x165, script: 0x5a, flags: 0x0}, + 730: {region: 0x165, script: 0x5a, flags: 0x0}, + 731: {region: 0x165, script: 0x5a, flags: 0x0}, + 732: {region: 0x165, script: 0x5a, flags: 0x0}, + 733: {region: 0x165, script: 0x5a, flags: 0x0}, + 734: {region: 0x165, script: 0x5a, flags: 0x0}, + 735: {region: 0x165, script: 0x5, flags: 0x0}, + 736: {region: 0x106, script: 0x20, flags: 0x0}, + 737: {region: 0xe7, script: 0x5a, flags: 0x0}, + 738: {region: 0x165, script: 0x5a, flags: 0x0}, + 739: {region: 0x95, script: 0x5a, flags: 0x0}, + 740: {region: 0x165, script: 0x2c, flags: 0x0}, + 741: {region: 0x165, script: 0x5a, flags: 0x0}, + 742: {region: 0x165, script: 0x5a, flags: 0x0}, + 743: {region: 0x165, script: 0x5a, flags: 0x0}, + 744: {region: 0x112, script: 0x5a, flags: 0x0}, + 745: {region: 0xa4, script: 0x5a, flags: 0x0}, + 746: {region: 0x165, script: 0x5a, flags: 0x0}, + 747: {region: 0x165, script: 0x5a, flags: 0x0}, + 748: {region: 0x123, script: 0x5, flags: 0x0}, + 749: {region: 0xcc, script: 0x5a, flags: 0x0}, + 750: {region: 0x165, script: 0x5a, flags: 0x0}, + 751: {region: 0x165, script: 0x5a, flags: 0x0}, + 752: {region: 0x165, script: 0x5a, flags: 0x0}, + 753: {region: 0xbf, script: 0x5a, flags: 0x0}, + 754: {region: 0xd1, script: 0x5a, flags: 0x0}, + 755: {region: 0x165, script: 0x5a, flags: 0x0}, + 756: {region: 0x52, script: 0x5a, flags: 0x0}, + 757: {region: 0xdb, script: 0x22, flags: 0x0}, + 758: {region: 0x12f, script: 0x5a, flags: 0x0}, + 759: {region: 0xc0, script: 0x5a, flags: 0x0}, + 760: {region: 0x165, script: 0x5a, flags: 0x0}, + 761: {region: 0x165, script: 0x5a, flags: 0x0}, + 762: {region: 0xe0, script: 0x5a, flags: 0x0}, + 763: {region: 0x165, script: 0x5a, flags: 0x0}, + 764: {region: 0x95, script: 0x5a, flags: 0x0}, + 765: {region: 0x9b, script: 0x3d, flags: 0x0}, + 766: {region: 0x165, script: 0x5a, flags: 0x0}, + 767: {region: 0xc2, script: 0x20, flags: 0x0}, + 768: {region: 0x165, script: 0x5, flags: 0x0}, + 769: {region: 0x165, script: 0x5a, flags: 0x0}, + 770: {region: 0x165, script: 0x5a, flags: 0x0}, + 771: {region: 0x165, script: 0x5a, flags: 0x0}, + 772: {region: 0x99, script: 0x6e, flags: 0x0}, + 773: {region: 0x165, script: 0x5a, flags: 0x0}, + 774: {region: 0x165, script: 0x5a, flags: 0x0}, + 775: {region: 0x10b, script: 0x5a, flags: 0x0}, + 776: {region: 0x165, script: 0x5a, flags: 0x0}, + 777: {region: 0x165, script: 0x5a, flags: 0x0}, + 778: {region: 0x165, script: 0x5a, flags: 0x0}, + 779: {region: 0x26, script: 0x3, flags: 0x1}, + 780: {region: 0x165, script: 0x5a, flags: 0x0}, + 781: {region: 0x165, script: 0x5a, flags: 0x0}, + 782: {region: 0x99, script: 0xe, flags: 0x0}, + 783: {region: 0xc4, script: 0x75, flags: 0x0}, + 785: {region: 0x165, script: 0x5a, flags: 0x0}, + 786: {region: 0x49, script: 0x5a, flags: 0x0}, + 787: {region: 0x49, script: 0x5a, flags: 0x0}, + 788: {region: 0x37, script: 0x5a, flags: 0x0}, + 789: {region: 0x165, script: 0x5a, flags: 0x0}, + 790: {region: 0x165, script: 0x5a, flags: 0x0}, + 791: {region: 0x165, script: 0x5a, flags: 0x0}, + 792: {region: 0x165, script: 0x5a, flags: 0x0}, + 793: {region: 0x165, script: 0x5a, flags: 0x0}, + 794: {region: 0x165, script: 0x5a, flags: 0x0}, + 795: 
{region: 0x99, script: 0x22, flags: 0x0}, + 796: {region: 0xdb, script: 0x22, flags: 0x0}, + 797: {region: 0x106, script: 0x20, flags: 0x0}, + 798: {region: 0x35, script: 0x72, flags: 0x0}, + 799: {region: 0x29, script: 0x3, flags: 0x1}, + 800: {region: 0xcb, script: 0x5a, flags: 0x0}, + 801: {region: 0x165, script: 0x5a, flags: 0x0}, + 802: {region: 0x165, script: 0x5a, flags: 0x0}, + 803: {region: 0x165, script: 0x5a, flags: 0x0}, + 804: {region: 0x99, script: 0x22, flags: 0x0}, + 805: {region: 0x52, script: 0x5a, flags: 0x0}, + 807: {region: 0x165, script: 0x5a, flags: 0x0}, + 808: {region: 0x135, script: 0x5a, flags: 0x0}, + 809: {region: 0x165, script: 0x5a, flags: 0x0}, + 810: {region: 0x165, script: 0x5a, flags: 0x0}, + 811: {region: 0xe8, script: 0x5, flags: 0x0}, + 812: {region: 0xc3, script: 0x5a, flags: 0x0}, + 813: {region: 0x99, script: 0x22, flags: 0x0}, + 814: {region: 0x95, script: 0x5a, flags: 0x0}, + 815: {region: 0x164, script: 0x5a, flags: 0x0}, + 816: {region: 0x165, script: 0x5a, flags: 0x0}, + 817: {region: 0xc4, script: 0x75, flags: 0x0}, + 818: {region: 0x165, script: 0x5a, flags: 0x0}, + 819: {region: 0x165, script: 0x2c, flags: 0x0}, + 820: {region: 0x106, script: 0x20, flags: 0x0}, + 821: {region: 0x165, script: 0x5a, flags: 0x0}, + 822: {region: 0x131, script: 0x5a, flags: 0x0}, + 823: {region: 0x9c, script: 0x66, flags: 0x0}, + 824: {region: 0x165, script: 0x5a, flags: 0x0}, + 825: {region: 0x165, script: 0x5a, flags: 0x0}, + 826: {region: 0x9c, script: 0x5, flags: 0x0}, + 827: {region: 0x165, script: 0x5a, flags: 0x0}, + 828: {region: 0x165, script: 0x5a, flags: 0x0}, + 829: {region: 0x165, script: 0x5a, flags: 0x0}, + 830: {region: 0xdd, script: 0x5a, flags: 0x0}, + 831: {region: 0x165, script: 0x5a, flags: 0x0}, + 832: {region: 0x165, script: 0x5a, flags: 0x0}, + 834: {region: 0x165, script: 0x5a, flags: 0x0}, + 835: {region: 0x53, script: 0x3b, flags: 0x0}, + 836: {region: 0x9e, script: 0x5a, flags: 0x0}, + 837: {region: 0xd2, script: 0x5a, flags: 0x0}, + 838: {region: 0x165, script: 0x5a, flags: 0x0}, + 839: {region: 0xda, script: 0x5a, flags: 0x0}, + 840: {region: 0x165, script: 0x5a, flags: 0x0}, + 841: {region: 0x165, script: 0x5a, flags: 0x0}, + 842: {region: 0x165, script: 0x5a, flags: 0x0}, + 843: {region: 0xcf, script: 0x5a, flags: 0x0}, + 844: {region: 0x165, script: 0x5a, flags: 0x0}, + 845: {region: 0x165, script: 0x5a, flags: 0x0}, + 846: {region: 0x164, script: 0x5a, flags: 0x0}, + 847: {region: 0xd1, script: 0x5a, flags: 0x0}, + 848: {region: 0x60, script: 0x5a, flags: 0x0}, + 849: {region: 0xdb, script: 0x22, flags: 0x0}, + 850: {region: 0x165, script: 0x5a, flags: 0x0}, + 851: {region: 0xdb, script: 0x22, flags: 0x0}, + 852: {region: 0x165, script: 0x5a, flags: 0x0}, + 853: {region: 0x165, script: 0x5a, flags: 0x0}, + 854: {region: 0xd2, script: 0x5a, flags: 0x0}, + 855: {region: 0x165, script: 0x5a, flags: 0x0}, + 856: {region: 0x165, script: 0x5a, flags: 0x0}, + 857: {region: 0xd1, script: 0x5a, flags: 0x0}, + 858: {region: 0x165, script: 0x5a, flags: 0x0}, + 859: {region: 0xcf, script: 0x5a, flags: 0x0}, + 860: {region: 0xcf, script: 0x5a, flags: 0x0}, + 861: {region: 0x165, script: 0x5a, flags: 0x0}, + 862: {region: 0x165, script: 0x5a, flags: 0x0}, + 863: {region: 0x95, script: 0x5a, flags: 0x0}, + 864: {region: 0x165, script: 0x5a, flags: 0x0}, + 865: {region: 0xdf, script: 0x5a, flags: 0x0}, + 866: {region: 0x165, script: 0x5a, flags: 0x0}, + 867: {region: 0x165, script: 0x5a, flags: 0x0}, + 868: {region: 0x99, script: 0x5a, flags: 
0x0}, + 869: {region: 0x165, script: 0x5a, flags: 0x0}, + 870: {region: 0x165, script: 0x5a, flags: 0x0}, + 871: {region: 0xd9, script: 0x5a, flags: 0x0}, + 872: {region: 0x52, script: 0x5a, flags: 0x0}, + 873: {region: 0x165, script: 0x5a, flags: 0x0}, + 874: {region: 0xda, script: 0x5a, flags: 0x0}, + 875: {region: 0x165, script: 0x5a, flags: 0x0}, + 876: {region: 0x52, script: 0x5a, flags: 0x0}, + 877: {region: 0x165, script: 0x5a, flags: 0x0}, + 878: {region: 0x165, script: 0x5a, flags: 0x0}, + 879: {region: 0xda, script: 0x5a, flags: 0x0}, + 880: {region: 0x123, script: 0x56, flags: 0x0}, + 881: {region: 0x99, script: 0x22, flags: 0x0}, + 882: {region: 0x10c, script: 0xc9, flags: 0x0}, + 883: {region: 0x165, script: 0x5a, flags: 0x0}, + 884: {region: 0x165, script: 0x5a, flags: 0x0}, + 885: {region: 0x84, script: 0x7c, flags: 0x0}, + 886: {region: 0x161, script: 0x5a, flags: 0x0}, + 887: {region: 0x165, script: 0x5a, flags: 0x0}, + 888: {region: 0x49, script: 0x17, flags: 0x0}, + 889: {region: 0x165, script: 0x5a, flags: 0x0}, + 890: {region: 0x161, script: 0x5a, flags: 0x0}, + 891: {region: 0x165, script: 0x5a, flags: 0x0}, + 892: {region: 0x165, script: 0x5a, flags: 0x0}, + 893: {region: 0x165, script: 0x5a, flags: 0x0}, + 894: {region: 0x165, script: 0x5a, flags: 0x0}, + 895: {region: 0x165, script: 0x5a, flags: 0x0}, + 896: {region: 0x117, script: 0x5a, flags: 0x0}, + 897: {region: 0x165, script: 0x5a, flags: 0x0}, + 898: {region: 0x165, script: 0x5a, flags: 0x0}, + 899: {region: 0x135, script: 0x5a, flags: 0x0}, + 900: {region: 0x165, script: 0x5a, flags: 0x0}, + 901: {region: 0x53, script: 0x5a, flags: 0x0}, + 902: {region: 0x165, script: 0x5a, flags: 0x0}, + 903: {region: 0xce, script: 0x5a, flags: 0x0}, + 904: {region: 0x12f, script: 0x5a, flags: 0x0}, + 905: {region: 0x131, script: 0x5a, flags: 0x0}, + 906: {region: 0x80, script: 0x5a, flags: 0x0}, + 907: {region: 0x78, script: 0x5a, flags: 0x0}, + 908: {region: 0x165, script: 0x5a, flags: 0x0}, + 910: {region: 0x165, script: 0x5a, flags: 0x0}, + 911: {region: 0x165, script: 0x5a, flags: 0x0}, + 912: {region: 0x6f, script: 0x5a, flags: 0x0}, + 913: {region: 0x165, script: 0x5a, flags: 0x0}, + 914: {region: 0x165, script: 0x5a, flags: 0x0}, + 915: {region: 0x165, script: 0x5a, flags: 0x0}, + 916: {region: 0x165, script: 0x5a, flags: 0x0}, + 917: {region: 0x99, script: 0x81, flags: 0x0}, + 918: {region: 0x165, script: 0x5a, flags: 0x0}, + 919: {region: 0x165, script: 0x5, flags: 0x0}, + 920: {region: 0x7d, script: 0x20, flags: 0x0}, + 921: {region: 0x135, script: 0x82, flags: 0x0}, + 922: {region: 0x165, script: 0x5, flags: 0x0}, + 923: {region: 0xc5, script: 0x80, flags: 0x0}, + 924: {region: 0x165, script: 0x5a, flags: 0x0}, + 925: {region: 0x2c, script: 0x3, flags: 0x1}, + 926: {region: 0xe7, script: 0x5a, flags: 0x0}, + 927: {region: 0x2f, script: 0x2, flags: 0x1}, + 928: {region: 0xe7, script: 0x5a, flags: 0x0}, + 929: {region: 0x30, script: 0x5a, flags: 0x0}, + 930: {region: 0xf0, script: 0x5a, flags: 0x0}, + 931: {region: 0x165, script: 0x5a, flags: 0x0}, + 932: {region: 0x78, script: 0x5a, flags: 0x0}, + 933: {region: 0xd6, script: 0x5a, flags: 0x0}, + 934: {region: 0x135, script: 0x5a, flags: 0x0}, + 935: {region: 0x49, script: 0x5a, flags: 0x0}, + 936: {region: 0x165, script: 0x5a, flags: 0x0}, + 937: {region: 0x9c, script: 0xf7, flags: 0x0}, + 938: {region: 0x165, script: 0x5a, flags: 0x0}, + 939: {region: 0x60, script: 0x5a, flags: 0x0}, + 940: {region: 0x165, script: 0x5, flags: 0x0}, + 941: {region: 0xb0, script: 
0x8e, flags: 0x0}, + 943: {region: 0x165, script: 0x5a, flags: 0x0}, + 944: {region: 0x165, script: 0x5a, flags: 0x0}, + 945: {region: 0x99, script: 0x12, flags: 0x0}, + 946: {region: 0xa4, script: 0x5a, flags: 0x0}, + 947: {region: 0xe9, script: 0x5a, flags: 0x0}, + 948: {region: 0x165, script: 0x5a, flags: 0x0}, + 949: {region: 0x9e, script: 0x5a, flags: 0x0}, + 950: {region: 0x165, script: 0x5a, flags: 0x0}, + 951: {region: 0x165, script: 0x5a, flags: 0x0}, + 952: {region: 0x87, script: 0x34, flags: 0x0}, + 953: {region: 0x75, script: 0x5a, flags: 0x0}, + 954: {region: 0x165, script: 0x5a, flags: 0x0}, + 955: {region: 0xe8, script: 0x4d, flags: 0x0}, + 956: {region: 0x9c, script: 0x5, flags: 0x0}, + 957: {region: 0x1, script: 0x5a, flags: 0x0}, + 958: {region: 0x24, script: 0x5, flags: 0x0}, + 959: {region: 0x165, script: 0x5a, flags: 0x0}, + 960: {region: 0x41, script: 0x5a, flags: 0x0}, + 961: {region: 0x165, script: 0x5a, flags: 0x0}, + 962: {region: 0x7a, script: 0x5a, flags: 0x0}, + 963: {region: 0x165, script: 0x5a, flags: 0x0}, + 964: {region: 0xe4, script: 0x5a, flags: 0x0}, + 965: {region: 0x89, script: 0x5a, flags: 0x0}, + 966: {region: 0x69, script: 0x5a, flags: 0x0}, + 967: {region: 0x165, script: 0x5a, flags: 0x0}, + 968: {region: 0x99, script: 0x22, flags: 0x0}, + 969: {region: 0x165, script: 0x5a, flags: 0x0}, + 970: {region: 0x102, script: 0x5a, flags: 0x0}, + 971: {region: 0x95, script: 0x5a, flags: 0x0}, + 972: {region: 0x165, script: 0x5a, flags: 0x0}, + 973: {region: 0x165, script: 0x5a, flags: 0x0}, + 974: {region: 0x9e, script: 0x5a, flags: 0x0}, + 975: {region: 0x165, script: 0x5, flags: 0x0}, + 976: {region: 0x99, script: 0x5a, flags: 0x0}, + 977: {region: 0x31, script: 0x2, flags: 0x1}, + 978: {region: 0xdb, script: 0x22, flags: 0x0}, + 979: {region: 0x35, script: 0xe, flags: 0x0}, + 980: {region: 0x4e, script: 0x5a, flags: 0x0}, + 981: {region: 0x72, script: 0x5a, flags: 0x0}, + 982: {region: 0x4e, script: 0x5a, flags: 0x0}, + 983: {region: 0x9c, script: 0x5, flags: 0x0}, + 984: {region: 0x10c, script: 0x5a, flags: 0x0}, + 985: {region: 0x3a, script: 0x5a, flags: 0x0}, + 986: {region: 0x165, script: 0x5a, flags: 0x0}, + 987: {region: 0xd1, script: 0x5a, flags: 0x0}, + 988: {region: 0x104, script: 0x5a, flags: 0x0}, + 989: {region: 0x95, script: 0x5a, flags: 0x0}, + 990: {region: 0x12f, script: 0x5a, flags: 0x0}, + 991: {region: 0x165, script: 0x5a, flags: 0x0}, + 992: {region: 0x165, script: 0x5a, flags: 0x0}, + 993: {region: 0x73, script: 0x5a, flags: 0x0}, + 994: {region: 0x106, script: 0x20, flags: 0x0}, + 995: {region: 0x130, script: 0x20, flags: 0x0}, + 996: {region: 0x109, script: 0x5a, flags: 0x0}, + 997: {region: 0x107, script: 0x5a, flags: 0x0}, + 998: {region: 0x12f, script: 0x5a, flags: 0x0}, + 999: {region: 0x165, script: 0x5a, flags: 0x0}, + 1000: {region: 0xa2, script: 0x4c, flags: 0x0}, + 1001: {region: 0x99, script: 0x22, flags: 0x0}, + 1002: {region: 0x80, script: 0x5a, flags: 0x0}, + 1003: {region: 0x106, script: 0x20, flags: 0x0}, + 1004: {region: 0xa4, script: 0x5a, flags: 0x0}, + 1005: {region: 0x95, script: 0x5a, flags: 0x0}, + 1006: {region: 0x99, script: 0x5a, flags: 0x0}, + 1007: {region: 0x114, script: 0x5a, flags: 0x0}, + 1008: {region: 0x99, script: 0xcd, flags: 0x0}, + 1009: {region: 0x165, script: 0x5a, flags: 0x0}, + 1010: {region: 0x165, script: 0x5a, flags: 0x0}, + 1011: {region: 0x12f, script: 0x5a, flags: 0x0}, + 1012: {region: 0x9e, script: 0x5a, flags: 0x0}, + 1013: {region: 0x99, script: 0x22, flags: 0x0}, + 1014: {region: 
0x165, script: 0x5, flags: 0x0}, + 1015: {region: 0x9e, script: 0x5a, flags: 0x0}, + 1016: {region: 0x7b, script: 0x5a, flags: 0x0}, + 1017: {region: 0x49, script: 0x5a, flags: 0x0}, + 1018: {region: 0x33, script: 0x4, flags: 0x1}, + 1019: {region: 0x9e, script: 0x5a, flags: 0x0}, + 1020: {region: 0x9c, script: 0x5, flags: 0x0}, + 1021: {region: 0xda, script: 0x5a, flags: 0x0}, + 1022: {region: 0x4f, script: 0x5a, flags: 0x0}, + 1023: {region: 0xd1, script: 0x5a, flags: 0x0}, + 1024: {region: 0xcf, script: 0x5a, flags: 0x0}, + 1025: {region: 0xc3, script: 0x5a, flags: 0x0}, + 1026: {region: 0x4c, script: 0x5a, flags: 0x0}, + 1027: {region: 0x96, script: 0x7e, flags: 0x0}, + 1028: {region: 0xb6, script: 0x5a, flags: 0x0}, + 1029: {region: 0x165, script: 0x2c, flags: 0x0}, + 1030: {region: 0x165, script: 0x5a, flags: 0x0}, + 1032: {region: 0xba, script: 0xe8, flags: 0x0}, + 1033: {region: 0x165, script: 0x5a, flags: 0x0}, + 1034: {region: 0xc4, script: 0x75, flags: 0x0}, + 1035: {region: 0x165, script: 0x5, flags: 0x0}, + 1036: {region: 0xb3, script: 0xd4, flags: 0x0}, + 1037: {region: 0x6f, script: 0x5a, flags: 0x0}, + 1038: {region: 0x165, script: 0x5a, flags: 0x0}, + 1039: {region: 0x165, script: 0x5a, flags: 0x0}, + 1040: {region: 0x165, script: 0x5a, flags: 0x0}, + 1041: {region: 0x165, script: 0x5a, flags: 0x0}, + 1042: {region: 0x111, script: 0x5a, flags: 0x0}, + 1043: {region: 0x165, script: 0x5a, flags: 0x0}, + 1044: {region: 0xe8, script: 0x5, flags: 0x0}, + 1045: {region: 0x165, script: 0x5a, flags: 0x0}, + 1046: {region: 0x10f, script: 0x5a, flags: 0x0}, + 1047: {region: 0x165, script: 0x5a, flags: 0x0}, + 1048: {region: 0xe9, script: 0x5a, flags: 0x0}, + 1049: {region: 0x165, script: 0x5a, flags: 0x0}, + 1050: {region: 0x95, script: 0x5a, flags: 0x0}, + 1051: {region: 0x142, script: 0x5a, flags: 0x0}, + 1052: {region: 0x10c, script: 0x5a, flags: 0x0}, + 1054: {region: 0x10c, script: 0x5a, flags: 0x0}, + 1055: {region: 0x72, script: 0x5a, flags: 0x0}, + 1056: {region: 0x97, script: 0xca, flags: 0x0}, + 1057: {region: 0x165, script: 0x5a, flags: 0x0}, + 1058: {region: 0x72, script: 0x5a, flags: 0x0}, + 1059: {region: 0x164, script: 0x5a, flags: 0x0}, + 1060: {region: 0x165, script: 0x5a, flags: 0x0}, + 1061: {region: 0xc3, script: 0x5a, flags: 0x0}, + 1062: {region: 0x165, script: 0x5a, flags: 0x0}, + 1063: {region: 0x165, script: 0x5a, flags: 0x0}, + 1064: {region: 0x165, script: 0x5a, flags: 0x0}, + 1065: {region: 0x115, script: 0x5a, flags: 0x0}, + 1066: {region: 0x165, script: 0x5a, flags: 0x0}, + 1067: {region: 0x165, script: 0x5a, flags: 0x0}, + 1068: {region: 0x123, script: 0xeb, flags: 0x0}, + 1069: {region: 0x165, script: 0x5a, flags: 0x0}, + 1070: {region: 0x165, script: 0x5a, flags: 0x0}, + 1071: {region: 0x165, script: 0x5a, flags: 0x0}, + 1072: {region: 0x165, script: 0x5a, flags: 0x0}, + 1073: {region: 0x27, script: 0x5a, flags: 0x0}, + 1074: {region: 0x37, script: 0x5, flags: 0x1}, + 1075: {region: 0x99, script: 0xd7, flags: 0x0}, + 1076: {region: 0x116, script: 0x5a, flags: 0x0}, + 1077: {region: 0x114, script: 0x5a, flags: 0x0}, + 1078: {region: 0x99, script: 0x22, flags: 0x0}, + 1079: {region: 0x161, script: 0x5a, flags: 0x0}, + 1080: {region: 0x165, script: 0x5a, flags: 0x0}, + 1081: {region: 0x165, script: 0x5a, flags: 0x0}, + 1082: {region: 0x6d, script: 0x5a, flags: 0x0}, + 1083: {region: 0x161, script: 0x5a, flags: 0x0}, + 1084: {region: 0x165, script: 0x5a, flags: 0x0}, + 1085: {region: 0x60, script: 0x5a, flags: 0x0}, + 1086: {region: 0x95, script: 0x5a, 
flags: 0x0}, + 1087: {region: 0x165, script: 0x5a, flags: 0x0}, + 1088: {region: 0x165, script: 0x5a, flags: 0x0}, + 1089: {region: 0x12f, script: 0x5a, flags: 0x0}, + 1090: {region: 0x165, script: 0x5a, flags: 0x0}, + 1091: {region: 0x84, script: 0x5a, flags: 0x0}, + 1092: {region: 0x10c, script: 0x5a, flags: 0x0}, + 1093: {region: 0x12f, script: 0x5a, flags: 0x0}, + 1094: {region: 0x15f, script: 0x5, flags: 0x0}, + 1095: {region: 0x4b, script: 0x5a, flags: 0x0}, + 1096: {region: 0x60, script: 0x5a, flags: 0x0}, + 1097: {region: 0x165, script: 0x5a, flags: 0x0}, + 1098: {region: 0x99, script: 0x22, flags: 0x0}, + 1099: {region: 0x95, script: 0x5a, flags: 0x0}, + 1100: {region: 0x165, script: 0x5a, flags: 0x0}, + 1101: {region: 0x35, script: 0xe, flags: 0x0}, + 1102: {region: 0x9b, script: 0xdb, flags: 0x0}, + 1103: {region: 0xe9, script: 0x5a, flags: 0x0}, + 1104: {region: 0x99, script: 0xe3, flags: 0x0}, + 1105: {region: 0xdb, script: 0x22, flags: 0x0}, + 1106: {region: 0x165, script: 0x5a, flags: 0x0}, + 1107: {region: 0x165, script: 0x5a, flags: 0x0}, + 1108: {region: 0x165, script: 0x5a, flags: 0x0}, + 1109: {region: 0x165, script: 0x5a, flags: 0x0}, + 1110: {region: 0x165, script: 0x5a, flags: 0x0}, + 1111: {region: 0x165, script: 0x5a, flags: 0x0}, + 1112: {region: 0x165, script: 0x5a, flags: 0x0}, + 1113: {region: 0x165, script: 0x5a, flags: 0x0}, + 1114: {region: 0xe7, script: 0x5a, flags: 0x0}, + 1115: {region: 0x165, script: 0x5a, flags: 0x0}, + 1116: {region: 0x165, script: 0x5a, flags: 0x0}, + 1117: {region: 0x99, script: 0x52, flags: 0x0}, + 1118: {region: 0x53, script: 0xe1, flags: 0x0}, + 1119: {region: 0xdb, script: 0x22, flags: 0x0}, + 1120: {region: 0xdb, script: 0x22, flags: 0x0}, + 1121: {region: 0x99, script: 0xe6, flags: 0x0}, + 1122: {region: 0x165, script: 0x5a, flags: 0x0}, + 1123: {region: 0x112, script: 0x5a, flags: 0x0}, + 1124: {region: 0x131, script: 0x5a, flags: 0x0}, + 1125: {region: 0x126, script: 0x5a, flags: 0x0}, + 1126: {region: 0x165, script: 0x5a, flags: 0x0}, + 1127: {region: 0x3c, script: 0x3, flags: 0x1}, + 1128: {region: 0x165, script: 0x5a, flags: 0x0}, + 1129: {region: 0x165, script: 0x5a, flags: 0x0}, + 1130: {region: 0x165, script: 0x5a, flags: 0x0}, + 1131: {region: 0x123, script: 0xeb, flags: 0x0}, + 1132: {region: 0xdb, script: 0x22, flags: 0x0}, + 1133: {region: 0xdb, script: 0x22, flags: 0x0}, + 1134: {region: 0xdb, script: 0x22, flags: 0x0}, + 1135: {region: 0x6f, script: 0x2c, flags: 0x0}, + 1136: {region: 0x165, script: 0x5a, flags: 0x0}, + 1137: {region: 0x6d, script: 0x2c, flags: 0x0}, + 1138: {region: 0x165, script: 0x5a, flags: 0x0}, + 1139: {region: 0x165, script: 0x5a, flags: 0x0}, + 1140: {region: 0x165, script: 0x5a, flags: 0x0}, + 1141: {region: 0xd6, script: 0x5a, flags: 0x0}, + 1142: {region: 0x127, script: 0x5a, flags: 0x0}, + 1143: {region: 0x125, script: 0x5a, flags: 0x0}, + 1144: {region: 0x32, script: 0x5a, flags: 0x0}, + 1145: {region: 0xdb, script: 0x22, flags: 0x0}, + 1146: {region: 0xe7, script: 0x5a, flags: 0x0}, + 1147: {region: 0x165, script: 0x5a, flags: 0x0}, + 1148: {region: 0x165, script: 0x5a, flags: 0x0}, + 1149: {region: 0x32, script: 0x5a, flags: 0x0}, + 1150: {region: 0xd4, script: 0x5a, flags: 0x0}, + 1151: {region: 0x165, script: 0x5a, flags: 0x0}, + 1152: {region: 0x161, script: 0x5a, flags: 0x0}, + 1153: {region: 0x165, script: 0x5a, flags: 0x0}, + 1154: {region: 0x129, script: 0x5a, flags: 0x0}, + 1155: {region: 0x165, script: 0x5a, flags: 0x0}, + 1156: {region: 0xce, script: 0x5a, flags: 0x0}, + 
1157: {region: 0x165, script: 0x5a, flags: 0x0}, + 1158: {region: 0xe6, script: 0x5a, flags: 0x0}, + 1159: {region: 0x165, script: 0x5a, flags: 0x0}, + 1160: {region: 0x165, script: 0x5a, flags: 0x0}, + 1161: {region: 0x165, script: 0x5a, flags: 0x0}, + 1162: {region: 0x12b, script: 0x5a, flags: 0x0}, + 1163: {region: 0x12b, script: 0x5a, flags: 0x0}, + 1164: {region: 0x12e, script: 0x5a, flags: 0x0}, + 1165: {region: 0x165, script: 0x5, flags: 0x0}, + 1166: {region: 0x161, script: 0x5a, flags: 0x0}, + 1167: {region: 0x87, script: 0x34, flags: 0x0}, + 1168: {region: 0xdb, script: 0x22, flags: 0x0}, + 1169: {region: 0xe7, script: 0x5a, flags: 0x0}, + 1170: {region: 0x43, script: 0xec, flags: 0x0}, + 1171: {region: 0x165, script: 0x5a, flags: 0x0}, + 1172: {region: 0x106, script: 0x20, flags: 0x0}, + 1173: {region: 0x165, script: 0x5a, flags: 0x0}, + 1174: {region: 0x165, script: 0x5a, flags: 0x0}, + 1175: {region: 0x131, script: 0x5a, flags: 0x0}, + 1176: {region: 0x165, script: 0x5a, flags: 0x0}, + 1177: {region: 0x123, script: 0xeb, flags: 0x0}, + 1178: {region: 0x32, script: 0x5a, flags: 0x0}, + 1179: {region: 0x165, script: 0x5a, flags: 0x0}, + 1180: {region: 0x165, script: 0x5a, flags: 0x0}, + 1181: {region: 0xce, script: 0x5a, flags: 0x0}, + 1182: {region: 0x165, script: 0x5a, flags: 0x0}, + 1183: {region: 0x165, script: 0x5a, flags: 0x0}, + 1184: {region: 0x12d, script: 0x5a, flags: 0x0}, + 1185: {region: 0x165, script: 0x5a, flags: 0x0}, + 1187: {region: 0x165, script: 0x5a, flags: 0x0}, + 1188: {region: 0xd4, script: 0x5a, flags: 0x0}, + 1189: {region: 0x53, script: 0xe4, flags: 0x0}, + 1190: {region: 0xe5, script: 0x5a, flags: 0x0}, + 1191: {region: 0x165, script: 0x5a, flags: 0x0}, + 1192: {region: 0x106, script: 0x20, flags: 0x0}, + 1193: {region: 0xba, script: 0x5a, flags: 0x0}, + 1194: {region: 0x165, script: 0x5a, flags: 0x0}, + 1195: {region: 0x106, script: 0x20, flags: 0x0}, + 1196: {region: 0x3f, script: 0x4, flags: 0x1}, + 1197: {region: 0x11c, script: 0xf0, flags: 0x0}, + 1198: {region: 0x130, script: 0x20, flags: 0x0}, + 1199: {region: 0x75, script: 0x5a, flags: 0x0}, + 1200: {region: 0x2a, script: 0x5a, flags: 0x0}, + 1202: {region: 0x43, script: 0x3, flags: 0x1}, + 1203: {region: 0x99, script: 0xe, flags: 0x0}, + 1204: {region: 0xe8, script: 0x5, flags: 0x0}, + 1205: {region: 0x165, script: 0x5a, flags: 0x0}, + 1206: {region: 0x165, script: 0x5a, flags: 0x0}, + 1207: {region: 0x165, script: 0x5a, flags: 0x0}, + 1208: {region: 0x165, script: 0x5a, flags: 0x0}, + 1209: {region: 0x165, script: 0x5a, flags: 0x0}, + 1210: {region: 0x165, script: 0x5a, flags: 0x0}, + 1211: {region: 0x165, script: 0x5a, flags: 0x0}, + 1212: {region: 0x46, script: 0x4, flags: 0x1}, + 1213: {region: 0x165, script: 0x5a, flags: 0x0}, + 1214: {region: 0xb4, script: 0xf1, flags: 0x0}, + 1215: {region: 0x165, script: 0x5a, flags: 0x0}, + 1216: {region: 0x161, script: 0x5a, flags: 0x0}, + 1217: {region: 0x9e, script: 0x5a, flags: 0x0}, + 1218: {region: 0x106, script: 0x5a, flags: 0x0}, + 1219: {region: 0x13e, script: 0x5a, flags: 0x0}, + 1220: {region: 0x11b, script: 0x5a, flags: 0x0}, + 1221: {region: 0x165, script: 0x5a, flags: 0x0}, + 1222: {region: 0x36, script: 0x5a, flags: 0x0}, + 1223: {region: 0x60, script: 0x5a, flags: 0x0}, + 1224: {region: 0xd1, script: 0x5a, flags: 0x0}, + 1225: {region: 0x1, script: 0x5a, flags: 0x0}, + 1226: {region: 0x106, script: 0x5a, flags: 0x0}, + 1227: {region: 0x6a, script: 0x5a, flags: 0x0}, + 1228: {region: 0x12f, script: 0x5a, flags: 0x0}, + 1229: {region: 
0x165, script: 0x5a, flags: 0x0}, + 1230: {region: 0x36, script: 0x5a, flags: 0x0}, + 1231: {region: 0x4e, script: 0x5a, flags: 0x0}, + 1232: {region: 0x165, script: 0x5a, flags: 0x0}, + 1233: {region: 0x6f, script: 0x2c, flags: 0x0}, + 1234: {region: 0x165, script: 0x5a, flags: 0x0}, + 1235: {region: 0xe7, script: 0x5a, flags: 0x0}, + 1236: {region: 0x2f, script: 0x5a, flags: 0x0}, + 1237: {region: 0x99, script: 0xe6, flags: 0x0}, + 1238: {region: 0x99, script: 0x22, flags: 0x0}, + 1239: {region: 0x165, script: 0x5a, flags: 0x0}, + 1240: {region: 0x165, script: 0x5a, flags: 0x0}, + 1241: {region: 0x165, script: 0x5a, flags: 0x0}, + 1242: {region: 0x165, script: 0x5a, flags: 0x0}, + 1243: {region: 0x165, script: 0x5a, flags: 0x0}, + 1244: {region: 0x165, script: 0x5a, flags: 0x0}, + 1245: {region: 0x165, script: 0x5a, flags: 0x0}, + 1246: {region: 0x165, script: 0x5a, flags: 0x0}, + 1247: {region: 0x165, script: 0x5a, flags: 0x0}, + 1248: {region: 0x140, script: 0x5a, flags: 0x0}, + 1249: {region: 0x165, script: 0x5a, flags: 0x0}, + 1250: {region: 0x165, script: 0x5a, flags: 0x0}, + 1251: {region: 0xa8, script: 0x5, flags: 0x0}, + 1252: {region: 0x165, script: 0x5a, flags: 0x0}, + 1253: {region: 0x114, script: 0x5a, flags: 0x0}, + 1254: {region: 0x165, script: 0x5a, flags: 0x0}, + 1255: {region: 0x165, script: 0x5a, flags: 0x0}, + 1256: {region: 0x165, script: 0x5a, flags: 0x0}, + 1257: {region: 0x165, script: 0x5a, flags: 0x0}, + 1258: {region: 0x99, script: 0x22, flags: 0x0}, + 1259: {region: 0x53, script: 0x3b, flags: 0x0}, + 1260: {region: 0x165, script: 0x5a, flags: 0x0}, + 1261: {region: 0x165, script: 0x5a, flags: 0x0}, + 1262: {region: 0x41, script: 0x5a, flags: 0x0}, + 1263: {region: 0x165, script: 0x5a, flags: 0x0}, + 1264: {region: 0x12b, script: 0x18, flags: 0x0}, + 1265: {region: 0x165, script: 0x5a, flags: 0x0}, + 1266: {region: 0x161, script: 0x5a, flags: 0x0}, + 1267: {region: 0x165, script: 0x5a, flags: 0x0}, + 1268: {region: 0x12b, script: 0x62, flags: 0x0}, + 1269: {region: 0x12b, script: 0x63, flags: 0x0}, + 1270: {region: 0x7d, script: 0x2e, flags: 0x0}, + 1271: {region: 0x53, script: 0x67, flags: 0x0}, + 1272: {region: 0x10b, script: 0x6c, flags: 0x0}, + 1273: {region: 0x108, script: 0x77, flags: 0x0}, + 1274: {region: 0x99, script: 0x22, flags: 0x0}, + 1275: {region: 0x131, script: 0x5a, flags: 0x0}, + 1276: {region: 0x165, script: 0x5a, flags: 0x0}, + 1277: {region: 0x9c, script: 0x91, flags: 0x0}, + 1278: {region: 0x165, script: 0x5a, flags: 0x0}, + 1279: {region: 0x15e, script: 0xcc, flags: 0x0}, + 1280: {region: 0x165, script: 0x5a, flags: 0x0}, + 1281: {region: 0x165, script: 0x5a, flags: 0x0}, + 1282: {region: 0xdb, script: 0x22, flags: 0x0}, + 1283: {region: 0x165, script: 0x5a, flags: 0x0}, + 1284: {region: 0x165, script: 0x5a, flags: 0x0}, + 1285: {region: 0xd1, script: 0x5a, flags: 0x0}, + 1286: {region: 0x75, script: 0x5a, flags: 0x0}, + 1287: {region: 0x165, script: 0x5a, flags: 0x0}, + 1288: {region: 0x165, script: 0x5a, flags: 0x0}, + 1289: {region: 0x52, script: 0x5a, flags: 0x0}, + 1290: {region: 0x165, script: 0x5a, flags: 0x0}, + 1291: {region: 0x165, script: 0x5a, flags: 0x0}, + 1292: {region: 0x165, script: 0x5a, flags: 0x0}, + 1293: {region: 0x52, script: 0x5a, flags: 0x0}, + 1294: {region: 0x165, script: 0x5a, flags: 0x0}, + 1295: {region: 0x165, script: 0x5a, flags: 0x0}, + 1296: {region: 0x165, script: 0x5a, flags: 0x0}, + 1297: {region: 0x165, script: 0x5a, flags: 0x0}, + 1298: {region: 0x1, script: 0x3e, flags: 0x0}, + 1299: {region: 0x165, 
script: 0x5a, flags: 0x0}, + 1300: {region: 0x165, script: 0x5a, flags: 0x0}, + 1301: {region: 0x165, script: 0x5a, flags: 0x0}, + 1302: {region: 0x165, script: 0x5a, flags: 0x0}, + 1303: {region: 0x165, script: 0x5a, flags: 0x0}, + 1304: {region: 0xd6, script: 0x5a, flags: 0x0}, + 1305: {region: 0x165, script: 0x5a, flags: 0x0}, + 1306: {region: 0x165, script: 0x5a, flags: 0x0}, + 1307: {region: 0x165, script: 0x5a, flags: 0x0}, + 1308: {region: 0x41, script: 0x5a, flags: 0x0}, + 1309: {region: 0x165, script: 0x5a, flags: 0x0}, + 1310: {region: 0xcf, script: 0x5a, flags: 0x0}, + 1311: {region: 0x4a, script: 0x3, flags: 0x1}, + 1312: {region: 0x165, script: 0x5a, flags: 0x0}, + 1313: {region: 0x165, script: 0x5a, flags: 0x0}, + 1314: {region: 0x165, script: 0x5a, flags: 0x0}, + 1315: {region: 0x53, script: 0x5a, flags: 0x0}, + 1316: {region: 0x10b, script: 0x5a, flags: 0x0}, + 1318: {region: 0xa8, script: 0x5, flags: 0x0}, + 1319: {region: 0xd9, script: 0x5a, flags: 0x0}, + 1320: {region: 0xba, script: 0xe8, flags: 0x0}, + 1321: {region: 0x4d, script: 0x14, flags: 0x1}, + 1322: {region: 0x53, script: 0x7d, flags: 0x0}, + 1323: {region: 0x165, script: 0x5a, flags: 0x0}, + 1324: {region: 0x122, script: 0x5a, flags: 0x0}, + 1325: {region: 0xd0, script: 0x5a, flags: 0x0}, + 1326: {region: 0x165, script: 0x5a, flags: 0x0}, + 1327: {region: 0x161, script: 0x5a, flags: 0x0}, + 1329: {region: 0x12b, script: 0x5a, flags: 0x0}, +} + +// likelyLangList holds lists info associated with likelyLang. +// Size: 582 bytes, 97 elements +var likelyLangList = [97]likelyScriptRegion{ + 0: {region: 0x9c, script: 0x7, flags: 0x0}, + 1: {region: 0xa1, script: 0x78, flags: 0x2}, + 2: {region: 0x11c, script: 0x85, flags: 0x2}, + 3: {region: 0x32, script: 0x5a, flags: 0x0}, + 4: {region: 0x9b, script: 0x5, flags: 0x4}, + 5: {region: 0x9c, script: 0x5, flags: 0x4}, + 6: {region: 0x106, script: 0x20, flags: 0x4}, + 7: {region: 0x9c, script: 0x5, flags: 0x2}, + 8: {region: 0x106, script: 0x20, flags: 0x0}, + 9: {region: 0x38, script: 0x2f, flags: 0x2}, + 10: {region: 0x135, script: 0x5a, flags: 0x0}, + 11: {region: 0x7b, script: 0xcf, flags: 0x2}, + 12: {region: 0x114, script: 0x5a, flags: 0x0}, + 13: {region: 0x84, script: 0x1, flags: 0x2}, + 14: {region: 0x5d, script: 0x1f, flags: 0x0}, + 15: {region: 0x87, script: 0x5f, flags: 0x2}, + 16: {region: 0xd6, script: 0x5a, flags: 0x0}, + 17: {region: 0x52, script: 0x5, flags: 0x4}, + 18: {region: 0x10b, script: 0x5, flags: 0x4}, + 19: {region: 0xae, script: 0x20, flags: 0x0}, + 20: {region: 0x24, script: 0x5, flags: 0x4}, + 21: {region: 0x53, script: 0x5, flags: 0x4}, + 22: {region: 0x9c, script: 0x5, flags: 0x4}, + 23: {region: 0xc5, script: 0x5, flags: 0x4}, + 24: {region: 0x53, script: 0x5, flags: 0x2}, + 25: {region: 0x12b, script: 0x5a, flags: 0x0}, + 26: {region: 0xb0, script: 0x5, flags: 0x4}, + 27: {region: 0x9b, script: 0x5, flags: 0x2}, + 28: {region: 0xa5, script: 0x20, flags: 0x0}, + 29: {region: 0x53, script: 0x5, flags: 0x4}, + 30: {region: 0x12b, script: 0x5a, flags: 0x4}, + 31: {region: 0x53, script: 0x5, flags: 0x2}, + 32: {region: 0x12b, script: 0x5a, flags: 0x2}, + 33: {region: 0xdb, script: 0x22, flags: 0x0}, + 34: {region: 0x99, script: 0x5d, flags: 0x2}, + 35: {region: 0x83, script: 0x5a, flags: 0x0}, + 36: {region: 0x84, script: 0x7c, flags: 0x4}, + 37: {region: 0x84, script: 0x7c, flags: 0x2}, + 38: {region: 0xc5, script: 0x20, flags: 0x0}, + 39: {region: 0x53, script: 0x70, flags: 0x4}, + 40: {region: 0x53, script: 0x70, flags: 0x2}, + 41: 
{region: 0xd0, script: 0x5a, flags: 0x0}, + 42: {region: 0x4a, script: 0x5, flags: 0x4}, + 43: {region: 0x95, script: 0x5, flags: 0x4}, + 44: {region: 0x99, script: 0x36, flags: 0x0}, + 45: {region: 0xe8, script: 0x5, flags: 0x4}, + 46: {region: 0xe8, script: 0x5, flags: 0x2}, + 47: {region: 0x9c, script: 0x8b, flags: 0x0}, + 48: {region: 0x53, script: 0x8c, flags: 0x2}, + 49: {region: 0xba, script: 0xe8, flags: 0x0}, + 50: {region: 0xd9, script: 0x5a, flags: 0x4}, + 51: {region: 0xe8, script: 0x5, flags: 0x0}, + 52: {region: 0x99, script: 0x22, flags: 0x2}, + 53: {region: 0x99, script: 0x4f, flags: 0x2}, + 54: {region: 0x99, script: 0xd3, flags: 0x2}, + 55: {region: 0x105, script: 0x20, flags: 0x0}, + 56: {region: 0xbd, script: 0x5a, flags: 0x4}, + 57: {region: 0x104, script: 0x5a, flags: 0x4}, + 58: {region: 0x106, script: 0x5a, flags: 0x4}, + 59: {region: 0x12b, script: 0x5a, flags: 0x4}, + 60: {region: 0x124, script: 0x20, flags: 0x0}, + 61: {region: 0xe8, script: 0x5, flags: 0x4}, + 62: {region: 0xe8, script: 0x5, flags: 0x2}, + 63: {region: 0x53, script: 0x5, flags: 0x0}, + 64: {region: 0xae, script: 0x20, flags: 0x4}, + 65: {region: 0xc5, script: 0x20, flags: 0x4}, + 66: {region: 0xae, script: 0x20, flags: 0x2}, + 67: {region: 0x99, script: 0xe, flags: 0x0}, + 68: {region: 0xdb, script: 0x22, flags: 0x4}, + 69: {region: 0xdb, script: 0x22, flags: 0x2}, + 70: {region: 0x137, script: 0x5a, flags: 0x0}, + 71: {region: 0x24, script: 0x5, flags: 0x4}, + 72: {region: 0x53, script: 0x20, flags: 0x4}, + 73: {region: 0x24, script: 0x5, flags: 0x2}, + 74: {region: 0x8d, script: 0x3c, flags: 0x0}, + 75: {region: 0x53, script: 0x3b, flags: 0x4}, + 76: {region: 0x53, script: 0x3b, flags: 0x2}, + 77: {region: 0x53, script: 0x3b, flags: 0x0}, + 78: {region: 0x2f, script: 0x3c, flags: 0x4}, + 79: {region: 0x3e, script: 0x3c, flags: 0x4}, + 80: {region: 0x7b, script: 0x3c, flags: 0x4}, + 81: {region: 0x7e, script: 0x3c, flags: 0x4}, + 82: {region: 0x8d, script: 0x3c, flags: 0x4}, + 83: {region: 0x95, script: 0x3c, flags: 0x4}, + 84: {region: 0xc6, script: 0x3c, flags: 0x4}, + 85: {region: 0xd0, script: 0x3c, flags: 0x4}, + 86: {region: 0xe2, script: 0x3c, flags: 0x4}, + 87: {region: 0xe5, script: 0x3c, flags: 0x4}, + 88: {region: 0xe7, script: 0x3c, flags: 0x4}, + 89: {region: 0x116, script: 0x3c, flags: 0x4}, + 90: {region: 0x123, script: 0x3c, flags: 0x4}, + 91: {region: 0x12e, script: 0x3c, flags: 0x4}, + 92: {region: 0x135, script: 0x3c, flags: 0x4}, + 93: {region: 0x13e, script: 0x3c, flags: 0x4}, + 94: {region: 0x12e, script: 0x11, flags: 0x2}, + 95: {region: 0x12e, script: 0x37, flags: 0x2}, + 96: {region: 0x12e, script: 0x3c, flags: 0x2}, +} + +type likelyLangScript struct { + lang uint16 + script uint16 + flags uint8 +} + +// likelyRegion is a lookup table, indexed by regionID, for the most likely +// languages and scripts given incomplete information. If more entries exist +// for a given regionID, lang and script are the index and size respectively +// of the list in likelyRegionList. +// TODO: exclude containers and user-definable regions from the list. 
+// Size: 2148 bytes, 358 elements +var likelyRegion = [358]likelyLangScript{ + 34: {lang: 0xd7, script: 0x5a, flags: 0x0}, + 35: {lang: 0x3a, script: 0x5, flags: 0x0}, + 36: {lang: 0x0, script: 0x2, flags: 0x1}, + 39: {lang: 0x2, script: 0x2, flags: 0x1}, + 40: {lang: 0x4, script: 0x2, flags: 0x1}, + 42: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 43: {lang: 0x0, script: 0x5a, flags: 0x0}, + 44: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 45: {lang: 0x41b, script: 0x5a, flags: 0x0}, + 46: {lang: 0x10d, script: 0x5a, flags: 0x0}, + 48: {lang: 0x367, script: 0x5a, flags: 0x0}, + 49: {lang: 0x444, script: 0x5a, flags: 0x0}, + 50: {lang: 0x58, script: 0x5a, flags: 0x0}, + 51: {lang: 0x6, script: 0x2, flags: 0x1}, + 53: {lang: 0xa5, script: 0xe, flags: 0x0}, + 54: {lang: 0x367, script: 0x5a, flags: 0x0}, + 55: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 56: {lang: 0x7e, script: 0x20, flags: 0x0}, + 57: {lang: 0x3a, script: 0x5, flags: 0x0}, + 58: {lang: 0x3d9, script: 0x5a, flags: 0x0}, + 59: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 60: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 62: {lang: 0x31f, script: 0x5a, flags: 0x0}, + 63: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 64: {lang: 0x3a1, script: 0x5a, flags: 0x0}, + 65: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 67: {lang: 0x8, script: 0x2, flags: 0x1}, + 69: {lang: 0x0, script: 0x5a, flags: 0x0}, + 71: {lang: 0x71, script: 0x20, flags: 0x0}, + 73: {lang: 0x512, script: 0x3e, flags: 0x2}, + 74: {lang: 0x31f, script: 0x5, flags: 0x2}, + 75: {lang: 0x445, script: 0x5a, flags: 0x0}, + 76: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 77: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 78: {lang: 0x10d, script: 0x5a, flags: 0x0}, + 79: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 81: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 82: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 83: {lang: 0xa, script: 0x4, flags: 0x1}, + 84: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 85: {lang: 0x0, script: 0x5a, flags: 0x0}, + 86: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 89: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 90: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 91: {lang: 0x3a1, script: 0x5a, flags: 0x0}, + 93: {lang: 0xe, script: 0x2, flags: 0x1}, + 94: {lang: 0xfa, script: 0x5a, flags: 0x0}, + 96: {lang: 0x10d, script: 0x5a, flags: 0x0}, + 98: {lang: 0x1, script: 0x5a, flags: 0x0}, + 99: {lang: 0x101, script: 0x5a, flags: 0x0}, + 101: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 103: {lang: 0x10, script: 0x2, flags: 0x1}, + 104: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 105: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 106: {lang: 0x140, script: 0x5a, flags: 0x0}, + 107: {lang: 0x3a, script: 0x5, flags: 0x0}, + 108: {lang: 0x3a, script: 0x5, flags: 0x0}, + 109: {lang: 0x46f, script: 0x2c, flags: 0x0}, + 110: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 111: {lang: 0x12, script: 0x2, flags: 0x1}, + 113: {lang: 0x10d, script: 0x5a, flags: 0x0}, + 114: {lang: 0x151, script: 0x5a, flags: 0x0}, + 115: {lang: 0x1c0, script: 0x22, flags: 0x2}, + 118: {lang: 0x158, script: 0x5a, flags: 0x0}, + 120: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 122: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 123: {lang: 0x14, script: 0x2, flags: 0x1}, + 125: {lang: 0x16, script: 0x3, flags: 0x1}, + 126: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 128: {lang: 0x21, script: 0x5a, flags: 0x0}, + 130: {lang: 0x245, script: 0x5a, flags: 0x0}, + 132: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 133: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 134: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 135: {lang: 0x19, script: 
0x2, flags: 0x1}, + 136: {lang: 0x0, script: 0x5a, flags: 0x0}, + 137: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 139: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 141: {lang: 0x529, script: 0x3c, flags: 0x0}, + 142: {lang: 0x0, script: 0x5a, flags: 0x0}, + 143: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 144: {lang: 0x1d1, script: 0x5a, flags: 0x0}, + 145: {lang: 0x1d4, script: 0x5a, flags: 0x0}, + 146: {lang: 0x1d5, script: 0x5a, flags: 0x0}, + 148: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 149: {lang: 0x1b, script: 0x2, flags: 0x1}, + 151: {lang: 0x1bc, script: 0x3e, flags: 0x0}, + 153: {lang: 0x1d, script: 0x3, flags: 0x1}, + 155: {lang: 0x3a, script: 0x5, flags: 0x0}, + 156: {lang: 0x20, script: 0x2, flags: 0x1}, + 157: {lang: 0x1f8, script: 0x5a, flags: 0x0}, + 158: {lang: 0x1f9, script: 0x5a, flags: 0x0}, + 161: {lang: 0x3a, script: 0x5, flags: 0x0}, + 162: {lang: 0x200, script: 0x49, flags: 0x0}, + 164: {lang: 0x445, script: 0x5a, flags: 0x0}, + 165: {lang: 0x28a, script: 0x20, flags: 0x0}, + 166: {lang: 0x22, script: 0x3, flags: 0x1}, + 168: {lang: 0x25, script: 0x2, flags: 0x1}, + 170: {lang: 0x254, script: 0x53, flags: 0x0}, + 171: {lang: 0x254, script: 0x53, flags: 0x0}, + 172: {lang: 0x3a, script: 0x5, flags: 0x0}, + 174: {lang: 0x3e2, script: 0x20, flags: 0x0}, + 175: {lang: 0x27, script: 0x2, flags: 0x1}, + 176: {lang: 0x3a, script: 0x5, flags: 0x0}, + 178: {lang: 0x10d, script: 0x5a, flags: 0x0}, + 179: {lang: 0x40c, script: 0xd4, flags: 0x0}, + 181: {lang: 0x43b, script: 0x5a, flags: 0x0}, + 182: {lang: 0x2c0, script: 0x5a, flags: 0x0}, + 183: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 184: {lang: 0x2c7, script: 0x5a, flags: 0x0}, + 185: {lang: 0x3a, script: 0x5, flags: 0x0}, + 186: {lang: 0x29, script: 0x2, flags: 0x1}, + 187: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 188: {lang: 0x2b, script: 0x2, flags: 0x1}, + 189: {lang: 0x432, script: 0x5a, flags: 0x0}, + 190: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 191: {lang: 0x2f1, script: 0x5a, flags: 0x0}, + 194: {lang: 0x2d, script: 0x2, flags: 0x1}, + 195: {lang: 0xa0, script: 0x5a, flags: 0x0}, + 196: {lang: 0x2f, script: 0x2, flags: 0x1}, + 197: {lang: 0x31, script: 0x2, flags: 0x1}, + 198: {lang: 0x33, script: 0x2, flags: 0x1}, + 200: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 201: {lang: 0x35, script: 0x2, flags: 0x1}, + 203: {lang: 0x320, script: 0x5a, flags: 0x0}, + 204: {lang: 0x37, script: 0x3, flags: 0x1}, + 205: {lang: 0x128, script: 0xea, flags: 0x0}, + 207: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 208: {lang: 0x31f, script: 0x5a, flags: 0x0}, + 209: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 210: {lang: 0x16, script: 0x5a, flags: 0x0}, + 211: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 212: {lang: 0x1b4, script: 0x5a, flags: 0x0}, + 214: {lang: 0x1b4, script: 0x5, flags: 0x2}, + 216: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 217: {lang: 0x367, script: 0x5a, flags: 0x0}, + 218: {lang: 0x347, script: 0x5a, flags: 0x0}, + 219: {lang: 0x351, script: 0x22, flags: 0x0}, + 225: {lang: 0x3a, script: 0x5, flags: 0x0}, + 226: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 228: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 229: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 230: {lang: 0x486, script: 0x5a, flags: 0x0}, + 231: {lang: 0x153, script: 0x5a, flags: 0x0}, + 232: {lang: 0x3a, script: 0x3, flags: 0x1}, + 233: {lang: 0x3b3, script: 0x5a, flags: 0x0}, + 234: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 236: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 237: {lang: 0x3a, script: 0x5, flags: 0x0}, + 238: {lang: 0x3c0, script: 0x5a, 
flags: 0x0}, + 240: {lang: 0x3a2, script: 0x5a, flags: 0x0}, + 241: {lang: 0x194, script: 0x5a, flags: 0x0}, + 243: {lang: 0x3a, script: 0x5, flags: 0x0}, + 258: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 260: {lang: 0x3d, script: 0x2, flags: 0x1}, + 261: {lang: 0x432, script: 0x20, flags: 0x0}, + 262: {lang: 0x3f, script: 0x2, flags: 0x1}, + 263: {lang: 0x3e5, script: 0x5a, flags: 0x0}, + 264: {lang: 0x3a, script: 0x5, flags: 0x0}, + 266: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 267: {lang: 0x3a, script: 0x5, flags: 0x0}, + 268: {lang: 0x41, script: 0x2, flags: 0x1}, + 271: {lang: 0x416, script: 0x5a, flags: 0x0}, + 272: {lang: 0x347, script: 0x5a, flags: 0x0}, + 273: {lang: 0x43, script: 0x2, flags: 0x1}, + 275: {lang: 0x1f9, script: 0x5a, flags: 0x0}, + 276: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 277: {lang: 0x429, script: 0x5a, flags: 0x0}, + 278: {lang: 0x367, script: 0x5a, flags: 0x0}, + 280: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 282: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 284: {lang: 0x45, script: 0x2, flags: 0x1}, + 288: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 289: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 290: {lang: 0x47, script: 0x2, flags: 0x1}, + 291: {lang: 0x49, script: 0x3, flags: 0x1}, + 292: {lang: 0x4c, script: 0x2, flags: 0x1}, + 293: {lang: 0x477, script: 0x5a, flags: 0x0}, + 294: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 295: {lang: 0x476, script: 0x5a, flags: 0x0}, + 296: {lang: 0x4e, script: 0x2, flags: 0x1}, + 297: {lang: 0x482, script: 0x5a, flags: 0x0}, + 299: {lang: 0x50, script: 0x4, flags: 0x1}, + 301: {lang: 0x4a0, script: 0x5a, flags: 0x0}, + 302: {lang: 0x54, script: 0x2, flags: 0x1}, + 303: {lang: 0x445, script: 0x5a, flags: 0x0}, + 304: {lang: 0x56, script: 0x3, flags: 0x1}, + 305: {lang: 0x445, script: 0x5a, flags: 0x0}, + 309: {lang: 0x512, script: 0x3e, flags: 0x2}, + 310: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 311: {lang: 0x4bc, script: 0x5a, flags: 0x0}, + 312: {lang: 0x1f9, script: 0x5a, flags: 0x0}, + 315: {lang: 0x13e, script: 0x5a, flags: 0x0}, + 318: {lang: 0x4c3, script: 0x5a, flags: 0x0}, + 319: {lang: 0x8a, script: 0x5a, flags: 0x0}, + 320: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 322: {lang: 0x41b, script: 0x5a, flags: 0x0}, + 333: {lang: 0x59, script: 0x2, flags: 0x1}, + 350: {lang: 0x3a, script: 0x5, flags: 0x0}, + 351: {lang: 0x5b, script: 0x2, flags: 0x1}, + 356: {lang: 0x423, script: 0x5a, flags: 0x0}, +} + +// likelyRegionList holds lists info associated with likelyRegion. 
+// Size: 558 bytes, 93 elements +var likelyRegionList = [93]likelyLangScript{ + 0: {lang: 0x148, script: 0x5, flags: 0x0}, + 1: {lang: 0x476, script: 0x5a, flags: 0x0}, + 2: {lang: 0x431, script: 0x5a, flags: 0x0}, + 3: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 4: {lang: 0x1d7, script: 0x8, flags: 0x0}, + 5: {lang: 0x274, script: 0x5a, flags: 0x0}, + 6: {lang: 0xb7, script: 0x5a, flags: 0x0}, + 7: {lang: 0x432, script: 0x20, flags: 0x0}, + 8: {lang: 0x12d, script: 0xec, flags: 0x0}, + 9: {lang: 0x351, script: 0x22, flags: 0x0}, + 10: {lang: 0x529, script: 0x3b, flags: 0x0}, + 11: {lang: 0x4ac, script: 0x5, flags: 0x0}, + 12: {lang: 0x523, script: 0x5a, flags: 0x0}, + 13: {lang: 0x29a, script: 0xeb, flags: 0x0}, + 14: {lang: 0x136, script: 0x34, flags: 0x0}, + 15: {lang: 0x48a, script: 0x5a, flags: 0x0}, + 16: {lang: 0x3a, script: 0x5, flags: 0x0}, + 17: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 18: {lang: 0x27, script: 0x2c, flags: 0x0}, + 19: {lang: 0x139, script: 0x5a, flags: 0x0}, + 20: {lang: 0x26a, script: 0x5, flags: 0x2}, + 21: {lang: 0x512, script: 0x3e, flags: 0x2}, + 22: {lang: 0x210, script: 0x2e, flags: 0x0}, + 23: {lang: 0x5, script: 0x20, flags: 0x0}, + 24: {lang: 0x274, script: 0x5a, flags: 0x0}, + 25: {lang: 0x136, script: 0x34, flags: 0x0}, + 26: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 27: {lang: 0x1e1, script: 0x5a, flags: 0x0}, + 28: {lang: 0x31f, script: 0x5, flags: 0x0}, + 29: {lang: 0x1be, script: 0x22, flags: 0x0}, + 30: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 31: {lang: 0x236, script: 0x75, flags: 0x0}, + 32: {lang: 0x148, script: 0x5, flags: 0x0}, + 33: {lang: 0x476, script: 0x5a, flags: 0x0}, + 34: {lang: 0x24a, script: 0x4e, flags: 0x0}, + 35: {lang: 0xe6, script: 0x5, flags: 0x0}, + 36: {lang: 0x226, script: 0xeb, flags: 0x0}, + 37: {lang: 0x3a, script: 0x5, flags: 0x0}, + 38: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 39: {lang: 0x2b8, script: 0x57, flags: 0x0}, + 40: {lang: 0x226, script: 0xeb, flags: 0x0}, + 41: {lang: 0x3a, script: 0x5, flags: 0x0}, + 42: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 43: {lang: 0x3dc, script: 0x5a, flags: 0x0}, + 44: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 45: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 46: {lang: 0x431, script: 0x5a, flags: 0x0}, + 47: {lang: 0x331, script: 0x75, flags: 0x0}, + 48: {lang: 0x213, script: 0x5a, flags: 0x0}, + 49: {lang: 0x30b, script: 0x20, flags: 0x0}, + 50: {lang: 0x242, script: 0x5, flags: 0x0}, + 51: {lang: 0x529, script: 0x3c, flags: 0x0}, + 52: {lang: 0x3c0, script: 0x5a, flags: 0x0}, + 53: {lang: 0x3a, script: 0x5, flags: 0x0}, + 54: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 55: {lang: 0x2ed, script: 0x5a, flags: 0x0}, + 56: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 57: {lang: 0x88, script: 0x22, flags: 0x0}, + 58: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 59: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 60: {lang: 0xbe, script: 0x22, flags: 0x0}, + 61: {lang: 0x3dc, script: 0x5a, flags: 0x0}, + 62: {lang: 0x7e, script: 0x20, flags: 0x0}, + 63: {lang: 0x3e2, script: 0x20, flags: 0x0}, + 64: {lang: 0x267, script: 0x5a, flags: 0x0}, + 65: {lang: 0x444, script: 0x5a, flags: 0x0}, + 66: {lang: 0x512, script: 0x3e, flags: 0x0}, + 67: {lang: 0x412, script: 0x5a, flags: 0x0}, + 68: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 69: {lang: 0x3a, script: 0x5, flags: 0x0}, + 70: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 71: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 72: {lang: 0x35, script: 0x5, flags: 0x0}, + 73: {lang: 0x46b, script: 0xeb, flags: 0x0}, + 74: {lang: 0x2ec, script: 0x5, flags: 
0x0}, + 75: {lang: 0x30f, script: 0x75, flags: 0x0}, + 76: {lang: 0x467, script: 0x20, flags: 0x0}, + 77: {lang: 0x148, script: 0x5, flags: 0x0}, + 78: {lang: 0x3a, script: 0x5, flags: 0x0}, + 79: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 80: {lang: 0x48a, script: 0x5a, flags: 0x0}, + 81: {lang: 0x58, script: 0x5, flags: 0x0}, + 82: {lang: 0x219, script: 0x20, flags: 0x0}, + 83: {lang: 0x81, script: 0x34, flags: 0x0}, + 84: {lang: 0x529, script: 0x3c, flags: 0x0}, + 85: {lang: 0x48c, script: 0x5a, flags: 0x0}, + 86: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 87: {lang: 0x512, script: 0x3e, flags: 0x0}, + 88: {lang: 0x3b3, script: 0x5a, flags: 0x0}, + 89: {lang: 0x431, script: 0x5a, flags: 0x0}, + 90: {lang: 0x432, script: 0x20, flags: 0x0}, + 91: {lang: 0x15e, script: 0x5a, flags: 0x0}, + 92: {lang: 0x446, script: 0x5, flags: 0x0}, +} + +type likelyTag struct { + lang uint16 + region uint16 + script uint16 +} + +// Size: 198 bytes, 33 elements +var likelyRegionGroup = [33]likelyTag{ + 1: {lang: 0x139, region: 0xd6, script: 0x5a}, + 2: {lang: 0x139, region: 0x135, script: 0x5a}, + 3: {lang: 0x3c0, region: 0x41, script: 0x5a}, + 4: {lang: 0x139, region: 0x2f, script: 0x5a}, + 5: {lang: 0x139, region: 0xd6, script: 0x5a}, + 6: {lang: 0x13e, region: 0xcf, script: 0x5a}, + 7: {lang: 0x445, region: 0x12f, script: 0x5a}, + 8: {lang: 0x3a, region: 0x6b, script: 0x5}, + 9: {lang: 0x445, region: 0x4b, script: 0x5a}, + 10: {lang: 0x139, region: 0x161, script: 0x5a}, + 11: {lang: 0x139, region: 0x135, script: 0x5a}, + 12: {lang: 0x139, region: 0x135, script: 0x5a}, + 13: {lang: 0x13e, region: 0x59, script: 0x5a}, + 14: {lang: 0x529, region: 0x53, script: 0x3b}, + 15: {lang: 0x1be, region: 0x99, script: 0x22}, + 16: {lang: 0x1e1, region: 0x95, script: 0x5a}, + 17: {lang: 0x1f9, region: 0x9e, script: 0x5a}, + 18: {lang: 0x139, region: 0x2f, script: 0x5a}, + 19: {lang: 0x139, region: 0xe6, script: 0x5a}, + 20: {lang: 0x139, region: 0x8a, script: 0x5a}, + 21: {lang: 0x41b, region: 0x142, script: 0x5a}, + 22: {lang: 0x529, region: 0x53, script: 0x3b}, + 23: {lang: 0x4bc, region: 0x137, script: 0x5a}, + 24: {lang: 0x3a, region: 0x108, script: 0x5}, + 25: {lang: 0x3e2, region: 0x106, script: 0x20}, + 26: {lang: 0x3e2, region: 0x106, script: 0x20}, + 27: {lang: 0x139, region: 0x7b, script: 0x5a}, + 28: {lang: 0x10d, region: 0x60, script: 0x5a}, + 29: {lang: 0x139, region: 0xd6, script: 0x5a}, + 30: {lang: 0x13e, region: 0x1f, script: 0x5a}, + 31: {lang: 0x139, region: 0x9a, script: 0x5a}, + 32: {lang: 0x139, region: 0x7b, script: 0x5a}, +} + +// Size: 264 bytes, 33 elements +var regionContainment = [33]uint64{ + // Entry 0 - 1F + 0x00000001ffffffff, 0x00000000200007a2, 0x0000000000003044, 0x0000000000000008, + 0x00000000803c0010, 0x0000000000000020, 0x0000000000000040, 0x0000000000000080, + 0x0000000000000100, 0x0000000000000200, 0x0000000000000400, 0x000000004000384c, + 0x0000000000001000, 0x0000000000002000, 0x0000000000004000, 0x0000000000008000, + 0x0000000000010000, 0x0000000000020000, 0x0000000000040000, 0x0000000000080000, + 0x0000000000100000, 0x0000000000200000, 0x0000000001c1c000, 0x0000000000800000, + 0x0000000001000000, 0x000000001e020000, 0x0000000004000000, 0x0000000008000000, + 0x0000000010000000, 0x00000000200006a0, 0x0000000040002048, 0x0000000080000000, + // Entry 20 - 3F + 0x0000000100000000, +} + +// regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +// where each set holds all groupings that are directly connected in a region +// containment graph. 
+// Size: 358 bytes, 358 elements +var regionInclusion = [358]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x23, + 0x24, 0x26, 0x27, 0x22, 0x28, 0x29, 0x2a, 0x2b, + 0x26, 0x2c, 0x24, 0x23, 0x26, 0x25, 0x2a, 0x2d, + 0x2e, 0x24, 0x2f, 0x2d, 0x26, 0x30, 0x31, 0x28, + // Entry 40 - 7F + 0x26, 0x28, 0x26, 0x25, 0x31, 0x22, 0x32, 0x33, + 0x34, 0x30, 0x22, 0x27, 0x27, 0x27, 0x35, 0x2d, + 0x29, 0x28, 0x27, 0x36, 0x28, 0x22, 0x34, 0x23, + 0x21, 0x26, 0x2d, 0x26, 0x22, 0x37, 0x2e, 0x35, + 0x2a, 0x22, 0x2f, 0x38, 0x26, 0x26, 0x21, 0x39, + 0x39, 0x28, 0x38, 0x39, 0x39, 0x2f, 0x3a, 0x2f, + 0x20, 0x21, 0x38, 0x3b, 0x28, 0x3c, 0x2c, 0x21, + 0x2a, 0x35, 0x27, 0x38, 0x26, 0x24, 0x28, 0x2c, + // Entry 80 - BF + 0x2d, 0x23, 0x30, 0x2d, 0x2d, 0x26, 0x27, 0x3a, + 0x22, 0x34, 0x3c, 0x2d, 0x28, 0x36, 0x22, 0x34, + 0x3a, 0x26, 0x2e, 0x21, 0x39, 0x31, 0x38, 0x24, + 0x2c, 0x25, 0x22, 0x24, 0x25, 0x2c, 0x3a, 0x2c, + 0x26, 0x24, 0x36, 0x21, 0x2f, 0x3d, 0x31, 0x3c, + 0x2f, 0x26, 0x36, 0x36, 0x24, 0x26, 0x3d, 0x31, + 0x24, 0x26, 0x35, 0x25, 0x2d, 0x32, 0x38, 0x2a, + 0x38, 0x39, 0x39, 0x35, 0x33, 0x23, 0x26, 0x2f, + // Entry C0 - FF + 0x3c, 0x21, 0x23, 0x2d, 0x31, 0x36, 0x36, 0x3c, + 0x26, 0x2d, 0x26, 0x3a, 0x2f, 0x25, 0x2f, 0x34, + 0x31, 0x2f, 0x32, 0x3b, 0x2d, 0x2b, 0x2d, 0x21, + 0x34, 0x2a, 0x2c, 0x25, 0x21, 0x3c, 0x24, 0x29, + 0x2b, 0x24, 0x34, 0x21, 0x28, 0x29, 0x3b, 0x31, + 0x25, 0x2e, 0x30, 0x29, 0x26, 0x24, 0x3a, 0x21, + 0x3c, 0x28, 0x21, 0x24, 0x21, 0x21, 0x1f, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + // Entry 100 - 13F + 0x21, 0x21, 0x2f, 0x21, 0x2e, 0x23, 0x33, 0x2f, + 0x24, 0x3b, 0x2f, 0x39, 0x38, 0x31, 0x2d, 0x3a, + 0x2c, 0x2e, 0x2d, 0x23, 0x2d, 0x2f, 0x28, 0x2f, + 0x27, 0x33, 0x34, 0x26, 0x24, 0x32, 0x22, 0x26, + 0x27, 0x22, 0x2d, 0x31, 0x3d, 0x29, 0x31, 0x3d, + 0x39, 0x29, 0x31, 0x24, 0x26, 0x29, 0x36, 0x2f, + 0x33, 0x2f, 0x21, 0x22, 0x21, 0x30, 0x28, 0x3d, + 0x23, 0x26, 0x21, 0x28, 0x26, 0x26, 0x31, 0x3b, + // Entry 140 - 17F + 0x29, 0x21, 0x29, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x23, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x24, 0x24, 0x2f, + 0x23, 0x32, 0x2f, 0x27, 0x2f, 0x21, +} + +// regionInclusionBits is an array of bit vectors where every vector represents +// a set of region groupings. These sets are used to compute the distance +// between two regions for the purpose of language matching. 
+// Size: 584 bytes, 73 elements +var regionInclusionBits = [73]uint64{ + // Entry 0 - 1F + 0x0000000102400813, 0x00000000200007a3, 0x0000000000003844, 0x0000000040000808, + 0x00000000803c0011, 0x0000000020000022, 0x0000000040000844, 0x0000000020000082, + 0x0000000000000102, 0x0000000020000202, 0x0000000020000402, 0x000000004000384d, + 0x0000000000001804, 0x0000000040002804, 0x0000000000404000, 0x0000000000408000, + 0x0000000000410000, 0x0000000002020000, 0x0000000000040010, 0x0000000000080010, + 0x0000000000100010, 0x0000000000200010, 0x0000000001c1c001, 0x0000000000c00000, + 0x0000000001400000, 0x000000001e020001, 0x0000000006000000, 0x000000000a000000, + 0x0000000012000000, 0x00000000200006a2, 0x0000000040002848, 0x0000000080000010, + // Entry 20 - 3F + 0x0000000100000001, 0x0000000000000001, 0x0000000080000000, 0x0000000000020000, + 0x0000000001000000, 0x0000000000008000, 0x0000000000002000, 0x0000000000000200, + 0x0000000000000008, 0x0000000000200000, 0x0000000110000000, 0x0000000000040000, + 0x0000000008000000, 0x0000000000000020, 0x0000000104000000, 0x0000000000000080, + 0x0000000000001000, 0x0000000000010000, 0x0000000000000400, 0x0000000004000000, + 0x0000000000000040, 0x0000000010000000, 0x0000000000004000, 0x0000000101000000, + 0x0000000108000000, 0x0000000000000100, 0x0000000100020000, 0x0000000000080000, + 0x0000000000100000, 0x0000000000800000, 0x00000001ffffffff, 0x0000000122400fb3, + // Entry 40 - 5F + 0x00000001827c0813, 0x000000014240385f, 0x0000000103c1c813, 0x000000011e420813, + 0x0000000112000001, 0x0000000106000001, 0x0000000101400001, 0x000000010a000001, + 0x0000000102020001, +} + +// regionInclusionNext marks, for each entry in regionInclusionBits, the set of +// all groups that are reachable from the groups set in the respective entry. 
+// Size: 73 bytes, 73 elements +var regionInclusionNext = [73]uint8{ + // Entry 0 - 3F + 0x3e, 0x3f, 0x0b, 0x0b, 0x40, 0x01, 0x0b, 0x01, + 0x01, 0x01, 0x01, 0x41, 0x0b, 0x0b, 0x16, 0x16, + 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x42, 0x16, + 0x16, 0x43, 0x19, 0x19, 0x19, 0x01, 0x0b, 0x04, + 0x00, 0x00, 0x1f, 0x11, 0x18, 0x0f, 0x0d, 0x09, + 0x03, 0x15, 0x44, 0x12, 0x1b, 0x05, 0x45, 0x07, + 0x0c, 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x46, + 0x47, 0x08, 0x48, 0x13, 0x14, 0x17, 0x3e, 0x3e, + // Entry 40 - 7F + 0x3e, 0x3e, 0x3e, 0x3e, 0x43, 0x43, 0x42, 0x43, + 0x43, +} + +type parentRel struct { + lang uint16 + script uint16 + maxScript uint16 + toRegion uint16 + fromRegion []uint16 +} + +// Size: 414 bytes, 5 elements +var parents = [5]parentRel{ + 0: {lang: 0x139, script: 0x0, maxScript: 0x5a, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x25, 0x26, 0x2f, 0x34, 0x36, 0x3d, 0x42, 0x46, 0x48, 0x49, 0x4a, 0x50, 0x52, 0x5c, 0x5d, 0x61, 0x64, 0x6d, 0x73, 0x74, 0x75, 0x7b, 0x7c, 0x7f, 0x80, 0x81, 0x83, 0x8c, 0x8d, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9f, 0xa0, 0xa4, 0xa7, 0xa9, 0xad, 0xb1, 0xb4, 0xb5, 0xbf, 0xc6, 0xca, 0xcb, 0xcc, 0xce, 0xd0, 0xd2, 0xd5, 0xd6, 0xdd, 0xdf, 0xe0, 0xe6, 0xe7, 0xe8, 0xeb, 0xf0, 0x107, 0x109, 0x10a, 0x10b, 0x10d, 0x10e, 0x112, 0x117, 0x11b, 0x11d, 0x11f, 0x125, 0x129, 0x12c, 0x12d, 0x12f, 0x131, 0x139, 0x13c, 0x13f, 0x142, 0x161, 0x162, 0x164}}, + 1: {lang: 0x139, script: 0x0, maxScript: 0x5a, toRegion: 0x1a, fromRegion: []uint16{0x2e, 0x4e, 0x60, 0x63, 0x72, 0xd9, 0x10c, 0x10f}}, + 2: {lang: 0x13e, script: 0x0, maxScript: 0x5a, toRegion: 0x1f, fromRegion: []uint16{0x2c, 0x3f, 0x41, 0x48, 0x51, 0x54, 0x56, 0x59, 0x65, 0x69, 0x89, 0x8f, 0xcf, 0xd8, 0xe2, 0xe4, 0xec, 0xf1, 0x11a, 0x135, 0x136, 0x13b}}, + 3: {lang: 0x3c0, script: 0x0, maxScript: 0x5a, toRegion: 0xee, fromRegion: []uint16{0x2a, 0x4e, 0x5a, 0x86, 0x8b, 0xb7, 0xc6, 0xd1, 0x118, 0x126}}, + 4: {lang: 0x529, script: 0x3c, maxScript: 0x3c, toRegion: 0x8d, fromRegion: []uint16{0xc6}}, +} + +// Total table size 30244 bytes (29KiB); checksum: B6B15F30 diff --git a/vendor/golang.org/x/text/internal/language/tags.go b/vendor/golang.org/x/text/internal/language/tags.go new file mode 100644 index 00000000000..e7afd3188e6 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/tags.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Language { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values. +func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +// Und is the root language. 
+var Und Tag diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go new file mode 100644 index 00000000000..1cc004a6d5f --- /dev/null +++ b/vendor/golang.org/x/text/internal/match.go @@ -0,0 +1,67 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// This file contains matchers that implement CLDR inheritance. +// +// See https://unicode.org/reports/tr35/#Locale_Inheritance. +// +// Some of the inheritance described in this document is already handled by +// the cldr package. + +import ( + "golang.org/x/text/language" +) + +// TODO: consider if (some of the) matching algorithm needs to be public after +// getting some feel about what is generic and what is specific. + +// NewInheritanceMatcher returns a matcher that matches based on the inheritance +// chain. +// +// The matcher uses canonicalization and the parent relationship to find a +// match. The resulting match will always be either Und or a language with the +// same language and script as the requested language. It will not match +// languages for which there is understood to be mutual or one-directional +// intelligibility. +// +// A Match will indicate an Exact match if the language matches after +// canonicalization and High if the matched tag is a parent. +func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher { + tags := &InheritanceMatcher{make(map[language.Tag]int)} + for i, tag := range t { + ct, err := language.All.Canonicalize(tag) + if err != nil { + ct = tag + } + tags.index[ct] = i + } + return tags +} + +type InheritanceMatcher struct { + index map[language.Tag]int +} + +func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) { + for _, t := range want { + ct, err := language.All.Canonicalize(t) + if err != nil { + ct = t + } + conf := language.Exact + for { + if index, ok := m.index[ct]; ok { + return ct, index, conf + } + if ct == language.Und { + break + } + ct = ct.Parent() + conf = language.High + } + } + return language.Und, 0, language.No +} diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go new file mode 100644 index 00000000000..b5d348891d8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/tag/tag.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag contains functionality handling tags and related data. +package tag // import "golang.org/x/text/internal/tag" + +import "sort" + +// An Index converts tags to a compact numeric value. +// +// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can +// be used to store additional information about the tag. +type Index string + +// Elem returns the element data at the given index. +func (s Index) Elem(x int) string { + return string(s[x*4 : x*4+4]) +} + +// Index reports the index of the given key or -1 if it could not be found. +// Only the first len(key) bytes from the start of the 4-byte entries will be +// considered for the search and the first match in Index will be returned. +func (s Index) Index(key []byte) int { + n := len(key) + // search the index of the first entry with an equal or higher value than + // key in s. 
+ index := sort.Search(len(s)/4, func(i int) bool { + return cmp(s[i*4:i*4+n], key) != -1 + }) + i := index * 4 + if cmp(s[i:i+len(key)], key) != 0 { + return -1 + } + return index +} + +// Next finds the next occurrence of key after index x, which must have been +// obtained from a call to Index using the same key. It returns x+1 or -1. +func (s Index) Next(key []byte, x int) int { + if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 { + return x + } + return -1 +} + +// cmp returns an integer comparing a and b lexicographically. +func cmp(a Index, b []byte) int { + n := len(a) + if len(b) < n { + n = len(b) + } + for i, c := range b[:n] { + switch { + case a[i] > c: + return 1 + case a[i] < c: + return -1 + } + } + switch { + case len(a) < len(b): + return -1 + case len(a) > len(b): + return 1 + } + return 0 +} + +// Compare returns an integer comparing a and b lexicographically. +func Compare(a string, b []byte) int { + return cmp(Index(a), b) +} + +// FixCase reformats b to the same pattern of cases as form. +// If returns false if string b is malformed. +func FixCase(form string, b []byte) bool { + if len(form) != len(b) { + return false + } + for i, c := range b { + if form[i] <= 'Z' { + if c >= 'a' { + c -= 'z' - 'Z' + } + if c < 'A' || 'Z' < c { + return false + } + } else { + if c <= 'Z' { + c += 'z' - 'Z' + } + if c < 'a' || 'z' < c { + return false + } + } + b[i] = c + } + return true +} diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go new file mode 100644 index 00000000000..a24fd1a4d69 --- /dev/null +++ b/vendor/golang.org/x/text/language/coverage.go @@ -0,0 +1,187 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "fmt" + "sort" + + "golang.org/x/text/internal/language" +) + +// The Coverage interface is used to define the level of coverage of an +// internationalization service. Note that not all types are supported by all +// services. As lists may be generated on the fly, it is recommended that users +// of a Coverage cache the results. +type Coverage interface { + // Tags returns the list of supported tags. + Tags() []Tag + + // BaseLanguages returns the list of supported base languages. + BaseLanguages() []Base + + // Scripts returns the list of supported scripts. + Scripts() []Script + + // Regions returns the list of supported regions. + Regions() []Region +} + +var ( + // Supported defines a Coverage that lists all supported subtags. Tags + // always returns nil. + Supported Coverage = allSubtags{} +) + +// TODO: +// - Support Variants, numbering systems. +// - CLDR coverage levels. +// - Set of common tags defined in this package. + +type allSubtags struct{} + +// Regions returns the list of supported regions. As all regions are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" region is not returned. +func (s allSubtags) Regions() []Region { + reg := make([]Region, language.NumRegions) + for i := range reg { + reg[i] = Region{language.Region(i + 1)} + } + return reg +} + +// Scripts returns the list of supported scripts. As all scripts are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" script is not returned. 
+func (s allSubtags) Scripts() []Script { + scr := make([]Script, language.NumScripts) + for i := range scr { + scr[i] = Script{language.Script(i + 1)} + } + return scr +} + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func (s allSubtags) BaseLanguages() []Base { + bs := language.BaseLanguages() + base := make([]Base, len(bs)) + for i, b := range bs { + base[i] = Base{b} + } + return base +} + +// Tags always returns nil. +func (s allSubtags) Tags() []Tag { + return nil +} + +// coverage is used by NewCoverage which is used as a convenient way for +// creating Coverage implementations for partially defined data. Very often a +// package will only need to define a subset of slices. coverage provides a +// convenient way to do this. Moreover, packages using NewCoverage, instead of +// their own implementation, will not break if later new slice types are added. +type coverage struct { + tags func() []Tag + bases func() []Base + scripts func() []Script + regions func() []Region +} + +func (s *coverage) Tags() []Tag { + if s.tags == nil { + return nil + } + return s.tags() +} + +// bases implements sort.Interface and is used to sort base languages. +type bases []Base + +func (b bases) Len() int { + return len(b) +} + +func (b bases) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bases) Less(i, j int) bool { + return b[i].langID < b[j].langID +} + +// BaseLanguages returns the result from calling s.bases if it is specified or +// otherwise derives the set of supported base languages from tags. +func (s *coverage) BaseLanguages() []Base { + if s.bases == nil { + tags := s.Tags() + if len(tags) == 0 { + return nil + } + a := make([]Base, len(tags)) + for i, t := range tags { + a[i] = Base{language.Language(t.lang())} + } + sort.Sort(bases(a)) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + k++ + a[k] = a[i] + } + } + return a[:k+1] + } + return s.bases() +} + +func (s *coverage) Scripts() []Script { + if s.scripts == nil { + return nil + } + return s.scripts() +} + +func (s *coverage) Regions() []Region { + if s.regions == nil { + return nil + } + return s.regions() +} + +// NewCoverage returns a Coverage for the given lists. It is typically used by +// packages providing internationalization services to define their level of +// coverage. A list may be of type []T or func() []T, where T is either Tag, +// Base, Script or Region. The returned Coverage derives the value for Bases +// from Tags if no func or slice for []Base is specified. For other unspecified +// types the returned Coverage will return nil for the respective methods. +func NewCoverage(list ...interface{}) Coverage { + s := &coverage{} + for _, x := range list { + switch v := x.(type) { + case func() []Base: + s.bases = v + case func() []Script: + s.scripts = v + case func() []Region: + s.regions = v + case func() []Tag: + s.tags = v + case []Base: + s.bases = func() []Base { return v } + case []Script: + s.scripts = func() []Script { return v } + case []Region: + s.regions = func() []Region { return v } + case []Tag: + s.tags = func() []Tag { return v } + default: + panic(fmt.Sprintf("language: unsupported set type %T", v)) + } + } + return s +} diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go new file mode 100644 index 00000000000..212b77c9068 --- /dev/null +++ b/vendor/golang.org/x/text/language/doc.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package language implements BCP 47 language tags and related functionality. +// +// The most important function of package language is to match a list of +// user-preferred languages to a list of supported languages. +// It alleviates the developer of dealing with the complexity of this process +// and provides the user with the best experience +// (see https://blog.golang.org/matchlang). +// +// # Matching preferred against supported languages +// +// A Matcher for an application that supports English, Australian English, +// Danish, and standard Mandarin can be created as follows: +// +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) +// +// This list of supported languages is typically implied by the languages for +// which there exists translations of the user interface. +// +// User-preferred languages usually come as a comma-separated list of BCP 47 +// language tags. +// The MatchString finds best matches for such strings: +// +// handler(w http.ResponseWriter, r *http.Request) { +// lang, _ := r.Cookie("lang") +// accept := r.Header.Get("Accept-Language") +// tag, _ := language.MatchStrings(matcher, lang.String(), accept) +// +// // tag should now be used for the initialization of any +// // locale-specific service. +// } +// +// The Matcher's Match method can be used to match Tags directly. +// +// Matchers are aware of the intricacies of equivalence between languages, such +// as deprecated subtags, legacy tags, macro languages, mutual +// intelligibility between scripts and languages, and transparently passing +// BCP 47 user configuration. +// For instance, it will know that a reader of Bokmål Danish can read Norwegian +// and will know that Cantonese ("yue") is a good match for "zh-HK". +// +// # Using match results +// +// To guarantee a consistent user experience to the user it is important to +// use the same language tag for the selection of any locale-specific services. +// For example, it is utterly confusing to substitute spelled-out numbers +// or dates in one language in text of another language. +// More subtly confusing is using the wrong sorting order or casing +// algorithm for a certain language. +// +// All the packages in x/text that provide locale-specific services +// (e.g. collate, cases) should be initialized with the tag that was +// obtained at the start of an interaction with the user. +// +// Note that Tag that is returned by Match and MatchString may differ from any +// of the supported languages, as it may contain carried over settings from +// the user tags. +// This may be inconvenient when your application has some additional +// locale-specific data for your supported languages. +// Match and MatchString both return the index of the matched supported tag +// to simplify associating such data with the matched tag. +// +// # Canonicalization +// +// If one uses the Matcher to compare languages one does not need to +// worry about canonicalization. +// +// The meaning of a Tag varies per application. The language package +// therefore delays canonicalization and preserves information as much +// as possible. The Matcher, however, will always take into account that +// two different tags may represent the same language. 
+// +// By default, only legacy and deprecated tags are converted into their +// canonical equivalent. All other information is preserved. This approach makes +// the confidence scores more accurate and allows matchers to distinguish +// between variants that are otherwise lost. +// +// As a consequence, two tags that should be treated as identical according to +// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The +// Matcher handles such distinctions, though, and is aware of the +// equivalence relations. The CanonType type can be used to alter the +// canonicalization form. +// +// # References +// +// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 +package language // import "golang.org/x/text/language" + +// TODO: explanation on how to match languages for your own locale-specific +// service. diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go new file mode 100644 index 00000000000..289b3a36d52 --- /dev/null +++ b/vendor/golang.org/x/text/language/language.go @@ -0,0 +1,605 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go -output tables.go + +package language + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "strings" + + "golang.org/x/text/internal/language" + "golang.org/x/text/internal/language/compact" +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag compact.Tag + +func makeTag(t language.Tag) (tag Tag) { + return Tag(compact.Make(t)) +} + +func (t *Tag) tag() language.Tag { + return (*compact.Tag)(t).Tag() +} + +func (t *Tag) isCompact() bool { + return (*compact.Tag)(t).IsCompact() +} + +// TODO: improve performance. +func (t *Tag) lang() language.Language { return t.tag().LangID } +func (t *Tag) region() language.Region { return t.tag().RegionID } +func (t *Tag) script() language.Script { return t.tag().ScriptID } + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + return Default.Make(s) +} + +// Make is a convenience wrapper for c.Parse that omits the error. +// In case of an error, a sensible default is returned. +func (c CanonType) Make(s string) Tag { + t, _ := c.Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +func (t Tag) Raw() (b Base, s Script, r Region) { + tt := t.tag() + return Base{tt.LangID}, Script{tt.ScriptID}, Region{tt.RegionID} +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + return compact.Tag(t).IsRoot() +} + +// CanonType can be used to enable or disable various types of canonicalization. +type CanonType int + +const ( + // Replace deprecated base languages with their preferred replacements. + DeprecatedBase CanonType = 1 << iota + // Replace deprecated scripts with their preferred replacements. + DeprecatedScript + // Replace deprecated regions with their preferred replacements. + DeprecatedRegion + // Remove redundant scripts. + SuppressScript + // Normalize legacy encodings. 
This includes legacy languages defined in + // CLDR as well as bibliographic codes defined in ISO-639. + Legacy + // Map the dominant language of a macro language group to the macro language + // subtag. For example cmn -> zh. + Macro + // The CLDR flag should be used if full compatibility with CLDR is required. + // There are a few cases where language.Tag may differ from CLDR. To follow all + // of CLDR's suggestions, use All|CLDR. + CLDR + + // Raw can be used to Compose or Parse without Canonicalization. + Raw CanonType = 0 + + // Replace all deprecated tags with their preferred replacements. + Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion + + // All canonicalizations recommended by BCP 47. + BCP47 = Deprecated | SuppressScript + + // All canonicalizations. + All = BCP47 | Legacy | Macro + + // Default is the canonicalization used by Parse, Make and Compose. To + // preserve as much information as possible, canonicalizations that remove + // potentially valuable information are not included. The Matcher is + // designed to recognize similar tags that would be the same if + // they were canonicalized using All. + Default = Deprecated | Legacy + + canonLang = DeprecatedBase | Legacy | Macro + + // TODO: LikelyScript, LikelyRegion: suppress similar to ICU. +) + +// canonicalize returns the canonicalized equivalent of the tag and +// whether there was any change. +func canonicalize(c CanonType, t language.Tag) (language.Tag, bool) { + if c == Raw { + return t, false + } + changed := false + if c&SuppressScript != 0 { + if t.LangID.SuppressScript() == t.ScriptID { + t.ScriptID = 0 + changed = true + } + } + if c&canonLang != 0 { + for { + if l, aliasType := t.LangID.Canonicalize(); l != t.LangID { + switch aliasType { + case language.Legacy: + if c&Legacy != 0 { + if t.LangID == _sh && t.ScriptID == 0 { + t.ScriptID = _Latn + } + t.LangID = l + changed = true + } + case language.Macro: + if c&Macro != 0 { + // We deviate here from CLDR. The mapping "nb" -> "no" + // qualifies as a typical Macro language mapping. However, + // for legacy reasons, CLDR maps "no", the macro language + // code for Norwegian, to the dominant variant "nb". This + // change is currently under consideration for CLDR as well. + // See https://unicode.org/cldr/trac/ticket/2698 and also + // https://unicode.org/cldr/trac/ticket/1790 for some of the + // practical implications. TODO: this check could be removed + // if CLDR adopts this change. + if c&CLDR == 0 || t.LangID != _nb { + changed = true + t.LangID = l + } + } + case language.Deprecated: + if c&DeprecatedBase != 0 { + if t.LangID == _mo && t.RegionID == 0 { + t.RegionID = _MD + } + t.LangID = l + changed = true + // Other canonicalization types may still apply. + continue + } + } + } else if c&Legacy != 0 && t.LangID == _no && c&CLDR != 0 { + t.LangID = _nb + changed = true + } + break + } + } + if c&DeprecatedScript != 0 { + if t.ScriptID == _Qaai { + changed = true + t.ScriptID = _Zinh + } + } + if c&DeprecatedRegion != 0 { + if r := t.RegionID.Canonicalize(); r != t.RegionID { + changed = true + t.RegionID = r + } + } + return t, changed +} + +// Canonicalize returns the canonicalized equivalent of the tag. +func (c CanonType) Canonicalize(t Tag) (Tag, error) { + // First try fast path. + if t.isCompact() { + if _, changed := canonicalize(c, compact.Tag(t).Tag()); !changed { + return t, nil + } + } + // It is unlikely that one will canonicalize a tag after matching. So do + // a slow but simple approach here. 
+ if tag, changed := canonicalize(c, t.tag()); changed { + tag.RemakeString() + return makeTag(tag), nil + } + return t, nil + +} + +// Confidence indicates the level of certainty for a given return value. +// For example, Serbian may be written in Cyrillic or Latin script. +// The confidence level indicates whether a value was explicitly specified, +// whether it is typically the only possible value, or whether there is +// an ambiguity. +type Confidence int + +const ( + No Confidence = iota // full confidence that there was no match + Low // most likely value picked out of a set of alternatives + High // value is generally assumed to be the correct match + Exact // exact match or explicitly specified value +) + +var confName = []string{"No", "Low", "High", "Exact"} + +func (c Confidence) String() string { + return confName[c] +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + return t.tag().String() +} + +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + return t.tag().MarshalText() +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + var tag language.Tag + err := tag.UnmarshalText(text) + *t = makeTag(tag) + return err +} + +// Base returns the base language of the language tag. If the base language is +// unspecified, an attempt will be made to infer it from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Base() (Base, Confidence) { + if b := t.lang(); b != 0 { + return Base{b}, Exact + } + tt := t.tag() + c := High + if tt.ScriptID == 0 && !tt.RegionID.IsCountry() { + c = Low + } + if tag, err := tt.Maximize(); err == nil && tag.LangID != 0 { + return Base{tag.LangID}, c + } + return Base{0}, No +} + +// Script infers the script for the language tag. If it was not explicitly given, it will infer +// a most likely candidate. +// If more than one script is commonly used for a language, the most likely one +// is returned with a low confidence indication. For example, it returns (Cyrl, Low) +// for Serbian. +// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined) +// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks +// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts. +// See https://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for +// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified. +// Note that an inferred script is never guaranteed to be the correct one. Latin is +// almost exclusively used for Afrikaans, but Arabic has been used for some texts +// in the past. Also, the script that is commonly used may change over time. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Script() (Script, Confidence) { + if scr := t.script(); scr != 0 { + return Script{scr}, Exact + } + tt := t.tag() + sc, c := language.Script(_Zzzz), No + if scr := tt.LangID.SuppressScript(); scr != 0 { + // Note: it is not always the case that a language with a suppress + // script value is only written in one script (e.g. kk, ms, pa). 
+ if tt.RegionID == 0 { + return Script{scr}, High + } + sc, c = scr, High + } + if tag, err := tt.Maximize(); err == nil { + if tag.ScriptID != sc { + sc, c = tag.ScriptID, Low + } + } else { + tt, _ = canonicalize(Deprecated|Macro, tt) + if tag, err := tt.Maximize(); err == nil && tag.ScriptID != sc { + sc, c = tag.ScriptID, Low + } + } + return Script{sc}, c +} + +// Region returns the region for the language tag. If it was not explicitly given, it will +// infer a most likely candidate from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Region() (Region, Confidence) { + if r := t.region(); r != 0 { + return Region{r}, Exact + } + tt := t.tag() + if tt, err := tt.Maximize(); err == nil { + return Region{tt.RegionID}, Low // TODO: differentiate between high and low. + } + tt, _ = canonicalize(Deprecated|Macro, tt) + if tag, err := tt.Maximize(); err == nil { + return Region{tag.RegionID}, Low + } + return Region{_ZZ}, No // TODO: return world instead of undetermined? +} + +// Variants returns the variants specified explicitly for this language tag. +// or nil if no variant was specified. +func (t Tag) Variants() []Variant { + if !compact.Tag(t).MayHaveVariants() { + return nil + } + v := []Variant{} + x, str := "", t.tag().Variants() + for str != "" { + x, str = nextToken(str) + v = append(v, Variant{x}) + } + return v +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +// +// Parent returns a tag for a less specific language that is mutually +// intelligible or Und if there is no such language. This may not be the same as +// simply stripping the last BCP 47 subtag. For instance, the parent of "zh-TW" +// is "zh-Hant", and the parent of "zh-Hant" is "und". +func (t Tag) Parent() Tag { + return Tag(compact.Tag(t).Parent()) +} + +// returns token t and the rest of the string. +func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// Extension is a single BCP 47 extension. +type Extension struct { + s string +} + +// String returns the string representation of the extension, including the +// type tag. +func (e Extension) String() string { + return e.s +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (e Extension, err error) { + ext, err := language.ParseExtension(s) + return Extension{ext}, err +} + +// Type returns the one-byte extension type of e. It returns 0 for the zero +// exception. +func (e Extension) Type() byte { + if e.s == "" { + return 0 + } + return e.s[0] +} + +// Tokens returns the list of tokens of e. +func (e Extension) Tokens() []string { + return strings.Split(e.s, "-") +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext Extension, ok bool) { + if !compact.Tag(t).MayHaveExtensions() { + return Extension{}, false + } + e, ok := t.tag().Extension(x) + return Extension{e}, ok +} + +// Extensions returns all extensions of t. 
+func (t Tag) Extensions() []Extension { + if !compact.Tag(t).MayHaveExtensions() { + return nil + } + e := []Extension{} + for _, ext := range t.tag().Extensions() { + e = append(e, Extension{ext}) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. +func (t Tag) TypeForKey(key string) string { + if !compact.Tag(t).MayHaveExtensions() { + if key != "rg" && key != "va" { + return "" + } + } + return t.tag().TypeForKey(key) +} + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + tt, err := t.tag().SetTypeForKey(key, value) + return makeTag(tt), err +} + +// NumCompactTags is the number of compact tags. The maximum tag is +// NumCompactTags-1. +const NumCompactTags = compact.NumCompactTags + +// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository.The index will change over time +// and should not be stored in persistent storage. If t does not match a compact +// index, exact will be false and the compact index will be returned for the +// first match after repeatedly taking the Parent of t. +func CompactIndex(t Tag) (index int, exact bool) { + id, exact := compact.LanguageID(compact.Tag(t)) + return int(id), exact +} + +var root = language.Tag{} + +// Base is an ISO 639 language code, used for encoding the base language +// of a language tag. +type Base struct { + langID language.Language +} + +// ParseBase parses a 2- or 3-letter ISO 639 code. +// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if another error occurred. +func ParseBase(s string) (Base, error) { + l, err := language.ParseBase(s) + return Base{l}, err +} + +// String returns the BCP 47 representation of the base language. +func (b Base) String() string { + return b.langID.String() +} + +// ISO3 returns the ISO 639-3 language code. +func (b Base) ISO3() string { + return b.langID.ISO3() +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b Base) IsPrivateUse() bool { + return b.langID.IsPrivateUse() +} + +// Script is a 4-letter ISO 15924 code for representing scripts. +// It is idiomatically represented in title case. +type Script struct { + scriptID language.Script +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if another error occurred. +func ParseScript(s string) (Script, error) { + sc, err := language.ParseScript(s) + return Script{sc}, err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. 
+func (s Script) String() string { + return s.scriptID.String() +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s Script) IsPrivateUse() bool { + return s.scriptID.IsPrivateUse() +} + +// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions. +type Region struct { + regionID language.Region +} + +// EncodeM49 returns the Region for the given UN M.49 code. +// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + rid, err := language.EncodeM49(r) + return Region{rid}, err +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if another error occurred. +func ParseRegion(s string) (Region, error) { + r, err := language.ParseRegion(s) + return Region{r}, err +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r Region) String() string { + return r.regionID.String() +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r Region) ISO3() string { + return r.regionID.ISO3() +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r Region) M49() int { + return r.regionID.M49() +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r Region) IsPrivateUse() bool { + return r.regionID.IsPrivateUse() +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + return r.regionID.IsCountry() +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + return r.regionID.IsGroup() +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. +func (r Region) Contains(c Region) bool { + return r.regionID.Contains(c.regionID) +} + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized it was obtained from a Tag that was +// obtained using any of the default methods. +func (r Region) TLD() (Region, error) { + tld, err := r.regionID.TLD() + return Region{tld}, err +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + return Region{r.regionID.Canonicalize()} +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + variant string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (Variant, error) { + v, err := language.ParseVariant(s) + return Variant{v.String()}, err +} + +// String returns the string representation of the variant. 
+func (v Variant) String() string { + return v.variant +} diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go new file mode 100644 index 00000000000..ee45f494747 --- /dev/null +++ b/vendor/golang.org/x/text/language/match.go @@ -0,0 +1,735 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "errors" + "strings" + + "golang.org/x/text/internal/language" +) + +// A MatchOption configures a Matcher. +type MatchOption func(*matcher) + +// PreferSameScript will, in the absence of a match, result in the first +// preferred tag with the same script as a supported tag to match this supported +// tag. The default is currently true, but this may change in the future. +func PreferSameScript(preferSame bool) MatchOption { + return func(m *matcher) { m.preferSameScript = preferSame } +} + +// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface. +// There doesn't seem to be too much need for multiple types. +// Making it a concrete type allows MatchStrings to be a method, which will +// improve its discoverability. + +// MatchStrings parses and matches the given strings until one of them matches +// the language in the Matcher. A string may be an Accept-Language header as +// handled by ParseAcceptLanguage. The default language is returned if no +// other language matched. +func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) { + for _, accept := range lang { + desired, _, err := ParseAcceptLanguage(accept) + if err != nil { + continue + } + if tag, index, conf := m.Match(desired...); conf != No { + return tag, index + } + } + tag, index, _ = m.Match() + return +} + +// Matcher is the interface that wraps the Match method. +// +// Match returns the best match for any of the given tags, along with +// a unique index associated with the returned tag and a confidence +// score. +type Matcher interface { + Match(t ...Tag) (tag Tag, index int, c Confidence) +} + +// Comprehends reports the confidence score for a speaker of a given language +// to being able to comprehend the written form of an alternative language. +func Comprehends(speaker, alternative Tag) Confidence { + _, _, c := NewMatcher([]Tag{alternative}).Match(speaker) + return c +} + +// NewMatcher returns a Matcher that matches an ordered list of preferred tags +// against a list of supported tags based on written intelligibility, closeness +// of dialect, equivalence of subtags and various other rules. It is initialized +// with the list of supported tags. The first element is used as the default +// value in case no match is found. +// +// Its Match method matches the first of the given Tags to reach a certain +// confidence threshold. The tags passed to Match should therefore be specified +// in order of preference. Extensions are ignored for matching. +// +// The index returned by the Match method corresponds to the index of the +// matched tag in t, but is augmented with the Unicode extension ('u')of the +// corresponding preferred tag. This allows user locale options to be passed +// transparently. +func NewMatcher(t []Tag, options ...MatchOption) Matcher { + return newMatcher(t, options) +} + +func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { + var tt language.Tag + match, w, c := m.getBest(want...) 
+ if match != nil { + tt, index = match.tag, match.index + } else { + // TODO: this should be an option + tt = m.default_.tag + if m.preferSameScript { + outer: + for _, w := range want { + script, _ := w.Script() + if script.scriptID == 0 { + // Don't do anything if there is no script, such as with + // private subtags. + continue + } + for i, h := range m.supported { + if script.scriptID == h.maxScript { + tt, index = h.tag, i + break outer + } + } + } + } + // TODO: select first language tag based on script. + } + if w.RegionID != tt.RegionID && w.RegionID != 0 { + if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) { + tt.RegionID = w.RegionID + tt.RemakeString() + } else if r := w.RegionID.String(); len(r) == 2 { + // TODO: also filter macro and deprecated. + tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz") + } + } + // Copy options from the user-provided tag into the result tag. This is hard + // to do after the fact, so we do it here. + // TODO: add in alternative variants to -u-va-. + // TODO: add preferred region to -u-rg-. + if e := w.Extensions(); len(e) > 0 { + b := language.Builder{} + b.SetTag(tt) + for _, e := range e { + b.AddExt(e) + } + tt = b.Make() + } + return makeTag(tt), index, c +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// func (t *Tag) setTagsFrom(id Tag) { +// t.LangID = id.LangID +// t.ScriptID = id.ScriptID +// t.RegionID = id.RegionID +// } + +// Tag Matching +// CLDR defines an algorithm for finding the best match between two sets of language +// tags. The basic algorithm defines how to score a possible match and then find +// the match with the best score +// (see https://www.unicode.org/reports/tr35/#LanguageMatching). +// Using scoring has several disadvantages. The scoring obfuscates the importance of +// the various factors considered, making the algorithm harder to understand. Using +// scoring also requires the full score to be computed for each pair of tags. +// +// We will use a different algorithm which aims to have the following properties: +// - clarity on the precedence of the various selection factors, and +// - improved performance by allowing early termination of a comparison. +// +// Matching algorithm (overview) +// Input: +// - supported: a set of supported tags +// - default: the default tag to return in case there is no match +// - desired: list of desired tags, ordered by preference, starting with +// the most-preferred. +// +// Algorithm: +// 1) Set the best match to the lowest confidence level +// 2) For each tag in "desired": +// a) For each tag in "supported": +// 1) compute the match between the two tags. +// 2) if the match is better than the previous best match, replace it +// with the new match. (see next section) +// b) if the current best match is Exact and pin is true the result will be +// frozen to the language found thusfar, although better matches may +// still be found for the same language. +// 3) If the best match so far is below a certain threshold, return "default". +// +// Ranking: +// We use two phases to determine whether one pair of tags are a better match +// than another pair of tags. First, we determine a rough confidence level. If the +// levels are different, the one with the highest confidence wins. +// Second, if the rough confidence levels are identical, we use a set of tie-breaker +// rules. 
+// +// The confidence level of matching a pair of tags is determined by finding the +// lowest confidence level of any matches of the corresponding subtags (the +// result is deemed as good as its weakest link). +// We define the following levels: +// Exact - An exact match of a subtag, before adding likely subtags. +// MaxExact - An exact match of a subtag, after adding likely subtags. +// [See Note 2]. +// High - High level of mutual intelligibility between different subtag +// variants. +// Low - Low level of mutual intelligibility between different subtag +// variants. +// No - No mutual intelligibility. +// +// The following levels can occur for each type of subtag: +// Base: Exact, MaxExact, High, Low, No +// Script: Exact, MaxExact [see Note 3], Low, No +// Region: Exact, MaxExact, High +// Variant: Exact, High +// Private: Exact, No +// +// Any result with a confidence level of Low or higher is deemed a possible match. +// Once a desired tag matches any of the supported tags with a level of MaxExact +// or higher, the next desired tag is not considered (see Step 2.b). +// Note that CLDR provides languageMatching data that defines close equivalence +// classes for base languages, scripts and regions. +// +// Tie-breaking +// If we get the same confidence level for two matches, we apply a sequence of +// tie-breaking rules. The first that succeeds defines the result. The rules are +// applied in the following order. +// 1) Original language was defined and was identical. +// 2) Original region was defined and was identical. +// 3) Distance between two maximized regions was the smallest. +// 4) Original script was defined and was identical. +// 5) Distance from want tag to have tag using the parent relation [see Note 5.] +// If there is still no winner after these rules are applied, the first match +// found wins. +// +// Notes: +// [2] In practice, as matching of Exact is done in a separate phase from +// matching the other levels, we reuse the Exact level to mean MaxExact in +// the second phase. As a consequence, we only need the levels defined by +// the Confidence type. The MaxExact confidence level is mapped to High in +// the public API. +// [3] We do not differentiate between maximized script values that were derived +// from suppressScript versus most likely tag data. We determined that in +// ranking the two, one ranks just after the other. Moreover, the two cannot +// occur concurrently. As a consequence, they are identical for practical +// purposes. +// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign +// the MaxExact level to allow iw vs he to still be a closer match than +// en-AU vs en-US, for example. +// [5] In CLDR a locale inherits fields that are unspecified for this locale +// from its parent. Therefore, if a locale is a parent of another locale, +// it is a strong measure for closeness, especially when no other tie +// breaker rule applies. One could also argue it is inconsistent, for +// example, when pt-AO matches pt (which CLDR equates with pt-BR), even +// though its parent is pt-PT according to the inheritance rules. +// +// Implementation Details: +// There are several performance considerations worth pointing out. Most notably, +// we preprocess as much as possible (within reason) at the time of creation of a +// matcher. 
This includes: +// - creating a per-language map, which includes data for the raw base language +// and its canonicalized variant (if applicable), +// - expanding entries for the equivalence classes defined in CLDR's +// languageMatch data. +// The per-language map ensures that typically only a very small number of tags +// need to be considered. The pre-expansion of canonicalized subtags and +// equivalence classes reduces the amount of map lookups that need to be done at +// runtime. + +// matcher keeps a set of supported language tags, indexed by language. +type matcher struct { + default_ *haveTag + supported []*haveTag + index map[language.Language]*matchHeader + passSettings bool + preferSameScript bool +} + +// matchHeader has the lists of tags for exact matches and matches based on +// maximized and canonicalized tags for a given language. +type matchHeader struct { + haveTags []*haveTag + original bool +} + +// haveTag holds a supported Tag and its maximized script and region. The maximized +// or canonicalized language is not stored as it is not needed during matching. +type haveTag struct { + tag language.Tag + + // index of this tag in the original list of supported tags. + index int + + // conf is the maximum confidence that can result from matching this haveTag. + // When conf < Exact this means it was inserted after applying a CLDR equivalence rule. + conf Confidence + + // Maximized region and script. + maxRegion language.Region + maxScript language.Script + + // altScript may be checked as an alternative match to maxScript. If altScript + // matches, the confidence level for this match is Low. Theoretically there + // could be multiple alternative scripts. This does not occur in practice. + altScript language.Script + + // nextMax is the index of the next haveTag with the same maximized tags. + nextMax uint16 +} + +func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) { + max := tag + if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 { + max, _ = canonicalize(All, max) + max, _ = max.Maximize() + max.RemakeString() + } + return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID +} + +// altScript returns an alternative script that may match the given script with +// a low confidence. At the moment, the langMatch data allows for at most one +// script to map to another and we rely on this to keep the code simple. +func altScript(l language.Language, s language.Script) language.Script { + for _, alt := range matchScript { + // TODO: also match cases where language is not the same. + if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) && + language.Script(alt.haveScript) == s { + return language.Script(alt.wantScript) + } + } + return 0 +} + +// addIfNew adds a haveTag to the list of tags only if it is a unique tag. +// Tags that have the same maximized values are linked by index. +func (h *matchHeader) addIfNew(n haveTag, exact bool) { + h.original = h.original || exact + // Don't add new exact matches. + for _, v := range h.haveTags { + if equalsRest(v.tag, n.tag) { + return + } + } + // Allow duplicate maximized tags, but create a linked list to allow quickly + // comparing the equivalents and bail out. 
+ for i, v := range h.haveTags { + if v.maxScript == n.maxScript && + v.maxRegion == n.maxRegion && + v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() { + for h.haveTags[i].nextMax != 0 { + i = int(h.haveTags[i].nextMax) + } + h.haveTags[i].nextMax = uint16(len(h.haveTags)) + break + } + } + h.haveTags = append(h.haveTags, &n) +} + +// header returns the matchHeader for the given language. It creates one if +// it doesn't already exist. +func (m *matcher) header(l language.Language) *matchHeader { + if h := m.index[l]; h != nil { + return h + } + h := &matchHeader{} + m.index[l] = h + return h +} + +func toConf(d uint8) Confidence { + if d <= 10 { + return High + } + if d < 30 { + return Low + } + return No +} + +// newMatcher builds an index for the given supported tags and returns it as +// a matcher. It also expands the index by considering various equivalence classes +// for a given tag. +func newMatcher(supported []Tag, options []MatchOption) *matcher { + m := &matcher{ + index: make(map[language.Language]*matchHeader), + preferSameScript: true, + } + for _, o := range options { + o(m) + } + if len(supported) == 0 { + m.default_ = &haveTag{} + return m + } + // Add supported languages to the index. Add exact matches first to give + // them precedence. + for i, tag := range supported { + tt := tag.tag() + pair, _ := makeHaveTag(tt, i) + m.header(tt.LangID).addIfNew(pair, true) + m.supported = append(m.supported, &pair) + } + m.default_ = m.header(supported[0].lang()).haveTags[0] + // Keep these in two different loops to support the case that two equivalent + // languages are distinguished, such as iw and he. + for i, tag := range supported { + tt := tag.tag() + pair, max := makeHaveTag(tt, i) + if max != tt.LangID { + m.header(max).addIfNew(pair, true) + } + } + + // update is used to add indexes in the map for equivalent languages. + // update will only add entries to original indexes, thus not computing any + // transitive relations. + update := func(want, have uint16, conf Confidence) { + if hh := m.index[language.Language(have)]; hh != nil { + if !hh.original { + return + } + hw := m.header(language.Language(want)) + for _, ht := range hh.haveTags { + v := *ht + if conf < v.conf { + v.conf = conf + } + v.nextMax = 0 // this value needs to be recomputed + if v.altScript != 0 { + v.altScript = altScript(language.Language(want), v.maxScript) + } + hw.addIfNew(v, conf == Exact && hh.original) + } + } + } + + // Add entries for languages with mutual intelligibility as defined by CLDR's + // languageMatch data. + for _, ml := range matchLang { + update(ml.want, ml.have, toConf(ml.distance)) + if !ml.oneway { + update(ml.have, ml.want, toConf(ml.distance)) + } + } + + // Add entries for possible canonicalizations. This is an optimization to + // ensure that only one map lookup needs to be done at runtime per desired tag. + // First we match deprecated equivalents. If they are perfect equivalents + // (their canonicalization simply substitutes a different language code, but + // nothing else), the match confidence is Exact, otherwise it is High. + for i, lm := range language.AliasMap { + // If deprecated codes match and there is no fiddling with the script or + // or region, we consider it an exact match. 
+ conf := Exact + if language.AliasTypes[i] != language.Macro { + if !isExactEquivalent(language.Language(lm.From)) { + conf = High + } + update(lm.To, lm.From, conf) + } + update(lm.From, lm.To, conf) + } + return m +} + +// getBest gets the best matching tag in m for any of the given tags, taking into +// account the order of preference of the given tags. +func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) { + best := bestMatch{} + for i, ww := range want { + w := ww.tag() + var max language.Tag + // Check for exact match first. + h := m.index[w.LangID] + if w.LangID != 0 { + if h == nil { + continue + } + // Base language is defined. + max, _ = canonicalize(Legacy|Deprecated|Macro, w) + // A region that is added through canonicalization is stronger than + // a maximized region: set it in the original (e.g. mo -> ro-MD). + if w.RegionID != max.RegionID { + w.RegionID = max.RegionID + } + // TODO: should we do the same for scripts? + // See test case: en, sr, nl ; sh ; sr + max, _ = max.Maximize() + } else { + // Base language is not defined. + if h != nil { + for i := range h.haveTags { + have := h.haveTags[i] + if equalsRest(have.tag, w) { + return have, w, Exact + } + } + } + if w.ScriptID == 0 && w.RegionID == 0 { + // We skip all tags matching und for approximate matching, including + // private tags. + continue + } + max, _ = w.Maximize() + if h = m.index[max.LangID]; h == nil { + continue + } + } + pin := true + for _, t := range want[i+1:] { + if w.LangID == t.lang() { + pin = false + break + } + } + // Check for match based on maximized tag. + for i := range h.haveTags { + have := h.haveTags[i] + best.update(have, w, max.ScriptID, max.RegionID, pin) + if best.conf == Exact { + for have.nextMax != 0 { + have = h.haveTags[have.nextMax] + best.update(have, w, max.ScriptID, max.RegionID, pin) + } + return best.have, best.want, best.conf + } + } + } + if best.conf <= No { + if len(want) != 0 { + return nil, want[0].tag(), No + } + return nil, language.Tag{}, No + } + return best.have, best.want, best.conf +} + +// bestMatch accumulates the best match so far. +type bestMatch struct { + have *haveTag + want language.Tag + conf Confidence + pinnedRegion language.Region + pinLanguage bool + sameRegionGroup bool + // Cached results from applying tie-breaking rules. + origLang bool + origReg bool + paradigmReg bool + regGroupDist uint8 + origScript bool +} + +// update updates the existing best match if the new pair is considered to be a +// better match. To determine if the given pair is a better match, it first +// computes the rough confidence level. If this surpasses the current match, it +// will replace it and update the tie-breaker rule cache. If there is a tie, it +// proceeds with applying a series of tie-breaker rules. If there is no +// conclusive winner after applying the tie-breaker rules, it leaves the current +// match as the preferred match. +// +// If pin is true and have and tag are a strong match, it will henceforth only +// consider matches for this language. This corresponds to the idea that most +// users have a strong preference for the first defined language. A user can +// still prefer a second language over a dialect of the preferred language by +// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should +// be false. 
+func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) { + // Bail if the maximum attainable confidence is below that of the current best match. + c := have.conf + if c < m.conf { + return + } + // Don't change the language once we already have found an exact match. + if m.pinLanguage && tag.LangID != m.want.LangID { + return + } + // Pin the region group if we are comparing tags for the same language. + if tag.LangID == m.want.LangID && m.sameRegionGroup { + _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID) + if !sameGroup { + return + } + } + if c == Exact && have.maxScript == maxScript { + // If there is another language and then another entry of this language, + // don't pin anything, otherwise pin the language. + m.pinLanguage = pin + } + if equalsRest(have.tag, tag) { + } else if have.maxScript != maxScript { + // There is usually very little comprehension between different scripts. + // In a few cases there may still be Low comprehension. This possibility + // is pre-computed and stored in have.altScript. + if Low < m.conf || have.altScript != maxScript { + return + } + c = Low + } else if have.maxRegion != maxRegion { + if High < c { + // There is usually a small difference between languages across regions. + c = High + } + } + + // We store the results of the computations of the tie-breaker rules along + // with the best match. There is no need to do the checks once we determine + // we have a winner, but we do still need to do the tie-breaker computations. + // We use "beaten" to keep track if we still need to do the checks. + beaten := false // true if the new pair defeats the current one. + if c != m.conf { + if c < m.conf { + return + } + beaten = true + } + + // Tie-breaker rules: + // We prefer if the pre-maximized language was specified and identical. + origLang := have.tag.LangID == tag.LangID && tag.LangID != 0 + if !beaten && m.origLang != origLang { + if m.origLang { + return + } + beaten = true + } + + // We prefer if the pre-maximized region was specified and identical. + origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0 + if !beaten && m.origReg != origReg { + if m.origReg { + return + } + beaten = true + } + + regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID) + if !beaten && m.regGroupDist != regGroupDist { + if regGroupDist > m.regGroupDist { + return + } + beaten = true + } + + paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion) + if !beaten && m.paradigmReg != paradigmReg { + if !paradigmReg { + return + } + beaten = true + } + + // Next we prefer if the pre-maximized script was specified and identical. + origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0 + if !beaten && m.origScript != origScript { + if m.origScript { + return + } + beaten = true + } + + // Update m to the newly found best match. 
+ if beaten { + m.have = have + m.want = tag + m.conf = c + m.pinnedRegion = maxRegion + m.sameRegionGroup = sameGroup + m.origLang = origLang + m.origReg = origReg + m.paradigmReg = paradigmReg + m.origScript = origScript + m.regGroupDist = regGroupDist + } +} + +func isParadigmLocale(lang language.Language, r language.Region) bool { + for _, e := range paradigmLocales { + if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) { + return true + } + } + return false +} + +// regionGroupDist computes the distance between two regions based on their +// CLDR grouping. +func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) { + const defaultDistance = 4 + + aGroup := uint(regionToGroups[a]) << 1 + bGroup := uint(regionToGroups[b]) << 1 + for _, ri := range matchRegion { + if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) { + group := uint(1 << (ri.group &^ 0x80)) + if 0x80&ri.group == 0 { + if aGroup&bGroup&group != 0 { // Both regions are in the group. + return ri.distance, ri.distance == defaultDistance + } + } else { + if (aGroup|bGroup)&group == 0 { // Both regions are not in the group. + return ri.distance, ri.distance == defaultDistance + } + } + } + } + return defaultDistance, true +} + +// equalsRest compares everything except the language. +func equalsRest(a, b language.Tag) bool { + // TODO: don't include extensions in this comparison. To do this efficiently, + // though, we should handle private tags separately. + return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags() +} + +// isExactEquivalent returns true if canonicalizing the language will not alter +// the script or region of a tag. +func isExactEquivalent(l language.Language) bool { + for _, o := range notEquivalent { + if o == l { + return false + } + } + return true +} + +var notEquivalent []language.Language + +func init() { + // Create a list of all languages for which canonicalization may alter the + // script or region. + for _, lm := range language.AliasMap { + tag := language.Tag{LangID: language.Language(lm.From)} + if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 { + notEquivalent = append(notEquivalent, language.Language(lm.From)) + } + } + // Maximize undefined regions of paradigm locales. + for i, v := range paradigmLocales { + t := language.Tag{LangID: language.Language(v[0])} + max, _ := t.Maximize() + if v[1] == 0 { + paradigmLocales[i][1] = uint16(max.RegionID) + } + if v[2] == 0 { + paradigmLocales[i][2] = uint16(max.RegionID) + } + } +} diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go new file mode 100644 index 00000000000..4d57222e770 --- /dev/null +++ b/vendor/golang.org/x/text/language/parse.go @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "errors" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/language" +) + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError interface { + error + + // Subtag returns the subtag for which the error occurred. 
+ Subtag() string +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the default canonicalization type. +func Parse(s string) (t Tag, err error) { + return Default.Parse(s) +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the canonicalization type c. +func (c CanonType) Parse(s string) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + + tt, err := language.Parse(s) + if err != nil { + return makeTag(tt), err + } + tt, changed := canonicalize(c, tt) + if changed { + tt.RemakeString() + } + return makeTag(tt), err +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. For -u extensions, though, the key-type pairs are +// added, where later values overwrite older ones. A Tag overwrites all former +// values and typically only makes sense as the first argument. The resulting +// tag is returned after canonicalizing using the Default CanonType. If one or +// more errors are encountered, one of the errors is returned. +func Compose(part ...interface{}) (t Tag, err error) { + return Default.Compose(part...) +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. For -u extensions, though, the key-type pairs are +// added, where later values overwrite older ones. A Tag overwrites all former +// values and typically only makes sense as the first argument. The resulting +// tag is returned after canonicalizing using CanonType c. If one or more errors +// are encountered, one of the errors is returned. 
+func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + + var b language.Builder + if err = update(&b, part...); err != nil { + return und, err + } + b.Tag, _ = canonicalize(c, b.Tag) + return makeTag(b.Make()), err +} + +var errInvalidArgument = errors.New("invalid Extension or Variant") + +func update(b *language.Builder, part ...interface{}) (err error) { + for _, x := range part { + switch v := x.(type) { + case Tag: + b.SetTag(v.tag()) + case Base: + b.Tag.LangID = v.langID + case Script: + b.Tag.ScriptID = v.scriptID + case Region: + b.Tag.RegionID = v.regionID + case Variant: + if v.variant == "" { + err = errInvalidArgument + break + } + b.AddVariant(v.variant) + case Extension: + if v.s == "" { + err = errInvalidArgument + break + } + b.SetExt(v.s) + case []Variant: + b.ClearVariants() + for _, v := range v { + b.AddVariant(v.variant) + } + case []Extension: + b.ClearExtensions() + for _, e := range v { + b.SetExt(e.s) + } + // TODO: support parsing of raw strings based on morphology or just extensions? + case error: + if v != nil { + err = v + } + } + } + return +} + +var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") +var errTagListTooLarge = errors.New("tag list exceeds max length") + +// ParseAcceptLanguage parses the contents of an Accept-Language header as +// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and +// a list of corresponding quality weights. It is more permissive than RFC 2616 +// and may return non-nil slices even if the input is not valid. +// The Tags will be sorted by highest weight first and then by first occurrence. +// Tags with a weight of zero will be dropped. An error will be returned if the +// input could not be parsed. +func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { + defer func() { + if recover() != nil { + tag = nil + q = nil + err = language.ErrSyntax + } + }() + + if strings.Count(s, "-") > 1000 { + return nil, nil, errTagListTooLarge + } + + var entry string + for s != "" { + if entry, s = split(s, ','); entry == "" { + continue + } + + entry, weight := split(entry, ';') + + // Scan the language. + t, err := Parse(entry) + if err != nil { + id, ok := acceptFallback[entry] + if !ok { + return nil, nil, err + } + t = makeTag(language.Tag{LangID: id}) + } + + // Scan the optional weight. + w := 1.0 + if weight != "" { + weight = consume(weight, 'q') + weight = consume(weight, '=') + // consume returns the empty string when a token could not be + // consumed, resulting in an error for ParseFloat. + if w, err = strconv.ParseFloat(weight, 32); err != nil { + return nil, nil, errInvalidWeight + } + // Drop tags with a quality weight of 0. + if w <= 0 { + continue + } + } + + tag = append(tag, t) + q = append(q, float32(w)) + } + sort.Stable(&tagSort{tag, q}) + return tag, q, nil +} + +// consume removes a leading token c from s and returns the result or the empty +// string if there is no such token. +func consume(s string, c byte) string { + if s == "" || s[0] != c { + return "" + } + return strings.TrimSpace(s[1:]) +} + +func split(s string, c byte) (head, tail string) { + if i := strings.IndexByte(s, c); i >= 0 { + return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:]) + } + return strings.TrimSpace(s), "" +} + +// Add hack mapping to deal with a small number of cases that occur +// in Accept-Language (with reasonable frequency). 
+var acceptFallback = map[string]language.Language{ + "english": _en, + "deutsch": _de, + "italian": _it, + "french": _fr, + "*": _mul, // defined in the spec to match all languages. +} + +type tagSort struct { + tag []Tag + q []float32 +} + +func (s *tagSort) Len() int { + return len(s.q) +} + +func (s *tagSort) Less(i, j int) bool { + return s.q[i] > s.q[j] +} + +func (s *tagSort) Swap(i, j int) { + s.tag[i], s.tag[j] = s.tag[j], s.tag[i] + s.q[i], s.q[j] = s.q[j], s.q[i] +} diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go new file mode 100644 index 00000000000..34a732b699d --- /dev/null +++ b/vendor/golang.org/x/text/language/tables.go @@ -0,0 +1,298 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +const ( + _de = 269 + _en = 313 + _fr = 350 + _it = 505 + _mo = 784 + _no = 879 + _nb = 839 + _pt = 960 + _sh = 1031 + _mul = 806 + _und = 0 +) +const ( + _001 = 1 + _419 = 31 + _BR = 65 + _CA = 73 + _ES = 110 + _GB = 123 + _MD = 188 + _PT = 238 + _UK = 306 + _US = 309 + _ZZ = 357 + _XA = 323 + _XC = 325 + _XK = 333 +) +const ( + _Latn = 90 + _Hani = 57 + _Hans = 59 + _Hant = 60 + _Qaaa = 147 + _Qaai = 155 + _Qabx = 196 + _Zinh = 252 + _Zyyy = 257 + _Zzzz = 258 +) + +var regionToGroups = []uint8{ // 358 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x04, + // Entry 40 - 7F + 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x08, + 0x00, 0x04, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x04, 0x01, 0x00, 0x04, 0x02, 0x00, 0x04, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, + // Entry C0 - FF + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, + 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x04, 0x05, 0x00, 0x00, + // 
Entry 140 - 17F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +} // Size: 382 bytes + +var paradigmLocales = [][3]uint16{ // 3 elements + 0: [3]uint16{0x139, 0x0, 0x7b}, + 1: [3]uint16{0x13e, 0x0, 0x1f}, + 2: [3]uint16{0x3c0, 0x41, 0xee}, +} // Size: 42 bytes + +type mutualIntelligibility struct { + want uint16 + have uint16 + distance uint8 + oneway bool +} +type scriptIntelligibility struct { + wantLang uint16 + haveLang uint16 + wantScript uint8 + haveScript uint8 + distance uint8 +} +type regionIntelligibility struct { + lang uint16 + script uint8 + group uint8 + distance uint8 +} + +// matchLang holds pairs of langIDs of base languages that are typically +// mutually intelligible. Each pair is associated with a confidence and +// whether the intelligibility goes one or both ways. +var matchLang = []mutualIntelligibility{ // 113 elements + 0: {want: 0x1d1, have: 0xb7, distance: 0x4, oneway: false}, + 1: {want: 0x407, have: 0xb7, distance: 0x4, oneway: false}, + 2: {want: 0x407, have: 0x1d1, distance: 0x4, oneway: false}, + 3: {want: 0x407, have: 0x432, distance: 0x4, oneway: false}, + 4: {want: 0x43a, have: 0x1, distance: 0x4, oneway: false}, + 5: {want: 0x1a3, have: 0x10d, distance: 0x4, oneway: true}, + 6: {want: 0x295, have: 0x10d, distance: 0x4, oneway: true}, + 7: {want: 0x101, have: 0x36f, distance: 0x8, oneway: false}, + 8: {want: 0x101, have: 0x347, distance: 0x8, oneway: false}, + 9: {want: 0x5, have: 0x3e2, distance: 0xa, oneway: true}, + 10: {want: 0xd, have: 0x139, distance: 0xa, oneway: true}, + 11: {want: 0x16, have: 0x367, distance: 0xa, oneway: true}, + 12: {want: 0x21, have: 0x139, distance: 0xa, oneway: true}, + 13: {want: 0x56, have: 0x13e, distance: 0xa, oneway: true}, + 14: {want: 0x58, have: 0x3e2, distance: 0xa, oneway: true}, + 15: {want: 0x71, have: 0x3e2, distance: 0xa, oneway: true}, + 16: {want: 0x75, have: 0x139, distance: 0xa, oneway: true}, + 17: {want: 0x82, have: 0x1be, distance: 0xa, oneway: true}, + 18: {want: 0xa5, have: 0x139, distance: 0xa, oneway: true}, + 19: {want: 0xb2, have: 0x15e, distance: 0xa, oneway: true}, + 20: {want: 0xdd, have: 0x153, distance: 0xa, oneway: true}, + 21: {want: 0xe5, have: 0x139, distance: 0xa, oneway: true}, + 22: {want: 0xe9, have: 0x3a, distance: 0xa, oneway: true}, + 23: {want: 0xf0, have: 0x15e, distance: 0xa, oneway: true}, + 24: {want: 0xf9, have: 0x15e, distance: 0xa, oneway: true}, + 25: {want: 0x100, have: 0x139, distance: 0xa, oneway: true}, + 26: {want: 0x130, have: 0x139, distance: 0xa, oneway: true}, + 27: {want: 0x13c, have: 0x139, distance: 0xa, oneway: true}, + 28: {want: 0x140, have: 0x151, distance: 0xa, oneway: true}, + 29: {want: 0x145, have: 0x13e, distance: 0xa, oneway: true}, + 30: {want: 0x158, have: 0x101, distance: 0xa, oneway: true}, + 31: {want: 0x16d, have: 0x367, distance: 0xa, oneway: true}, + 32: {want: 0x16e, have: 0x139, distance: 0xa, oneway: true}, + 33: {want: 0x16f, have: 0x139, distance: 0xa, oneway: true}, + 34: {want: 0x17e, have: 0x139, distance: 0xa, oneway: true}, + 35: {want: 0x190, have: 0x13e, distance: 0xa, oneway: true}, + 36: {want: 0x194, have: 0x13e, distance: 0xa, oneway: true}, + 37: {want: 0x1a4, have: 0x1be, distance: 0xa, oneway: true}, + 38: {want: 0x1b4, have: 0x139, distance: 0xa, oneway: true}, + 39: {want: 0x1b8, have: 0x139, distance: 0xa, oneway: true}, + 40: {want: 
0x1d4, have: 0x15e, distance: 0xa, oneway: true}, + 41: {want: 0x1d7, have: 0x3e2, distance: 0xa, oneway: true}, + 42: {want: 0x1d9, have: 0x139, distance: 0xa, oneway: true}, + 43: {want: 0x1e7, have: 0x139, distance: 0xa, oneway: true}, + 44: {want: 0x1f8, have: 0x139, distance: 0xa, oneway: true}, + 45: {want: 0x20e, have: 0x1e1, distance: 0xa, oneway: true}, + 46: {want: 0x210, have: 0x139, distance: 0xa, oneway: true}, + 47: {want: 0x22d, have: 0x15e, distance: 0xa, oneway: true}, + 48: {want: 0x242, have: 0x3e2, distance: 0xa, oneway: true}, + 49: {want: 0x24a, have: 0x139, distance: 0xa, oneway: true}, + 50: {want: 0x251, have: 0x139, distance: 0xa, oneway: true}, + 51: {want: 0x265, have: 0x139, distance: 0xa, oneway: true}, + 52: {want: 0x274, have: 0x48a, distance: 0xa, oneway: true}, + 53: {want: 0x28a, have: 0x3e2, distance: 0xa, oneway: true}, + 54: {want: 0x28e, have: 0x1f9, distance: 0xa, oneway: true}, + 55: {want: 0x2a3, have: 0x139, distance: 0xa, oneway: true}, + 56: {want: 0x2b5, have: 0x15e, distance: 0xa, oneway: true}, + 57: {want: 0x2b8, have: 0x139, distance: 0xa, oneway: true}, + 58: {want: 0x2be, have: 0x139, distance: 0xa, oneway: true}, + 59: {want: 0x2c3, have: 0x15e, distance: 0xa, oneway: true}, + 60: {want: 0x2ed, have: 0x139, distance: 0xa, oneway: true}, + 61: {want: 0x2f1, have: 0x15e, distance: 0xa, oneway: true}, + 62: {want: 0x2fa, have: 0x139, distance: 0xa, oneway: true}, + 63: {want: 0x2ff, have: 0x7e, distance: 0xa, oneway: true}, + 64: {want: 0x304, have: 0x139, distance: 0xa, oneway: true}, + 65: {want: 0x30b, have: 0x3e2, distance: 0xa, oneway: true}, + 66: {want: 0x31b, have: 0x1be, distance: 0xa, oneway: true}, + 67: {want: 0x31f, have: 0x1e1, distance: 0xa, oneway: true}, + 68: {want: 0x320, have: 0x139, distance: 0xa, oneway: true}, + 69: {want: 0x331, have: 0x139, distance: 0xa, oneway: true}, + 70: {want: 0x351, have: 0x139, distance: 0xa, oneway: true}, + 71: {want: 0x36a, have: 0x347, distance: 0xa, oneway: false}, + 72: {want: 0x36a, have: 0x36f, distance: 0xa, oneway: true}, + 73: {want: 0x37a, have: 0x139, distance: 0xa, oneway: true}, + 74: {want: 0x387, have: 0x139, distance: 0xa, oneway: true}, + 75: {want: 0x389, have: 0x139, distance: 0xa, oneway: true}, + 76: {want: 0x38b, have: 0x15e, distance: 0xa, oneway: true}, + 77: {want: 0x390, have: 0x139, distance: 0xa, oneway: true}, + 78: {want: 0x395, have: 0x139, distance: 0xa, oneway: true}, + 79: {want: 0x39d, have: 0x139, distance: 0xa, oneway: true}, + 80: {want: 0x3a5, have: 0x139, distance: 0xa, oneway: true}, + 81: {want: 0x3be, have: 0x139, distance: 0xa, oneway: true}, + 82: {want: 0x3c4, have: 0x13e, distance: 0xa, oneway: true}, + 83: {want: 0x3d4, have: 0x10d, distance: 0xa, oneway: true}, + 84: {want: 0x3d9, have: 0x139, distance: 0xa, oneway: true}, + 85: {want: 0x3e5, have: 0x15e, distance: 0xa, oneway: true}, + 86: {want: 0x3e9, have: 0x1be, distance: 0xa, oneway: true}, + 87: {want: 0x3fa, have: 0x139, distance: 0xa, oneway: true}, + 88: {want: 0x40c, have: 0x139, distance: 0xa, oneway: true}, + 89: {want: 0x423, have: 0x139, distance: 0xa, oneway: true}, + 90: {want: 0x429, have: 0x139, distance: 0xa, oneway: true}, + 91: {want: 0x431, have: 0x139, distance: 0xa, oneway: true}, + 92: {want: 0x43b, have: 0x139, distance: 0xa, oneway: true}, + 93: {want: 0x43e, have: 0x1e1, distance: 0xa, oneway: true}, + 94: {want: 0x445, have: 0x139, distance: 0xa, oneway: true}, + 95: {want: 0x450, have: 0x139, distance: 0xa, oneway: true}, + 96: {want: 0x461, have: 0x139, 
distance: 0xa, oneway: true}, + 97: {want: 0x467, have: 0x3e2, distance: 0xa, oneway: true}, + 98: {want: 0x46f, have: 0x139, distance: 0xa, oneway: true}, + 99: {want: 0x476, have: 0x3e2, distance: 0xa, oneway: true}, + 100: {want: 0x3883, have: 0x139, distance: 0xa, oneway: true}, + 101: {want: 0x480, have: 0x139, distance: 0xa, oneway: true}, + 102: {want: 0x482, have: 0x139, distance: 0xa, oneway: true}, + 103: {want: 0x494, have: 0x3e2, distance: 0xa, oneway: true}, + 104: {want: 0x49d, have: 0x139, distance: 0xa, oneway: true}, + 105: {want: 0x4ac, have: 0x529, distance: 0xa, oneway: true}, + 106: {want: 0x4b4, have: 0x139, distance: 0xa, oneway: true}, + 107: {want: 0x4bc, have: 0x3e2, distance: 0xa, oneway: true}, + 108: {want: 0x4e5, have: 0x15e, distance: 0xa, oneway: true}, + 109: {want: 0x4f2, have: 0x139, distance: 0xa, oneway: true}, + 110: {want: 0x512, have: 0x139, distance: 0xa, oneway: true}, + 111: {want: 0x518, have: 0x139, distance: 0xa, oneway: true}, + 112: {want: 0x52f, have: 0x139, distance: 0xa, oneway: true}, +} // Size: 702 bytes + +// matchScript holds pairs of scriptIDs where readers of one script +// can typically also read the other. Each is associated with a confidence. +var matchScript = []scriptIntelligibility{ // 26 elements + 0: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x5a, haveScript: 0x20, distance: 0x5}, + 1: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x20, haveScript: 0x5a, distance: 0x5}, + 2: {wantLang: 0x58, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa}, + 3: {wantLang: 0xa5, haveLang: 0x139, wantScript: 0xe, haveScript: 0x5a, distance: 0xa}, + 4: {wantLang: 0x1d7, haveLang: 0x3e2, wantScript: 0x8, haveScript: 0x20, distance: 0xa}, + 5: {wantLang: 0x210, haveLang: 0x139, wantScript: 0x2e, haveScript: 0x5a, distance: 0xa}, + 6: {wantLang: 0x24a, haveLang: 0x139, wantScript: 0x4e, haveScript: 0x5a, distance: 0xa}, + 7: {wantLang: 0x251, haveLang: 0x139, wantScript: 0x52, haveScript: 0x5a, distance: 0xa}, + 8: {wantLang: 0x2b8, haveLang: 0x139, wantScript: 0x57, haveScript: 0x5a, distance: 0xa}, + 9: {wantLang: 0x304, haveLang: 0x139, wantScript: 0x6e, haveScript: 0x5a, distance: 0xa}, + 10: {wantLang: 0x331, haveLang: 0x139, wantScript: 0x75, haveScript: 0x5a, distance: 0xa}, + 11: {wantLang: 0x351, haveLang: 0x139, wantScript: 0x22, haveScript: 0x5a, distance: 0xa}, + 12: {wantLang: 0x395, haveLang: 0x139, wantScript: 0x81, haveScript: 0x5a, distance: 0xa}, + 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5a, distance: 0xa}, + 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, + 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, + 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd4, haveScript: 0x5a, distance: 0xa}, + 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe3, haveScript: 0x5a, distance: 0xa}, + 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5a, distance: 0xa}, + 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5a, distance: 0xa}, + 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa}, + 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa}, + 22: {wantLang: 0x4bc, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa}, + 23: {wantLang: 0x512, haveLang: 0x139, wantScript: 0x3e, haveScript: 0x5a, distance: 0xa}, + 24: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3b, 
haveScript: 0x3c, distance: 0xf}, + 25: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3c, haveScript: 0x3b, distance: 0x13}, +} // Size: 232 bytes + +var matchRegion = []regionIntelligibility{ // 15 elements + 0: {lang: 0x3a, script: 0x0, group: 0x4, distance: 0x4}, + 1: {lang: 0x3a, script: 0x0, group: 0x84, distance: 0x4}, + 2: {lang: 0x139, script: 0x0, group: 0x1, distance: 0x4}, + 3: {lang: 0x139, script: 0x0, group: 0x81, distance: 0x4}, + 4: {lang: 0x13e, script: 0x0, group: 0x3, distance: 0x4}, + 5: {lang: 0x13e, script: 0x0, group: 0x83, distance: 0x4}, + 6: {lang: 0x3c0, script: 0x0, group: 0x3, distance: 0x4}, + 7: {lang: 0x3c0, script: 0x0, group: 0x83, distance: 0x4}, + 8: {lang: 0x529, script: 0x3c, group: 0x2, distance: 0x4}, + 9: {lang: 0x529, script: 0x3c, group: 0x82, distance: 0x4}, + 10: {lang: 0x3a, script: 0x0, group: 0x80, distance: 0x5}, + 11: {lang: 0x139, script: 0x0, group: 0x80, distance: 0x5}, + 12: {lang: 0x13e, script: 0x0, group: 0x80, distance: 0x5}, + 13: {lang: 0x3c0, script: 0x0, group: 0x80, distance: 0x5}, + 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5}, +} // Size: 114 bytes + +// Total table size 1472 bytes (1KiB); checksum: F86C669 diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go new file mode 100644 index 00000000000..42ea7926660 --- /dev/null +++ b/vendor/golang.org/x/text/language/tags.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "golang.org/x/text/internal/language/compact" + +// TODO: Various sets of commonly use tags and regions. + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func (c CanonType) MustParse(s string) Tag { + t, err := c.Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Base { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values. 
+func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag(compact.Afrikaans) + Amharic Tag = Tag(compact.Amharic) + Arabic Tag = Tag(compact.Arabic) + ModernStandardArabic Tag = Tag(compact.ModernStandardArabic) + Azerbaijani Tag = Tag(compact.Azerbaijani) + Bulgarian Tag = Tag(compact.Bulgarian) + Bengali Tag = Tag(compact.Bengali) + Catalan Tag = Tag(compact.Catalan) + Czech Tag = Tag(compact.Czech) + Danish Tag = Tag(compact.Danish) + German Tag = Tag(compact.German) + Greek Tag = Tag(compact.Greek) + English Tag = Tag(compact.English) + AmericanEnglish Tag = Tag(compact.AmericanEnglish) + BritishEnglish Tag = Tag(compact.BritishEnglish) + Spanish Tag = Tag(compact.Spanish) + EuropeanSpanish Tag = Tag(compact.EuropeanSpanish) + LatinAmericanSpanish Tag = Tag(compact.LatinAmericanSpanish) + Estonian Tag = Tag(compact.Estonian) + Persian Tag = Tag(compact.Persian) + Finnish Tag = Tag(compact.Finnish) + Filipino Tag = Tag(compact.Filipino) + French Tag = Tag(compact.French) + CanadianFrench Tag = Tag(compact.CanadianFrench) + Gujarati Tag = Tag(compact.Gujarati) + Hebrew Tag = Tag(compact.Hebrew) + Hindi Tag = Tag(compact.Hindi) + Croatian Tag = Tag(compact.Croatian) + Hungarian Tag = Tag(compact.Hungarian) + Armenian Tag = Tag(compact.Armenian) + Indonesian Tag = Tag(compact.Indonesian) + Icelandic Tag = Tag(compact.Icelandic) + Italian Tag = Tag(compact.Italian) + Japanese Tag = Tag(compact.Japanese) + Georgian Tag = Tag(compact.Georgian) + Kazakh Tag = Tag(compact.Kazakh) + Khmer Tag = Tag(compact.Khmer) + Kannada Tag = Tag(compact.Kannada) + Korean Tag = Tag(compact.Korean) + Kirghiz Tag = Tag(compact.Kirghiz) + Lao Tag = Tag(compact.Lao) + Lithuanian Tag = Tag(compact.Lithuanian) + Latvian Tag = Tag(compact.Latvian) + Macedonian Tag = Tag(compact.Macedonian) + Malayalam Tag = Tag(compact.Malayalam) + Mongolian Tag = Tag(compact.Mongolian) + Marathi Tag = Tag(compact.Marathi) + Malay Tag = Tag(compact.Malay) + Burmese Tag = Tag(compact.Burmese) + Nepali Tag = Tag(compact.Nepali) + Dutch Tag = Tag(compact.Dutch) + Norwegian Tag = Tag(compact.Norwegian) + Punjabi Tag = Tag(compact.Punjabi) + Polish Tag = Tag(compact.Polish) + Portuguese Tag = Tag(compact.Portuguese) + BrazilianPortuguese Tag = Tag(compact.BrazilianPortuguese) + EuropeanPortuguese Tag = Tag(compact.EuropeanPortuguese) + Romanian Tag = Tag(compact.Romanian) + Russian Tag = Tag(compact.Russian) + Sinhala Tag = Tag(compact.Sinhala) + Slovak Tag = Tag(compact.Slovak) + Slovenian Tag = Tag(compact.Slovenian) + Albanian Tag = Tag(compact.Albanian) + Serbian Tag = Tag(compact.Serbian) + SerbianLatin Tag = Tag(compact.SerbianLatin) + Swedish Tag = Tag(compact.Swedish) + Swahili Tag = Tag(compact.Swahili) + Tamil Tag = Tag(compact.Tamil) + Telugu Tag = Tag(compact.Telugu) + Thai Tag = Tag(compact.Thai) + Turkish Tag = Tag(compact.Turkish) + Ukrainian Tag = Tag(compact.Ukrainian) + Urdu Tag = Tag(compact.Urdu) + Uzbek Tag = Tag(compact.Uzbek) + Vietnamese Tag = Tag(compact.Vietnamese) + Chinese Tag = Tag(compact.Chinese) + SimplifiedChinese Tag = Tag(compact.SimplifiedChinese) + TraditionalChinese Tag = Tag(compact.TraditionalChinese) + Zulu Tag = Tag(compact.Zulu) +) diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 79a6bb65456..8f7c29f156a 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func 
(lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, _, tokens := lim.advance(t) // does not mutute lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -131,11 +144,11 @@ const InfDuration = time.Duration(math.MaxInt64) // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, _, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -208,8 +221,8 @@ func (lim *Limiter) Reserve() *Reservation { // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. 
-func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -234,7 +247,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { } // wait is the internal implementation of WaitN. -func (lim *Limiter) wait(ctx context.Context, n int, now time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -252,15 +265,15 @@ func (lim *Limiter) wait(ctx context.Context, n int, now time.Time, newTimer fun // Determine wait limit waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } @@ -287,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -304,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -318,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -327,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -339,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, last, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
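Likewise, a small sketch of the blocking and reserving APIs whose documentation is reworded above (Wait honouring a context deadline, Reserve plus Delay/Cancel); the timeout values are arbitrary:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    lim := rate.NewLimiter(1, 1) // 1 token/second, burst of 1

    // Wait blocks until a token is available or the context deadline expires.
    ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
    defer cancel()
    if err := lim.Wait(ctx); err != nil {
        fmt.Println("wait:", err)
    }

    // Reserve never blocks; the caller inspects the delay and may cancel.
    r := lim.Reserve()
    if !r.OK() {
        return // request exceeds burst, reservation impossible
    }
    delay := r.Delay()
    if delay > 2*time.Second {
        r.Cancel() // return the token, we are not going to act
        return
    }
    time.Sleep(delay)
    fmt.Println("acting now")
}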
tokens -= float64(n) @@ -365,12 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) + r.timeToAct = t.Add(waitDuration) } // Update state if ok { - lim.last = now + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct } else { @@ -383,20 +396,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, last, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 2ed25a75024..620446207e2 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -30,7 +30,7 @@ import ( "io/ioutil" "os/exec" - "golang.org/x/tools/go/internal/gcimporter" + "golang.org/x/tools/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file @@ -87,7 +87,11 @@ func NewReader(r io.Reader) (io.Reader, error) { // Read reads export data from in, decodes it, and returns type // information for the package. -// The package name is specified by path. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// // File position information is added to fset. // // Read may inspect and add to the imports map to ensure that references diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index de881562de1..6bb7168d2e3 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -60,6 +60,7 @@ func (r *responseDeduper) addAll(dr *driverResponse) { for _, root := range dr.Roots { r.addRoot(root) } + r.dr.GoVersion = dr.GoVersion } func (r *responseDeduper) addPackage(p *Package) { @@ -454,11 +455,14 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse if err != nil { return nil, err } + seen := make(map[string]*jsonPackage) pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - var response driverResponse + response := &driverResponse{ + GoVersion: goVersion, + } for dec := json.NewDecoder(buf); dec.More(); { p := new(jsonPackage) if err := dec.Decode(p); err != nil { @@ -600,17 +604,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Work around https://golang.org/issue/28749: // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. 
- // Filter out any elements of CompiledGoFiles that are also in OtherFiles. - // We have to keep this workaround in place until go1.12 is a distant memory. - if len(pkg.OtherFiles) > 0 { - other := make(map[string]bool, len(pkg.OtherFiles)) - for _, f := range pkg.OtherFiles { - other[f] = true - } - + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { out := pkg.CompiledGoFiles[:0] for _, f := range pkg.CompiledGoFiles { - if other[f] { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file continue } out = append(out, f) @@ -730,7 +729,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) - return &response, nil + return response, nil } func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { @@ -756,6 +755,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath } +// getGoVersion returns the effective minor version of the go command. func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index a93dc6add4d..9df20919ba2 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -19,6 +19,7 @@ import ( "log" "os" "path/filepath" + "runtime" "strings" "sync" "time" @@ -233,6 +234,11 @@ type driverResponse struct { // Imports will be connected and then type and syntax information added in a // later pass (see refine). Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int } // Load loads and returns the Go packages named by the given patterns. @@ -256,7 +262,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { return nil, err } l.sizes = response.Sizes - return l.refine(response.Roots, response.Packages...) + return l.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. @@ -297,6 +303,9 @@ type Package struct { // of the package, or while parsing or type-checking its files. Errors []Error + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + // GoFiles lists the absolute file paths of the package's Go source files. GoFiles []string @@ -532,6 +541,7 @@ type loaderPackage struct { needsrc bool // load from source (Mode >= LoadTypes) needtypes bool // type information is either requested or depended on initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. @@ -618,7 +628,8 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type and // and syntax information as requested by the LoadMode. 
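For context on the two loader additions above (the driverResponse.GoVersion plumbing and the new Package.TypeErrors field), a rough sketch of how a go/packages consumer might use the exported part; the "./..." pattern and the Mode bits are illustrative only:

package main

import (
    "fmt"

    "golang.org/x/tools/go/packages"
)

func main() {
    cfg := &packages.Config{
        Mode: packages.NeedName | packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo,
    }
    pkgs, err := packages.Load(cfg, "./...")
    if err != nil {
        fmt.Println("load:", err)
        return
    }
    for _, pkg := range pkgs {
        // TypeErrors is the subset of pkg.Errors that came from the type checker,
        // so callers can distinguish them from list or parse errors.
        for _, e := range pkg.TypeErrors {
            fmt.Printf("%s: type error: %s\n", pkg.PkgPath, e.Msg)
        }
    }
}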
-func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { +func (ld *loader) refine(response *driverResponse) ([]*Package, error) { + roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { rootMap[root] = i @@ -626,7 +637,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { ld.pkgs = make(map[string]*loaderPackage) // first pass, fixup and build the map and roots var initial = make([]*loaderPackage, len(roots)) - for _, pkg := range list { + for _, pkg := range response.Packages { rootIndex := -1 if i, found := rootMap[pkg.ID]; found { rootIndex = i @@ -648,6 +659,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { Package: pkg, needtypes: needtypes, needsrc: needsrc, + goVersion: response.GoVersion, } ld.pkgs[lpkg.ID] = lpkg if rootIndex >= 0 { @@ -902,6 +914,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { case types.Error: // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) errs = append(errs, Error{ Pos: err.Fset.Position(err.Pos).String(), Msg: err.Msg, @@ -923,6 +936,33 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.Errors = append(lpkg.Errors, errs...) } + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { // The config requested loading sources and types, but sources are missing. // Add an error to the package and fall back to loading from export data. @@ -981,7 +1021,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { tc := &types.Config{ Importer: importer, - // Type-check bodies of functions only in non-initial packages. + // Type-check bodies of functions only in initial packages. // Example: for import graph A->B->C and initial packages {A,C}, // we can ignore function bodies in B. 
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go similarity index 99% rename from vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go rename to vendor/golang.org/x/tools/internal/gcimporter/bexport.go index 196cb3f9b41..30582ed6d3d 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go @@ -12,7 +12,6 @@ import ( "bytes" "encoding/binary" "fmt" - "go/ast" "go/constant" "go/token" "go/types" @@ -145,7 +144,7 @@ func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) objcount := 0 scope := pkg.Scope() for _, name := range scope.Names() { - if !ast.IsExported(name) { + if !token.IsExported(name) { continue } if trace { @@ -482,7 +481,7 @@ func (p *exporter) method(m *types.Func) { p.pos(m) p.string(m.Name()) - if m.Name() != "_" && !ast.IsExported(m.Name()) { + if m.Name() != "_" && !token.IsExported(m.Name()) { p.pkg(m.Pkg(), false) } @@ -501,7 +500,7 @@ func (p *exporter) fieldName(f *types.Var) { // 3) field name doesn't match base type name (alias name) bname := basetypeName(f.Type()) if name == bname { - if ast.IsExported(name) { + if token.IsExported(name) { name = "" // 1) we don't need to know the field name or package } else { name = "?" // 2) use unexported name "?" to force package export @@ -514,7 +513,7 @@ func (p *exporter) fieldName(f *types.Var) { } p.string(name) - if name != "" && !ast.IsExported(name) { + if name != "" && !token.IsExported(name) { p.pkg(f.Pkg(), false) } } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go rename to vendor/golang.org/x/tools/internal/gcimporter/bimport.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go rename to vendor/golang.org/x/tools/internal/gcimporter/exportdata.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go similarity index 95% rename from vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go rename to vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index e96c39600d1..1faaa365260 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -9,10 +9,11 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. 
-package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" +package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" + "bytes" "errors" "fmt" "go/build" @@ -22,10 +23,12 @@ import ( "io" "io/ioutil" "os" + "os/exec" "path/filepath" "sort" "strconv" "strings" + "sync" "text/scanner" ) @@ -38,6 +41,47 @@ const ( trace = false ) +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + var pkgExts = [...]string{".a", ".o"} // FindPkg returns the filename and unique package id for an import @@ -60,11 +104,18 @@ func FindPkg(path, srcDir string) (filename, id string) { } bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) if bp.PkgObj == "" { - id = path // make sure we have an id to print in error message - return + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath } - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath case build.IsLocalImport(path): // "./x" -> "/this/directory/x.ext", "/this/directory/x" @@ -85,6 +136,12 @@ func FindPkg(path, srcDir string) (filename, id string) { } } + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + // try extensions for _, ext := range pkgExts { filename = noext + ext diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go similarity index 90% rename from vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go rename to vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 9a4ff329e12..7d90f00f323 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -12,7 +12,6 @@ import ( "bytes" "encoding/binary" "fmt" - "go/ast" "go/constant" "go/token" "go/types" @@ -26,6 +25,41 @@ import ( "golang.org/x/tools/internal/typeparams" ) +// IExportShallow encodes "shallow" export data for the specified package. +// +// No promises are made about the encoding other than that it can be +// decoded by the same version of IIExportShallow. If you plan to save +// export data in the file system, be sure to include a cryptographic +// digest of the executable in the key to avoid version skew. 
+func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { + // In principle this operation can only fail if out.Write fails, + // but that's impossible for bytes.Buffer---and as a matter of + // fact iexportCommon doesn't even check for I/O errors. + // TODO(adonovan): handle I/O errors properly. + // TODO(adonovan): use byte slices throughout, avoiding copying. + const bundle, shallow = false, true + var out bytes.Buffer + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return out.Bytes(), err +} + +// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow +// in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. +func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { + const bundle = false + pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// InsertType is the type of a function that creates a types.TypeName +// object for a named type and inserts it into the scope of the +// specified Package. +type InsertType = func(pkg *types.Package, name string) + // Current bundled export format version. Increase with each format change. // 0: initial implementation const bundleVersion = 0 @@ -36,15 +70,17 @@ const bundleVersion = 0 // The package path of the top-level package will not be recorded, // so that calls to IImportData can override with a provided package path. func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg}) + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) } // IExportBundle writes an indexed export bundle for pkgs to out. 
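The IExportData/IExportShallow entry points above live in an internal package; from outside x/tools the non-shallow path is reachable through golang.org/x/tools/go/gcexportdata (also touched earlier in this patch), so a round-trip sketch using that public wrapper may help. The package path "example.com/p" and the source snippet are made up:

package main

import (
    "bytes"
    "fmt"
    "go/ast"
    "go/importer"
    "go/parser"
    "go/token"
    "go/types"

    "golang.org/x/tools/go/gcexportdata"
)

func main() {
    // Type-check a tiny package so we have a *types.Package to export.
    fset := token.NewFileSet()
    file, err := parser.ParseFile(fset, "p.go", `package p; func Hello() string { return "hi" }`, 0)
    if err != nil {
        panic(err)
    }
    conf := types.Config{Importer: importer.Default()}
    pkg, err := conf.Check("example.com/p", fset, []*ast.File{file}, nil)
    if err != nil {
        panic(err)
    }

    // Write export data, then read it back. Note that Read takes the package
    // *path*, since the path may not be recorded in the export data itself.
    var buf bytes.Buffer
    if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
        panic(err)
    }
    imports := make(map[string]*types.Package)
    pkg2, err := gcexportdata.Read(&buf, token.NewFileSet(), imports, "example.com/p")
    if err != nil {
        panic(err)
    }
    fmt.Println(pkg2.Path(), pkg2.Scope().Names())
}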
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - return iexportCommon(out, fset, true, iexportVersion, pkgs) + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) } -func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) { +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { if !debug { defer func() { if e := recover(); e != nil { @@ -61,6 +97,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, p := iexporter{ fset: fset, version: version, + shallow: shallow, allPkgs: map[*types.Package]bool{}, stringIndex: map[string]uint64{}, declIndex: map[types.Object]uint64{}, @@ -82,7 +119,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, for _, pkg := range pkgs { scope := pkg.Scope() for _, name := range scope.Names() { - if ast.IsExported(name) { + if token.IsExported(name) { p.pushDecl(scope.Lookup(name)) } } @@ -205,7 +242,8 @@ type iexporter struct { out *bytes.Buffer version int - localpkg *types.Package + shallow bool // don't put types from other packages in the index + localpkg *types.Package // (nil in bundle mode) // allPkgs tracks all packages that have been referenced by // the export data, so we can ensure to include them in the @@ -256,6 +294,11 @@ func (p *iexporter) pushDecl(obj types.Object) { panic("cannot export package unsafe") } + // Shallow export data: don't index decls from other packages. + if p.shallow && obj.Pkg() != p.localpkg { + return + } + if _, ok := p.declIndex[obj]; ok { return } @@ -497,7 +540,7 @@ func (w *exportWriter) pkg(pkg *types.Package) { w.string(w.exportPath(pkg)) } -func (w *exportWriter) qualifiedIdent(obj types.Object) { +func (w *exportWriter) qualifiedType(obj *types.TypeName) { name := w.p.exportName(obj) // Ensure any referenced declarations are written out too. 
@@ -556,11 +599,11 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { return } w.startType(definedType) - w.qualifiedIdent(t.Obj()) + w.qualifiedType(t.Obj()) case *typeparams.TypeParam: w.startType(typeParamType) - w.qualifiedIdent(t.Obj()) + w.qualifiedType(t.Obj()) case *types.Pointer: w.startType(pointerType) @@ -602,14 +645,17 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Struct: w.startType(structType) - w.setPkg(pkg, true) - n := t.NumFields() + if n > 0 { + w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects + } else { + w.setPkg(pkg, true) + } w.uint64(uint64(n)) for i := 0; i < n; i++ { f := t.Field(i) w.pos(f.Pos()) - w.string(f.Name()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg w.typ(f.Type(), pkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go similarity index 95% rename from vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go rename to vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 4caa0f55d9d..a1c46965350 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -51,6 +51,8 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 ) type ident struct { @@ -83,7 +85,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path) + pkgs, err := iimportCommon(fset, imports, data, false, path, nil) if err != nil { return 0, nil, err } @@ -92,11 +94,11 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "") + return iimportCommon(fset, imports, data, true, "", nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) { - const currentVersion = 1 +func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { defer func() { @@ -145,6 +147,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p := iimporter{ version: int(version), ipath: path, + insert: insert, stringData: stringData, stringCache: make(map[uint64]string), @@ -185,11 +188,18 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } else if pkg.Name() != pkgName { errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) } + if i == 0 && !bundle { + p.localpkg = pkg + } p.pkgCache[pkgPathOff] = pkg + // Read index for package. nameIndex := make(map[string]uint64) - for nSyms := r.uint64(); nSyms > 0; nSyms-- { + nSyms := r.uint64() + // In shallow mode we don't expect an index for other packages. 
+ assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } @@ -265,6 +275,9 @@ type iimporter struct { version int ipath string + localpkg *types.Package + insert func(pkg *types.Package, name string) // "shallow" mode only + stringData []byte stringCache map[uint64]string pkgCache map[uint64]*types.Package @@ -308,6 +321,13 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { + // In "shallow" mode, call back to the application to + // find the object and insert it into the package scope. + if p.insert != nil { + assert(pkg != p.localpkg) + p.insert(pkg, name) // "can't fail" + return + } errorf("%v.%v not in index", pkg, name) } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go rename to vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go rename to vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go rename to vendor/golang.org/x/tools/internal/gcimporter/support_go117.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go similarity index 62% rename from vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go rename to vendor/golang.org/x/tools/internal/gcimporter/support_go118.go index a993843230c..edbe6ea7041 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -21,3 +21,17 @@ func additionalPredeclared() []types.Type { types.Universe.Lookup("any").Type(), } } + +// See cmd/compile/internal/types.SplitVargenSuffix. 
+func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go rename to vendor/golang.org/x/tools/internal/gcimporter/unified_no.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go rename to vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go rename to vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go similarity index 72% rename from vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go rename to vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 3c1a4375435..20b99903c51 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -14,7 +14,7 @@ import ( "go/types" "strings" - "golang.org/x/tools/go/internal/pkgbits" + "golang.org/x/tools/internal/pkgbits" ) // A pkgReader holds the shared state for reading a unified IR package @@ -36,6 +36,12 @@ type pkgReader struct { // laterFns holds functions that need to be invoked at the end of // import reading. laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface } // later adds a function to be invoked at the end of import reading. @@ -63,6 +69,15 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] return } +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. +func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + // readUnifiedPackage reads a package description from the given // unified IR export data decoder. 
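Stepping back to the splitVargenSuffix helper added just above: a tiny standalone copy shows the split it performs on compiler-generated names; the sample inputs below are invented:

package main

import "fmt"

// splitVargenSuffix is copied verbatim from the support_go118.go hunk above;
// it separates the "·N" suffix the compiler appends to local types promoted
// to package scope.
func splitVargenSuffix(name string) (base, suffix string) {
    i := len(name)
    for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
        i--
    }
    const dot = "·"
    if i >= len(dot) && name[i-len(dot):i] == dot {
        i -= len(dot)
        return name[:i], name[i:]
    }
    return name, ""
}

func main() {
    fmt.Println(splitVargenSuffix("Foo·3")) // "Foo" "·3"
    fmt.Println(splitVargenSuffix("Bar"))   // "Bar" ""
}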
func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { @@ -102,6 +117,10 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st fn() } + for _, iface := range pr.ifaces { + iface.Complete() + } + pkg.MarkComplete() return pkg } @@ -139,6 +158,17 @@ func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pk } } +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + // @@@ Positions func (r *reader) pos() token.Pos { @@ -163,26 +193,29 @@ func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { return b } - r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) - // Within types2, position bases have a lot more details (e.g., - // keeping track of where //line directives appeared exactly). - // - // For go/types, we just track the file name. + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. - filename := r.String() + filename = r.String() - if r.Bool() { // file base - // Was: "b = token.NewTrimmedFileBase(filename, true)" - } else { // line base - pos := r.pos() - line := r.Uint() - col := r.Uint() + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() - // Was: "b = token.NewLineBase(pos, filename, true, line, col)" - _, _, _ = pos, line, col + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) } - b := filename pr.posBases[idx] = b return b @@ -231,11 +264,35 @@ func (r *reader) doPkg() *types.Package { for i := range imports { imports[i] = r.pkg() } - pkg.SetImports(imports) + pkg.SetImports(flattenImports(imports)) return pkg } +// flattenImports returns the transitive closure of all imported +// packages rooted from pkgs. +func flattenImports(pkgs []*types.Package) []*types.Package { + var res []*types.Package + seen := make(map[*types.Package]struct{}) + for _, pkg := range pkgs { + if _, ok := seen[pkg]; ok { + continue + } + seen[pkg] = struct{}{} + res = append(res, pkg) + + // pkg.Imports() is already flattened. + for _, pkg := range pkg.Imports() { + if _, ok := seen[pkg]; ok { + continue + } + seen[pkg] = struct{}{} + res = append(res, pkg) + } + } + return res +} + // @@@ Types func (r *reader) typ() types.Type { @@ -264,12 +321,15 @@ func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { return typ } - r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) - r.dict = dict - - typ := r.doTyp() - assert(typ != nil) + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } // See comment in pkgReader.typIdx explaining how this happens. 
if prev := *where; prev != nil { return prev @@ -372,6 +432,16 @@ func (r *reader) interfaceType() *types.Interface { if implicit { iface.MarkImplicit() } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + return iface } @@ -425,18 +495,30 @@ func (r *reader) obj() (types.Object, []types.Type) { } func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { - rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) - objPkg, objName := rname.qualifiedIdent() - assert(objName != "") + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) - tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } if tag == pkgbits.ObjStub { assert(objPkg == nil || objPkg == types.Unsafe) return objPkg, objName } + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + if objPkg.Scope().Lookup(objName) == nil { dict := pr.objDictIdx(idx) @@ -477,13 +559,41 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { named.SetTypeParams(r.typeParamNames()) - // TODO(mdempsky): Rewrite receiver types to underlying is an - // Interface? The go/types importer does this (I think because - // unit tests expected that), but cmd/compile doesn't care - // about it, so maybe we can avoid worrying about that here. rhs := r.typ() - r.p.later(func() { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } underlying := rhs.Underlying() + + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). 
+ if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + named.SetUnderlying(underlying) }) @@ -502,25 +612,28 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { } func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { - r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) var dict readerDict - if implicits := r.Len(); implicits != 0 { - errorf("unexpected object with %v implicit type parameter(s)", implicits) - } + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } - dict.bounds = make([]typeInfo, r.Len()) - for i := range dict.bounds { - dict.bounds[i] = r.typInfo() - } + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } - dict.derived = make([]derivedInfo, r.Len()) - dict.derivedTypes = make([]types.Type, len(dict.derived)) - for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} - } + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + pr.retireReader(r) + } // function references follow, but reader doesn't need those return &dict diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 67256dc3974..d50551693f3 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -10,8 +10,10 @@ import ( "context" "fmt" "io" + "log" "os" "regexp" + "runtime" "strconv" "strings" "sync" @@ -232,6 +234,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { return runCmdContext(ctx, cmd) } +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { @@ -243,11 +251,24 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { resChan <- cmd.Wait() }() - select { - case err := <-resChan: - return err - case <-ctx.Done(): + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(1 * time.Minute): + HandleHangingGoCommand(cmd.Process) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } } + // Cancelled. 
Interrupt and see if it ends voluntarily. cmd.Process.Signal(os.Interrupt) select { @@ -255,11 +276,63 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { return err case <-time.After(time.Second): } + // Didn't shut down in response to interrupt. Kill it hard. - cmd.Process.Kill() + // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT + // on certain platforms, such as unix. + if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { + // Don't panic here as this reliably fails on windows with EINVAL. + log.Printf("error killing the Go command: %v", err) + } + + // See above: don't wait indefinitely if we're debugging hanging Go commands. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill + HandleHangingGoCommand(cmd.Process) + } + } return <-resChan } +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. + +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 71304368020..98e834f74d3 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -7,11 +7,19 @@ package gocommand import ( "context" "fmt" + "regexp" "strings" ) -// GoVersion checks the go version by running "go list" with modules off. -// It returns the X in Go 1.X. +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. 
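To make the warning described above concrete: the internal gocommand.GoVersion helper cannot be imported from outside x/tools, but the same comparison can be sketched directly against the go command on the PATH. The parsing below is a simplification written for this note, not the library's code:

package main

import (
    "fmt"
    "os/exec"
    "runtime"
    "strconv"
    "strings"
)

func main() {
    // Same query the helper issues: the release tags built into the go command,
    // printed as something like "[go1.1 go1.2 ... go1.21]".
    out, err := exec.Command("go", "list", "-e", "-f", "{{context.ReleaseTags}}", "--", "unsafe").Output()
    if err != nil {
        panic(err)
    }
    tags := strings.Fields(strings.Trim(strings.TrimSpace(string(out)), "[]"))
    goCmdMinor := 0
    for _, tag := range tags {
        if v, err := strconv.Atoi(strings.TrimPrefix(tag, "go1.")); err == nil && v > goCmdMinor {
            goCmdMinor = v
        }
    }

    var runtimeMinor int
    fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeMinor)

    if runtimeMinor != 0 && runtimeMinor < goCmdMinor {
        fmt.Printf("built with go1.%d but 'go' on PATH reports go1.%d; go/types and friends may not understand newer syntax\n",
            runtimeMinor, goCmdMinor)
    }
}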
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} @@ -38,7 +46,7 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { if len(stdout) < 3 { return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) } - // Split up "[go1.1 go1.15]" + // Split up "[go1.1 go1.15]" and return highest go1.X value. tags := strings.Fields(stdout[1 : len(stdout)-2]) for i := len(tags) - 1; i >= 0; i-- { var version int @@ -49,3 +57,23 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { } return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) } + +// GoVersionString reports the go version string as shown in `go version` command output. +// When `go version` outputs in non-standard form, this returns an empty string. +func GoVersionString(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return parseGoVersionOutput(goVersion.Bytes()), nil +} + +func parseGoVersionOutput(data []byte) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return string(m[1]) +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go similarity index 100% rename from vendor/golang.org/x/tools/go/internal/pkgbits/codes.go rename to vendor/golang.org/x/tools/internal/pkgbits/codes.go diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go similarity index 83% rename from vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go rename to vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 2bc793668ec..3205c9a16c5 100644 --- a/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -6,9 +6,11 @@ package pkgbits import ( "encoding/binary" + "errors" "fmt" "go/constant" "go/token" + "io" "math/big" "os" "runtime" @@ -51,6 +53,8 @@ type PkgDecoder struct { // For example, section K's end positions start at elemEndsEnds[K-1] // (or 0, if K==0) and end at elemEndsEnds[K]. elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt } // PkgPath returns the package path for the package @@ -94,7 +98,7 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) - pos, err := r.Seek(0, os.SEEK_CUR) + pos, err := r.Seek(0, io.SeekCurrent) assert(err == nil) pr.elemData = input[pos:] @@ -164,6 +168,21 @@ func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Deco return r } +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + // NewDecoderRaw returns a Decoder for the given (section, index) pair. 
// // Most callers should use NewDecoder instead. @@ -187,6 +206,30 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { return r } +func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + r.Data.Reset(pr.DataIdx(k, idx)) + r.Sync(SyncRelocs) + l := r.Len() + if cap(pr.scratchRelocEnt) >= l { + r.Relocs = pr.scratchRelocEnt[:l] + pr.scratchRelocEnt = nil + } else { + r.Relocs = make([]RelocEnt, l) + } + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + // A Decoder provides methods for decoding an individual element's // bitstream data. type Decoder struct { @@ -206,11 +249,39 @@ func (r *Decoder) checkErr(err error) { } func (r *Decoder) rawUvarint() uint64 { - x, err := binary.ReadUvarint(&r.Data) + x, err := readUvarint(&r.Data) r.checkErr(err) return x } +// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint. +// This avoids the interface conversion and thus has better escape properties, +// which flows up the stack. +func readUvarint(r *strings.Reader) (uint64, error) { + var x uint64 + var s uint + for i := 0; i < binary.MaxVarintLen64; i++ { + b, err := r.ReadByte() + if err != nil { + if i > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return x, err + } + if b < 0x80 { + if i == binary.MaxVarintLen64-1 && b > 1 { + return x, overflow + } + return x | uint64(b)< var (+ other variable assignment codes) */ - // UntypedNil occurs when the predeclared (untyped) value nil is used to + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to // initialize a variable declared without an explicit type. // // Example: // var x = nil - UntypedNil + UntypedNilUse // WrongAssignCount occurs when the number of values on the right-hand side // of an assignment or or initialization expression does not match the number @@ -1523,4 +1529,32 @@ const ( // Example: // type T[P any] struct{ *P } MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + ) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go index de90e9515ae..15ecf7c5ded 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -8,6 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] _ = x[Test-1] _ = x[BlankPkgName-2] _ = x[MismatchedPkgName-3] @@ -23,7 +24,7 @@ func _() { _ = x[InvalidConstInit-13] _ = x[InvalidConstVal-14] _ = x[InvalidConstType-15] - _ = x[UntypedNil-16] + _ = x[UntypedNilUse-16] _ = x[WrongAssignCount-17] _ = x[UnassignableOperand-18] _ = x[NoNewVar-19] @@ -152,16 +153,27 @@ func _() { _ = x[MisplacedConstraintIface-142] _ = x[InvalidMethodTypeParams-143] _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] } -const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParam" +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = 
"TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) -var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903, 1910, 1922, 1938, 1956, 1974, 1989, 2006, 2025, 2039, 2059, 2071, 2095, 2118, 2136} +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 
1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) func (i ErrorCode) String() string { - i -= 1 - if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { - return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] } diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 66caf24cb13..8356e7f27b0 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -17,92 +17,10 @@ import ( "sync" "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. 
-func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -234,7 +152,10 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) @@ -245,7 +166,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index ce0e80c4e80..9e416e92ba1 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.97.0" +const Version = "0.102.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 3a78ffe7798..acb3d6e2398 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,15 +1,17 @@ -# cloud.google.com/go v0.104.0 -## explicit; go 1.17 -cloud.google.com/go +# cloud.google.com/go v0.105.0 +## explicit; go 1.19 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.7.0 -## explicit; go 1.15 +# cloud.google.com/go/compute v1.12.1 +## explicit; go 1.19 +cloud.google.com/go/compute/internal +# cloud.google.com/go/compute/metadata v0.2.1 +## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.3.0 -## explicit; go 1.15 +# cloud.google.com/go/iam v0.6.0 +## explicit; go 1.19 cloud.google.com/go/iam # cloud.google.com/go/storage v1.27.0 ## explicit; go 1.17 @@ -101,7 +103,7 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.44.109 +# github.com/aws/aws-sdk-go v1.44.156 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -217,6 +219,9 @@ github.com/aws/smithy-go/rand github.com/aws/smithy-go/time github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io +# github.com/benbjohnson/clock v1.3.0 +## explicit; go 1.15 +github.com/benbjohnson/clock # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -232,14 +237,15 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash v1.1.0 ## explicit github.com/cespare/xxhash -# github.com/cespare/xxhash/v2 v2.1.2 +# github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/coreos/go-semver v0.3.0 ## explicit github.com/coreos/go-semver/semver -# github.com/coreos/go-systemd/v22 v22.3.2 +# github.com/coreos/go-systemd/v22 v22.4.0 ## explicit; go 1.12 +github.com/coreos/go-systemd/v22/activation github.com/coreos/go-systemd/v22/journal # github.com/davecgh/go-spew v1.1.1 ## explicit @@ -250,7 +256,7 @@ github.com/dennwc/varint # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous -# github.com/docker/go-units v0.4.0 +# github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units # github.com/dustin/go-humanize v1.0.0 @@ -279,7 +285,7 @@ github.com/felixge/fgprof # github.com/felixge/httpsnoop v1.0.3 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.5.4 +# github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify # github.com/go-kit/log v0.2.1 @@ -296,7 +302,7 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/analysis v0.21.2 +# github.com/go-openapi/analysis v0.21.4 ## explicit; go 1.13 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal/debug @@ -305,7 +311,7 @@ github.com/go-openapi/analysis/internal/flatten/operations github.com/go-openapi/analysis/internal/flatten/replace github.com/go-openapi/analysis/internal/flatten/schutils github.com/go-openapi/analysis/internal/flatten/sortref -# github.com/go-openapi/errors v0.20.2 +# github.com/go-openapi/errors v0.20.3 ## explicit; go 1.14 github.com/go-openapi/errors # github.com/go-openapi/jsonpointer 
v0.19.5 @@ -315,11 +321,11 @@ github.com/go-openapi/jsonpointer ## explicit; go 1.13 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/loads v0.21.1 +# github.com/go-openapi/loads v0.21.2 ## explicit; go 1.13 github.com/go-openapi/loads -# github.com/go-openapi/runtime v0.23.1 -## explicit; go 1.15 +# github.com/go-openapi/runtime v0.25.0 +## explicit; go 1.18 github.com/go-openapi/runtime github.com/go-openapi/runtime/flagext github.com/go-openapi/runtime/logger @@ -328,7 +334,7 @@ github.com/go-openapi/runtime/middleware/denco github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security -# github.com/go-openapi/spec v0.20.4 => github.com/go-openapi/spec v0.20.6 +# github.com/go-openapi/spec v0.20.7 => github.com/go-openapi/spec v0.20.6 ## explicit; go 1.13 github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.21.3 @@ -337,7 +343,7 @@ github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.22.3 ## explicit; go 1.18 github.com/go-openapi/swag -# github.com/go-openapi/validate v0.21.0 +# github.com/go-openapi/validate v0.22.0 ## explicit; go 1.14 github.com/go-openapi/validate # github.com/go-redis/redis/v8 v8.11.5 @@ -350,7 +356,7 @@ github.com/go-redis/redis/v8/internal/pool github.com/go-redis/redis/v8/internal/proto github.com/go-redis/redis/v8/internal/rand github.com/go-redis/redis/v8/internal/util -# github.com/gofrs/uuid v4.2.0+incompatible +# github.com/gofrs/uuid v4.3.1+incompatible ## explicit github.com/gofrs/uuid # github.com/gogo/googleapis v1.4.0 @@ -405,18 +411,18 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 +# github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e ## explicit; go 1.18 github.com/google/pprof/profile # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.1.0 +# github.com/googleapis/enterprise-certificate-proxy v0.2.0 ## explicit; go 1.18 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.5.1 -## explicit; go 1.18 +# github.com/googleapis/gax-go/v2 v2.6.0 +## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto @@ -470,7 +476,7 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 ## explicit github.com/hashicorp/go-sockaddr -# github.com/hashicorp/golang-lru v0.5.4 +# github.com/hashicorp/golang-lru v0.6.0 ## explicit; go 1.12 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -530,7 +536,7 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.14 ## explicit; go 1.12 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 +# github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/miekg/dns v1.1.50 @@ -603,8 +609,8 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/alertmanager v0.24.0 -## explicit; go 1.16 +# 
github.com/prometheus/alertmanager v0.25.0 +## explicit; go 1.18 github.com/prometheus/alertmanager/api github.com/prometheus/alertmanager/api/metrics github.com/prometheus/alertmanager/api/v1 @@ -660,8 +666,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.37.0 -## explicit; go 1.16 +# github.com/prometheus/common v0.38.0 +## explicit; go 1.17 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -671,15 +677,15 @@ github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.7.3 -## explicit; go 1.14 +# github.com/prometheus/exporter-toolkit v0.8.2 +## explicit; go 1.17 github.com/prometheus/exporter-toolkit/web # github.com/prometheus/procfs v0.8.0 ## explicit; go 1.17 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.39.1 +# github.com/prometheus/prometheus v0.40.7 ## explicit; go 1.18 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -688,6 +694,7 @@ github.com/prometheus/prometheus/discovery/file github.com/prometheus/prometheus/discovery/refresh github.com/prometheus/prometheus/discovery/targetgroup github.com/prometheus/prometheus/model/exemplar +github.com/prometheus/prometheus/model/histogram github.com/prometheus/prometheus/model/labels github.com/prometheus/prometheus/model/metadata github.com/prometheus/prometheus/model/relabel @@ -697,6 +704,7 @@ github.com/prometheus/prometheus/model/timestamp github.com/prometheus/prometheus/model/value github.com/prometheus/prometheus/notifier github.com/prometheus/prometheus/prompb +github.com/prometheus/prometheus/prompb/io/prometheus/client github.com/prometheus/prometheus/promql github.com/prometheus/prometheus/promql/parser github.com/prometheus/prometheus/rules @@ -715,9 +723,10 @@ github.com/prometheus/prometheus/tsdb/index github.com/prometheus/prometheus/tsdb/record github.com/prometheus/prometheus/tsdb/tombstones github.com/prometheus/prometheus/tsdb/tsdbutil -github.com/prometheus/prometheus/tsdb/wal +github.com/prometheus/prometheus/tsdb/wlog github.com/prometheus/prometheus/util/gate github.com/prometheus/prometheus/util/httputil +github.com/prometheus/prometheus/util/jsonutil github.com/prometheus/prometheus/util/logging github.com/prometheus/prometheus/util/osutil github.com/prometheus/prometheus/util/pool @@ -734,6 +743,11 @@ github.com/rs/cors # github.com/rs/xid v1.4.0 ## explicit; go 1.12 github.com/rs/xid +# github.com/rueian/rueidis v0.0.90 +## explicit; go 1.18 +github.com/rueian/rueidis +github.com/rueian/rueidis/internal/cmds +github.com/rueian/rueidis/internal/util # github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 ## explicit github.com/sean-/seed @@ -780,7 +794,7 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing -# github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 +# github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -819,7 +833,7 @@ 
github.com/thanos-io/thanos/pkg/store/storepb github.com/thanos-io/thanos/pkg/store/storepb/prompb github.com/thanos-io/thanos/pkg/strutil github.com/thanos-io/thanos/pkg/targets/targetspb -github.com/thanos-io/thanos/pkg/testutil +github.com/thanos-io/thanos/pkg/testutil/e2eutil github.com/thanos-io/thanos/pkg/tls github.com/thanos-io/thanos/pkg/tracing github.com/thanos-io/thanos/pkg/tracing/migration @@ -855,7 +869,7 @@ github.com/vimeo/galaxycache/http github.com/vimeo/galaxycache/lru github.com/vimeo/galaxycache/promoter github.com/vimeo/galaxycache/singleflight -# github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae +# github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d ## explicit; go 1.14 github.com/weaveworks/common/errors github.com/weaveworks/common/grpc @@ -899,8 +913,8 @@ go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver -# go.mongodb.org/mongo-driver v1.10.2 -## explicit; go 1.10 +# go.mongodb.org/mongo-driver v1.11.0 +## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -927,8 +941,8 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 -## explicit; go 1.17 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 +## explicit; go 1.18 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp # go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 ## explicit; go 1.17 @@ -975,8 +989,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform # go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc -# go.opentelemetry.io/otel/metric v0.32.0 -## explicit; go 1.17 +# go.opentelemetry.io/otel/metric v0.33.0 +## explicit; go 1.18 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/global go.opentelemetry.io/otel/metric/instrument @@ -1022,7 +1036,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa +# golang.org/x/crypto v0.1.0 ## explicit; go 1.17 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1030,7 +1044,11 @@ golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 +# golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 +## explicit; go 1.18 +golang.org/x/exp/constraints +golang.org/x/exp/slices +# golang.org/x/mod v0.7.0 ## explicit; go 1.17 golang.org/x/mod/semver # golang.org/x/net v0.4.0 @@ -1050,7 +1068,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 +# golang.org/x/oauth2 v0.1.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1060,7 +1078,7 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20220907140024-f12130a52804 +# golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup # golang.org/x/sys v0.3.0 @@ -1073,35 +1091,41 @@ golang.org/x/sys/windows 
golang.org/x/sys/windows/registry # golang.org/x/text v0.5.0 ## explicit; go 1.17 +golang.org/x/text/cases +golang.org/x/text/internal +golang.org/x/text/internal/language +golang.org/x/text/internal/language/compact +golang.org/x/text/internal/tag +golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 +# golang.org/x/time v0.1.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.1.12 +# golang.org/x/tools v0.4.0 ## explicit; go 1.18 golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/go/internal/pkgbits golang.org/x/tools/go/packages golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f +# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.97.0 -## explicit; go 1.15 +# google.golang.org/api v0.102.0 +## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/iamcredentials/v1 @@ -1133,7 +1157,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 +# google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c ## explicit; go 1.19 google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody From 817a79597ad12be99b647596c17144a2162d70c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 19:02:33 -0800 Subject: [PATCH 040/133] Bump github.com/minio/minio-go/v7 from 7.0.45 to 7.0.46 (#5082) Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.45 to 7.0.46. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.45...v7.0.46) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/minio/minio-go/v7/Makefile | 2 + .../minio/minio-go/v7/api-bucket-lifecycle.go | 4 +- .../minio/minio-go/v7/api-bucket-policy.go | 4 +- .../minio-go/v7/api-bucket-replication.go | 4 +- .../minio/minio-go/v7/api-bucket-tagging.go | 3 +- .../minio/minio-go/v7/api-compose-object.go | 5 +- .../minio/minio-go/v7/api-copy-object.go | 3 +- .../minio/minio-go/v7/api-error-response.go | 23 +- .../github.com/minio/minio-go/v7/api-list.go | 2 + .../minio-go/v7/api-put-object-multipart.go | 7 +- .../minio-go/v7/api-put-object-streaming.go | 215 +++++++++++++++++- .../minio/minio-go/v7/api-put-object.go | 10 +- .../minio-go/v7/api-putobject-snowball.go | 3 +- .../minio/minio-go/v7/api-select.go | 32 +-- vendor/github.com/minio/minio-go/v7/api.go | 11 +- .../minio/minio-go/v7/functional_tests.go | 124 ++++++---- .../v7/pkg/credentials/assume_role.go | 5 +- .../v7/pkg/credentials/error_response.go | 3 +- .../v7/pkg/credentials/file_minio_client.go | 9 +- .../minio-go/v7/pkg/credentials/iam_aws.go | 6 +- .../v7/pkg/credentials/sts_client_grants.go | 4 +- .../v7/pkg/credentials/sts_ldap_identity.go | 4 +- .../v7/pkg/credentials/sts_tls_identity.go | 3 +- .../v7/pkg/credentials/sts_web_identity.go | 4 +- .../v7/pkg/notification/notification.go | 26 +-- .../v7/pkg/replication/replication.go | 4 + ...st-signature-streaming-unsigned-trailer.go | 3 +- .../pkg/signer/request-signature-streaming.go | 3 +- .../minio/minio-go/v7/post-policy.go | 2 +- .../github.com/minio/minio-go/v7/transport.go | 3 +- vendor/github.com/minio/minio-go/v7/utils.go | 3 +- vendor/modules.txt | 2 +- 34 files changed, 384 insertions(+), 158 deletions(-) diff --git a/go.mod b/go.mod index c3e4661a64b..a414530728c 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 github.com/lib/pq v1.10.7 - github.com/minio/minio-go/v7 v7.0.45 + github.com/minio/minio-go/v7 v7.0.46 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e diff --git a/go.sum b/go.sum index 9b7c122d0b0..b70a00518e6 100644 --- a/go.sum +++ b/go.sum @@ -1118,8 +1118,8 @@ github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7Xn github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs= -github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/minio-go/v7 v7.0.46 h1:Vo3tNmNXuj7ME5qrvN4iadO7b4mzu/RSFdUkUhaPldk= +github.com/minio/minio-go/v7 v7.0.46/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index ace66670542..40d57c130ea 
100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -16,6 +16,8 @@ lint: vet: @GO111MODULE=on go vet ./... + @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest + ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005" test: @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go index 7e2199732ee..3f88d077777 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -21,7 +21,7 @@ import ( "bytes" "context" "encoding/xml" - "io/ioutil" + "io" "net/http" "net/url" @@ -143,5 +143,5 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b } } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go index e7edf9c9712..dbb5259a81c 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -18,7 +18,7 @@ package minio import ( "context" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string } } - bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + bucketPolicyBuf, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go index c80488eee1d..d5895dfe04d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -22,7 +22,7 @@ import ( "context" "encoding/json" "encoding/xml" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str if resp.StatusCode != http.StatusOK { return s, httpRespToErrorResponse(resp, bucketName, "") } - respBytes, err := ioutil.ReadAll(resp.Body) + respBytes, err := io.ReadAll(resp.Body) if err != nil { return s, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go index 1615f8f8739..86d74298a6a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -22,7 +22,6 @@ import ( "encoding/xml" "errors" "io" - "io/ioutil" "net/http" "net/url" @@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags return nil, httpRespToErrorResponse(resp, bucketName, "") } - defer io.Copy(ioutil.Discard, resp.Body) + defer io.Copy(io.Discard, resp.Body) return tags.ParseBucketXML(resp.Body) } diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index 835c8bd8ad5..1ba68c79eef 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -516,7 +515,7 
@@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs .. return UploadInfo{}, err } if dst.Progress != nil { - io.CopyN(ioutil.Discard, dst.Progress, end-start+1) + io.CopyN(io.Discard, dst.Progress, end-start+1) } objParts = append(objParts, complPart) partIndex++ @@ -525,7 +524,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs .. // 4. Make final complete-multipart request. uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, - completeMultipartUpload{Parts: objParts}, PutObjectOptions{}) + completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption}) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go index 1c0ad2be4c0..0c95d91ec76 100644 --- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -20,7 +20,6 @@ package minio import ( "context" "io" - "io/ioutil" "net/http" ) @@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr // Update the progress properly after successful copy. if dst.Progress != nil { - io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size)) + io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size)) } cpObjRes := copyObjectResult{} diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index 33ca394588e..4ec0c87c200 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -22,7 +22,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" ) @@ -108,7 +107,7 @@ const ( func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { // read the whole body (up to 1MB) const maxBodyLength = 1 << 20 - body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) if err != nil { return nil, err } @@ -253,26 +252,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) } } -// errInvalidBucketName - Invalid bucket name response. -func errInvalidBucketName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", - Message: message, - RequestID: "minio", - } -} - -// errInvalidObjectName - Invalid object name response. -func errInvalidObjectName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchKey", - Message: message, - RequestID: "minio", - } -} - // errInvalidArgument - Invalid argument response. func errInvalidArgument(message string) error { return ErrorResponse{ diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index d216afb3972..bfb2c11514f 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -897,6 +897,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM } // listObjectParts list all object parts recursively. +// +//lint:ignore U1000 Keep this around func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { // Part number marker for the next batch of request. 
var nextPartNumberMarker int diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 3c9a13ff20b..18bdeb24c16 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -26,7 +26,6 @@ import ( "fmt" "hash/crc32" "io" - "io/ioutil" "net/http" "net/url" "sort" @@ -201,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - opts = PutObjectOptions{} + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() @@ -412,7 +413,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object // Read resp.Body into a []bytes to parse for Error response inside the body var b []byte - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index e3a14c59d8e..cdc7d3d3885 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -28,6 +28,7 @@ import ( "net/url" "sort" "strings" + "sync" "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/s3utils" @@ -44,7 +45,9 @@ import ( func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { - if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { + if opts.ConcurrentStreamParts && opts.NumThreads > 1 { + info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) + } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) } else { @@ -266,6 +269,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } if withChecksum { // Add hash of hashes. crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) @@ -278,7 +284,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} } - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } @@ -425,6 +431,211 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } + if len(crcBytes) > 0 { + // Add hash of hashes. 
+ crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel. +// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer. +func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string, + reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + if !opts.SendContentMd5 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } + + // Cancel all when an error occurs. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Initiates a new multipart request + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") + + // Aborts the multipart upload if the function returns + // any error, since we do not resume we should purge + // the parts which have been uploaded to relinquish + // storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + var crcBytes []byte + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + md5Hash := c.md5Hasher() + defer md5Hash.Close() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + nBuffers := int64(opts.NumThreads) + bufs := make(chan []byte, nBuffers) + all := make([]byte, nBuffers*partSize) + for i := int64(0); i < nBuffers; i++ { + bufs <- all[i*partSize : i*partSize+partSize] + } + + var wg sync.WaitGroup + var mu sync.Mutex + errCh := make(chan error, opts.NumThreads) + + reader = newHook(reader, opts.Progress) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Proceed to upload the part. + var buf []byte + select { + case buf = <-bufs: + case err = <-errCh: + cancel() + wg.Wait() + return UploadInfo{}, err + } + + if int64(len(buf)) != partSize { + return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize) + } + + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + // Done + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + cancel() + wg.Wait() + return UploadInfo{}, rerr + } + + // Calculate md5sum. + customHeader := make(http.Header) + if !opts.SendContentMd5 { + // Add CRC32C instead. 
+ crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) + } + + wg.Add(1) + go func(partNumber int) { + // Avoid declaring variables in the for loop + var md5Base64 string + + if opts.SendContentMd5 { + md5Hash.Reset() + md5Hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) + } + + defer wg.Done() + p := uploadPartParams{ + bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: bytes.NewReader(buf[:length]), + partNumber: partNumber, + md5Base64: md5Base64, + size: int64(length), + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + customHeader: customHeader, + } + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + errCh <- uerr + } + + // Save successfully uploaded part metadata. + mu.Lock() + partsInfo[partNumber] = objPart + mu.Unlock() + + // Send buffer back so it can be reused. + bufs <- buf + }(partNumber) + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + } + wg.Wait() + + // Collect any error + select { + case err = <-errCh: + return UploadInfo{}, err + default: + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{} if len(crcBytes) > 0 { // Add hash of hashes. diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 5735bee5eb2..2376ee87466 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -87,7 +87,12 @@ type PutObjectOptions struct { SendContentMd5 bool DisableContentSha256 bool DisableMultipart bool - Internal AdvancedPutOptions + + // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, + // fill them serially and upload them in parallel. + // This can be used for faster uploads on non-seekable or slow-to-seek input. 
+ ConcurrentStreamParts bool + Internal AdvancedPutOptions } // getNumThreads - gets the number of threads to be used in the multipart @@ -272,6 +277,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str if opts.DisableMultipart { return UploadInfo{}, errors.New("no length provided and multipart disabled") } + if opts.ConcurrentStreamParts && opts.NumThreads > 1 { + return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) + } return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go index b7502e2d927..899bc6b730b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -24,7 +24,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "strings" "sync" @@ -107,7 +106,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil } } else { - f, err := ioutil.TempFile("", "s3-putsnowballobjects-*") + f, err := os.CreateTemp("", "s3-putsnowballobjects-*") if err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go index 5d47d7ec56a..628d967ff46 100644 --- a/vendor/github.com/minio/minio-go/v7/api-select.go +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -41,8 +41,8 @@ type CSVFileHeaderInfo string // Constants for file header info. const ( CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" - CSVFileHeaderInfoIgnore = "IGNORE" - CSVFileHeaderInfoUse = "USE" + CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE" + CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE" ) // SelectCompressionType - is the parameter for what type of compression is @@ -52,15 +52,15 @@ type SelectCompressionType string // Constants for compression types under select API. const ( SelectCompressionNONE SelectCompressionType = "NONE" - SelectCompressionGZIP = "GZIP" - SelectCompressionBZIP = "BZIP2" + SelectCompressionGZIP SelectCompressionType = "GZIP" + SelectCompressionBZIP SelectCompressionType = "BZIP2" // Non-standard compression schemes, supported by MinIO hosts: - SelectCompressionZSTD = "ZSTD" // Zstandard compression. - SelectCompressionLZ4 = "LZ4" // LZ4 Stream - SelectCompressionS2 = "S2" // S2 Stream - SelectCompressionSNAPPY = "SNAPPY" // Snappy stream + SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression. + SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream + SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream + SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream ) // CSVQuoteFields - is the parameter for how CSV fields are quoted. @@ -69,7 +69,7 @@ type CSVQuoteFields string // Constants for csv quote styles. const ( CSVQuoteFieldsAlways CSVQuoteFields = "Always" - CSVQuoteFieldsAsNeeded = "AsNeeded" + CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded" ) // QueryExpressionType - is of what syntax the expression is, this should only @@ -87,7 +87,7 @@ type JSONType string // Constants for JSONTypes. const ( JSONDocumentType JSONType = "DOCUMENT" - JSONLinesType = "LINES" + JSONLinesType JSONType = "LINES" ) // ParquetInputOptions parquet input specific options @@ -378,8 +378,8 @@ type SelectObjectType string // Constants for input data types. 
const ( SelectObjectTypeCSV SelectObjectType = "CSV" - SelectObjectTypeJSON = "JSON" - SelectObjectTypeParquet = "Parquet" + SelectObjectTypeJSON SelectObjectType = "JSON" + SelectObjectTypeParquet SelectObjectType = "Parquet" ) // preludeInfo is used for keeping track of necessary information from the @@ -416,7 +416,7 @@ type messageType string const ( errorMsg messageType = "error" - commonMsg = "event" + commonMsg messageType = "event" ) // eventType represents the type of event. @@ -425,9 +425,9 @@ type eventType string // list of event-types returned by Select API. const ( endEvent eventType = "End" - recordsEvent = "Records" - progressEvent = "Progress" - statsEvent = "Stats" + recordsEvent eventType = "Records" + progressEvent eventType = "Progress" + statsEvent eventType = "Stats" ) // contentType represents content type of event. diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index a93a6c6207a..084b3ccdd95 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -25,7 +25,6 @@ import ( "fmt" "hash/crc32" "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -119,7 +118,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.45" + libraryVersion = "v7.0.46" ) // User Agent should always following the below style. @@ -635,7 +634,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } // Read the body to be saved later. - errBodyBytes, err := ioutil.ReadAll(res.Body) + errBodyBytes, err := io.ReadAll(res.Body) // res.Body should be closed closeResponse(res) if err != nil { @@ -644,14 +643,14 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // Save the body. errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = ioutil.NopCloser(errBodySeeker) + res.Body = io.NopCloser(errBodySeeker) // For errors verify if its retryable otherwise fail quickly. errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) // Save the body back again. errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = ioutil.NopCloser(errBodySeeker) + res.Body = io.NopCloser(errBodySeeker) // Bucket region if set in error response and the error // code dictates invalid region, we can retry the request @@ -814,7 +813,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request if metadata.contentLength == 0 { req.Body = nil } else { - req.Body = ioutil.NopCloser(metadata.contentBody) + req.Body = io.NopCloser(metadata.contentBody) } // Set incoming content-length. 
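(Usage sketch, not part of this patch: the ConcurrentStreamParts, NumThreads and PartSize names below come straight from the PutObjectOptions fields added in api-put-object.go above; the endpoint, credentials, bucket and input reader are hypothetical placeholders.)

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical client setup; endpoint and credentials are placeholders.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	// A non-seekable input (e.g. a pipe); passing -1 as the size means the
	// object length is unknown up front.
	input := os.Stdin

	// With ConcurrentStreamParts set and NumThreads > 1, the upload takes the
	// putObjectMultipartStreamParallel path introduced in this version:
	// NumThreads buffers of PartSize bytes are filled serially from the
	// reader and uploaded in parallel.
	_, err = client.PutObject(context.Background(), "example-bucket", "example-object",
		input, -1, minio.PutObjectOptions{
			ConcurrentStreamParts: true,
			NumThreads:            4,
			PartSize:              16 << 20, // 16 MiB buffer per part
		})
	if err != nil {
		log.Fatal(err)
	}
}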
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index e86e142e552..23dd7e9b9ed 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -31,7 +31,6 @@ import ( "hash" "hash/crc32" "io" - "io/ioutil" "math/rand" "mime/multipart" "net/http" @@ -346,7 +345,7 @@ func getDataReader(fileName string) io.ReadCloser { if _, ok := dataFileCRC32[fileName]; !ok { dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) } - return ioutil.NopCloser(newRandomReader(size, size)) + return io.NopCloser(newRandomReader(size, size)) } reader, _ := os.Open(getMintDataDirFilePath(fileName)) if _, ok := dataFileCRC32[fileName]; !ok { @@ -989,7 +988,7 @@ func testGetObjectWithVersioning() { for _, testFile := range testFiles { r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "unexpected failure", err) return @@ -1131,7 +1130,7 @@ func testPutObjectWithVersioning() { var errs [n]error for i := 0; i < n; i++ { r := newRandomReader(int64((1<<20)*i+i), int64(i)) - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "unexpected failure", err) return @@ -1271,7 +1270,7 @@ func testCopyObjectWithVersioning() { testFiles := []string{"datafile-1-b", "datafile-10-kB"} for _, testFile := range testFiles { r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "unexpected failure", err) return @@ -1304,7 +1303,7 @@ func testCopyObjectWithVersioning() { return } - oldestContent, err := ioutil.ReadAll(reader) + oldestContent, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) return @@ -1338,7 +1337,7 @@ func testCopyObjectWithVersioning() { } defer readerCopy.Close() - newestContent, err := ioutil.ReadAll(readerCopy) + newestContent, err := io.ReadAll(readerCopy) if err != nil { logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) return @@ -1408,7 +1407,7 @@ func testConcurrentCopyObjectWithVersioning() { testFiles := []string{"datafile-10-kB"} for _, testFile := range testFiles { r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "unexpected failure", err) return @@ -1441,7 +1440,7 @@ func testConcurrentCopyObjectWithVersioning() { return } - oldestContent, err := ioutil.ReadAll(reader) + oldestContent, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) return @@ -1491,7 +1490,7 @@ func testConcurrentCopyObjectWithVersioning() { } defer readerCopy.Close() - newestContent, err := ioutil.ReadAll(readerCopy) + newestContent, err := io.ReadAll(readerCopy) if err != nil { logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) return @@ -1571,7 +1570,7 @@ func testComposeObjectWithVersioning() { for _, testFile := range testFiles { r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "unexpected failure", err) return @@ -1633,7 
+1632,7 @@ func testComposeObjectWithVersioning() { } defer readerCopy.Close() - copyContentBytes, err := ioutil.ReadAll(readerCopy) + copyContentBytes, err := io.ReadAll(readerCopy) if err != nil { logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) return @@ -1733,12 +1732,39 @@ func testRemoveObjectWithVersioning() { logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) return } - - err = c.RemoveBucket(context.Background(), bucketName) + // test delete marker version id is non-null + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // create delete marker + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) return } + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + idx := 0 + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if idx == 0 { + if !info.IsDeleteMarker { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err) + return + } + } + idx++ + } + + defer cleanupBucket(bucketName, c) successLogger(testName, function, args, startTime).Info() } @@ -2461,7 +2487,7 @@ func testGetObjectSeekEnd() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -2982,7 +3008,7 @@ func testFPutObjectMultipart() { fileName := getMintDataDirFilePath("datafile-129-MB") if fileName == "" { // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -3091,7 +3117,7 @@ func testFPutObject() { fName := getMintDataDirFilePath("datafile-129-MB") if fName == "" { // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -3257,7 +3283,7 @@ func testFPutObjectContext() { fName := getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. 
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -3357,7 +3383,7 @@ func testFPutObjectContextV2() { fName := getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") if err != nil { logError(testName, function, args, startTime, "", "Temp file creation failed", err) return @@ -3621,7 +3647,7 @@ func testGetObjectS3Zip() { logError(testName, function, args, startTime, "", "file.Open failed", err) return } - want, err := ioutil.ReadAll(zfr) + want, err := io.ReadAll(zfr) if err != nil { logError(testName, function, args, startTime, "", "fzip file read failed", err) return @@ -3638,7 +3664,7 @@ func testGetObjectS3Zip() { } return } - got, err := ioutil.ReadAll(r) + got, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -3722,7 +3748,7 @@ func testGetObjectReadSeekFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -3885,7 +3911,7 @@ func testGetObjectReadAtFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4062,7 +4088,7 @@ func testGetObjectReadAtWhenEOFWasReached() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4181,7 +4207,7 @@ func testPresignedPostPolicy() { metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4245,7 +4271,7 @@ func testPresignedPostPolicy() { filePath := getMintDataDirFilePath("datafile-33-kB") if filePath == "" { // Make a temp file with 33 KB data. 
- file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -4588,7 +4614,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4770,7 +4796,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4944,7 +4970,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -5127,7 +5153,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -6138,7 +6164,7 @@ func testFunctional() { return } - newReadBytes, err := ioutil.ReadAll(newReader) + newReadBytes, err := io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -6269,7 +6295,7 @@ func testFunctional() { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) return } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) + newPresignedBytes, err := io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return @@ -6312,7 +6338,7 @@ func testFunctional() { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) return } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) + newPresignedBytes, err = io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return @@ -6372,7 +6398,7 @@ func testFunctional() { return } - newReadBytes, err = ioutil.ReadAll(newReader) + newReadBytes, err = io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) return @@ -6428,7 +6454,7 @@ func testFunctional() { return } - newReadBytes, err = ioutil.ReadAll(newReader) + newReadBytes, err = io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) return @@ -6652,7 +6678,7 @@ func testPutObjectUploadSeekedObject() { } args["fileToUpload"] = fileName } else { - tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") + tempfile, err = os.CreateTemp("", "minio-go-upload-test-") if err != nil { logError(testName, function, args, startTime, "", "TempFile create failed", 
err) return @@ -6916,7 +6942,7 @@ func testFPutObjectV2() { defer cleanupBucket(bucketName, c) // Make a temp file with 11*1024*1024 bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -7145,7 +7171,7 @@ func testGetObjectReadSeekFunctionalV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7299,7 +7325,7 @@ func testGetObjectReadAtFunctionalV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7837,7 +7863,7 @@ func testEncryptedEmptyObject() { } defer reader.Close() - decBytes, err := ioutil.ReadAll(reader) + decBytes, err := io.ReadAll(reader) if err != nil { logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) return @@ -7915,7 +7941,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, return } - decBytes, err := ioutil.ReadAll(reader) + decBytes, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7955,7 +7981,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, return } - decBytes, err = ioutil.ReadAll(reader) + decBytes, err = io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7994,7 +8020,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } defer reader.Close() - decBytes, err = ioutil.ReadAll(reader) + decBytes, err = io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11040,7 +11066,7 @@ func testFunctionalV2() { return } - newReadBytes, err := ioutil.ReadAll(newReader) + newReadBytes, err := io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11146,7 +11172,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) return } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) + newPresignedBytes, err := io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11185,7 +11211,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) return } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) + newPresignedBytes, err = io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11239,7 +11265,7 @@ func testFunctionalV2() { return } - newReadBytes, err = ioutil.ReadAll(newReader) + newReadBytes, err = io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) return @@ -11553,7 +11579,7 @@ func testGetObjectRanges() { } for _, test := 
range tests { wantRC := getDataReader("datafile-129-MB") - io.CopyN(ioutil.Discard, wantRC, test.start) + io.CopyN(io.Discard, wantRC, test.start) want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) opts := minio.GetObjectOptions{} opts.SetRange(test.start, test.end) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index e964b521737..1c73d1008b4 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -24,7 +24,6 @@ import ( "encoding/xml" "errors" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -139,7 +138,7 @@ func closeResponse(resp *http.Response) { // Without this closing connection would disallow re-using // the same connection for future uses. // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } } @@ -191,7 +190,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume defer closeResponse(resp) if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return AssumeRoleResponse{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go index f4b027a4145..07a9c2f0927 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go @@ -22,7 +22,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" ) // ErrorResponse - Is the typed error returned. @@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error { func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { // read the whole body (up to 1MB) const maxBodyLength = 1 << 20 - body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) if err != nil { return nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go index 56437edb26f..e4c0f6717ed 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -18,7 +18,6 @@ package credentials import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -114,6 +113,7 @@ type hostConfig struct { type config struct { Version string `json:"version"` Hosts map[string]hostConfig `json:"hosts"` + Aliases map[string]hostConfig `json:"aliases"` } // loadAliass loads from the file pointed to by shared credentials filename for alias. 
@@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) { cfg := &config{} json := jsoniter.ConfigCompatibleWithStandardLibrary - configBytes, err := ioutil.ReadFile(filename) + configBytes, err := os.ReadFile(filename) if err != nil { return hostConfig{}, err } if err = json.Unmarshal(configBytes, cfg); err != nil { return hostConfig{}, err } + + if cfg.Version == "10" { + return cfg.Aliases[alias], nil + } + return cfg.Hosts[alias], nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index 14369cf10a7..e641639c95b 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -22,7 +22,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -106,7 +106,7 @@ func (m *IAM) Retrieve() (Value, error) { Client: m.Client, STSEndpoint: endpoint, GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { - token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) + token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) if err != nil { return nil, err } @@ -268,7 +268,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { return "", err } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go index 34598bd8e73..9e92c1e0fd7 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -22,7 +22,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string, defer resp.Body.Close() if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return AssumeRoleWithClientGrantsResponse{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index 25b45ecb09e..ec5f3f0971b 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -21,7 +21,7 @@ import ( "bytes" "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return value, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index c7ac4db3b75..a0ad090468b 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -21,7 +21,6 @@ import ( "encoding/xml" "errors" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -152,7 +151,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { } if 
resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return Value{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 50f5f1ce65f..2e2af50b46d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -22,7 +22,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession defer resp.Body.Close() if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return AssumeRoleWithWebIdentityResponse{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index 931ca5bc284..fd034fdc80a 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -34,19 +34,19 @@ type EventType string // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations const ( ObjectCreatedAll EventType = "s3:ObjectCreated:*" - ObjectCreatedPut = "s3:ObjectCreated:Put" - ObjectCreatedPost = "s3:ObjectCreated:Post" - ObjectCreatedCopy = "s3:ObjectCreated:Copy" - ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - ObjectAccessedGet = "s3:ObjectAccessed:Get" - ObjectAccessedHead = "s3:ObjectAccessed:Head" - ObjectAccessedAll = "s3:ObjectAccessed:*" - ObjectRemovedAll = "s3:ObjectRemoved:*" - ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - BucketCreatedAll = "s3:BucketCreated:*" - BucketRemovedAll = "s3:BucketRemoved:*" + ObjectCreatedPut EventType = "s3:ObjectCreated:Put" + ObjectCreatedPost EventType = "s3:ObjectCreated:Post" + ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy" + ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload" + ObjectAccessedGet EventType = "s3:ObjectAccessed:Get" + ObjectAccessedHead EventType = "s3:ObjectAccessed:Head" + ObjectAccessedAll EventType = "s3:ObjectAccessed:*" + ObjectRemovedAll EventType = "s3:ObjectRemoved:*" + ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" + ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" + ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" + BucketCreatedAll EventType = "s3:BucketCreated:*" + BucketRemovedAll EventType = "s3:BucketRemoved:*" ) // FilterRule - child of S3Key, a tag in the notification xml which diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 97abf8df8ff..645fe18cbef 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -700,6 +700,10 @@ type TargetMetrics struct { PendingCount uint64 `json:"pendingReplicationCount"` // Total number of failed 
operations including metadata updates FailedCount uint64 `json:"failedReplicationCount"` + // Bandwidth limit in bytes/sec for this target + BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"` + // Current bandwidth used in bytes/sec for this target + CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"` } // Metrics represents inline replication metrics for a bucket. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go index 5838b9de995..77540e2d821 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -21,7 +21,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -132,7 +131,7 @@ func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) if req.Body == nil { - req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) } stReader := &StreamingUSReader{ diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index 49e999b01a3..cf2356fd90f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -22,7 +22,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -280,7 +279,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok prepareStreamingRequest(req, sessionToken, dataLen, reqTime) if req.Body == nil { - req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) } stReader := &StreamingReader{ diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 4b3df19127f..31b340dcf78 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -25,7 +25,7 @@ import ( ) // expirationDateFormat date format for expiration key in json policy. 
-const expirationDateFormat = "2006-01-02T15:04:05.999Z" +const expirationDateFormat = "2006-01-02T15:04:05.000Z" // policyCondition explanation: // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go index a88477b7369..1bff6646284 100644 --- a/vendor/github.com/minio/minio-go/v7/transport.go +++ b/vendor/github.com/minio/minio-go/v7/transport.go @@ -23,7 +23,6 @@ package minio import ( "crypto/tls" "crypto/x509" - "io/ioutil" "net" "net/http" "os" @@ -73,7 +72,7 @@ var DefaultTransport = func(secure bool) (*http.Transport, error) { } if f := os.Getenv("SSL_CERT_FILE"); f != "" { rootCAs := mustGetSystemCertPool() - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err == nil { rootCAs.AppendCertsFromPEM(data) } diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index a8a45b1a8ce..7ce8ec5caf7 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -28,7 +28,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -142,7 +141,7 @@ func closeResponse(resp *http.Response) { // Without this closing connection would disallow re-using // the same connection for future uses. // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index acb3d6e2398..e0c197ba8c3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -545,7 +545,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.45 +# github.com/minio/minio-go/v7 v7.0.46 ## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials From 01762017ebb953460301cb717c280c616c5f78c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 19:02:57 -0800 Subject: [PATCH 041/133] Bump github.com/alicebob/miniredis/v2 from 2.23.1 to 2.30.0 (#5084) Bumps [github.com/alicebob/miniredis/v2](https://github.com/alicebob/miniredis) from 2.23.1 to 2.30.0. - [Release notes](https://github.com/alicebob/miniredis/releases) - [Changelog](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md) - [Commits](https://github.com/alicebob/miniredis/compare/v2.23.1...v2.30.0) --- updated-dependencies: - dependency-name: github.com/alicebob/miniredis/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- .../alicebob/miniredis/v2/CHANGELOG.md | 10 ++ .../alicebob/miniredis/v2/README.md | 2 +- .../alicebob/miniredis/v2/cmd_connection.go | 5 +- .../alicebob/miniredis/v2/cmd_generic.go | 66 ++++++++++++- .../alicebob/miniredis/v2/cmd_list.go | 11 ++- .../alicebob/miniredis/v2/cmd_pubsub.go | 24 +++-- .../alicebob/miniredis/v2/cmd_scripting.go | 95 +++++++++++-------- .../alicebob/miniredis/v2/cmd_sorted_set.go | 5 +- .../alicebob/miniredis/v2/cmd_stream.go | 47 +++++---- .../alicebob/miniredis/v2/cmd_transactions.go | 6 +- .../github.com/alicebob/miniredis/v2/lua.go | 13 +-- .../alicebob/miniredis/v2/miniredis.go | 2 + .../github.com/alicebob/miniredis/v2/redis.go | 12 ++- .../alicebob/miniredis/v2/stream.go | 20 +++- vendor/modules.txt | 2 +- 17 files changed, 230 insertions(+), 96 deletions(-) diff --git a/go.mod b/go.mod index a414530728c..8348ca17dfe 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 github.com/NYTimes/gziphandler v1.1.1 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/alicebob/miniredis/v2 v2.23.1 + github.com/alicebob/miniredis/v2 v2.30.0 github.com/armon/go-metrics v0.4.1 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 diff --git a/go.sum b/go.sum index b70a00518e6..d31abc6a274 100644 --- a/go.sum +++ b/go.sum @@ -174,8 +174,8 @@ github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:C github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.23.1 h1:jR6wZggBxwWygeXcdNyguCOCIjPsZyNUNlAkTx2fu0U= -github.com/alicebob/miniredis/v2 v2.23.1/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= +github.com/alicebob/miniredis/v2 v2.30.0 h1:uA3uhDbCxfO9+DI/DuGeAMr9qI+noVWwGPNTFuKID5M= +github.com/alicebob/miniredis/v2 v2.30.0/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= diff --git a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md index b70c3df6220..d9700af64e9 100644 --- a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md +++ b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md @@ -1,7 +1,17 @@ ## Changelog +### v2.30.0 + +- implement redis 7.0.x (from 6.X). 
Main changes: + - test against 7.0.7 + - update error messages + - support nx|xx|gt|lt options in [P]EXPIRE[AT] + - update how deleted items are processed in pending queues in streams + + ### v2.23.1 + - resolve $ to latest ID in XREAD (thanks @josh-hook) - handle disconnect in blocking functions (thanks @jgirtakovskis) - fix type conversion bug in redisToLua (thanks Sandy Harvie) diff --git a/vendor/github.com/alicebob/miniredis/v2/README.md b/vendor/github.com/alicebob/miniredis/v2/README.md index cf9bde444bf..24df8603c5c 100644 --- a/vendor/github.com/alicebob/miniredis/v2/README.md +++ b/vendor/github.com/alicebob/miniredis/v2/README.md @@ -320,7 +320,7 @@ Commands which will probably not be implemented: ## &c. -Integration tests are run against Redis 6.2.6. The [./integration](./integration/) subdir +Integration tests are run against Redis 7.0.7. The [./integration](./integration/) subdir compares miniredis against a real redis instance. The Redis 6 RESP3 protocol is supported. If there are problems, please open diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go index cca060a7218..1afb5cea180 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go @@ -70,8 +70,9 @@ func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) { if m.checkPubsub(c, cmd) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go index 76d6b0a8ca1..55e20897140 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go @@ -3,6 +3,7 @@ package miniredis import ( + "fmt" "sort" "strconv" "strings" @@ -44,7 +45,7 @@ func commandsGeneric(m *Miniredis) { // converted to a duration. 
func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, string, []string) { return func(c *server.Peer, cmd string, args []string) { - if len(args) != 2 { + if len(args) < 2 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return @@ -59,11 +60,43 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, var opts struct { key string value int + nx bool + xx bool + gt bool + lt bool } opts.key = args[0] if ok := optInt(c, args[1], &opts.value); !ok { return } + args = args[2:] + for len(args) > 0 { + switch strings.ToLower(args[0]) { + case "nx": + opts.nx = true + case "xx": + opts.xx = true + case "gt": + opts.gt = true + case "lt": + opts.lt = true + default: + setDirty(c) + c.WriteError(fmt.Sprintf("ERR Unsupported option %s", args[0])) + return + } + args = args[1:] + } + if opts.gt && opts.lt { + setDirty(c) + c.WriteError("ERR GT and LT options at the same time are not compatible") + return + } + if opts.nx && (opts.xx || opts.gt || opts.lt) { + setDirty(c) + c.WriteError("ERR NX and XX, GT or LT options at the same time are not compatible") + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -73,11 +106,38 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, c.WriteInt(0) return } + + oldTTL, ok := db.ttl[opts.key] + + var newTTL time.Duration if unix { - db.ttl[opts.key] = m.at(opts.value, d) + newTTL = m.at(opts.value, d) } else { - db.ttl[opts.key] = time.Duration(opts.value) * d + newTTL = time.Duration(opts.value) * d + } + + // > NX -- Set expiry only when the key has no expiry + if opts.nx && ok { + c.WriteInt(0) + return + } + // > XX -- Set expiry only when the key has an existing expiry + if opts.xx && !ok { + c.WriteInt(0) + return + } + // > GT -- Set expiry only when the new expiry is greater than current one + // (no exp == infinity) + if opts.gt && (!ok || newTTL <= oldTTL) { + c.WriteInt(0) + return + } + // > LT -- Set expiry only when the new expiry is less than current one + if opts.lt && ok && newTTL > oldTTL { + c.WriteInt(0) + return } + db.ttl[opts.key] = newTTL db.keyVersion[opts.key]++ db.checkTTL(opts.key) c.WriteInt(1) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_list.go b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go index efd0362b4be..0e74373b017 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_list.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go @@ -462,6 +462,11 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri if !db.exists(opts.key) { // non-existing key is fine + if opts.withCount && !c.Resp3 { + // zero-length list in this specific case. Looks like a redis bug to me. 
+ c.WriteLen(-1) + return + } c.WriteNull() return } @@ -481,11 +486,7 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri } opts.count -= 1 } - if len(popped) == 0 { - c.WriteLen(-1) - } else { - c.WriteStrings(popped) - } + c.WriteStrings(popped) return } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go b/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go index 70997be5ab1..0fc9f0de355 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go @@ -29,8 +29,9 @@ func (m *Miniredis) cmdSubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -53,8 +54,9 @@ func (m *Miniredis) cmdUnsubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -103,8 +105,9 @@ func (m *Miniredis) cmdPsubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -127,8 +130,9 @@ func (m *Miniredis) cmdPunsubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -212,7 +216,9 @@ func (m *Miniredis) cmdPubSub(c *server.Peer, cmd string, args []string) { case "NUMPAT": argsOk = len(subargs) == 0 default: - argsOk = false + setDirty(c) + c.WriteError(fmt.Sprintf(msgFPubsubUsageSimple, subcommand)) + return } if !argsOk { diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go b/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go index ef10aaef0fe..9f5ce9ef2d5 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go @@ -23,7 +23,7 @@ func commandsScripting(m *Miniredis) { // Execute lua. Needs to run m.Lock()ed, from within withTx(). // Returns true if the lua was OK (and hence should be cached). 
-func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) bool { +func (m *Miniredis) runLuaScript(c *server.Peer, sha, script string, args []string) bool { l := lua.NewState(lua.Options{SkipOpenLibs: true}) defer l.Close() @@ -80,7 +80,7 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) b } l.SetGlobal("ARGV", argvTable) - redisFuncs, redisConstants := mkLua(m.srv, c) + redisFuncs, redisConstants := mkLua(m.srv, c, sha) // Register command handlers l.Push(l.NewFunction(func(l *lua.LState) int { mod := l.RegisterModule("redis", redisFuncs).(*lua.LTable) @@ -117,18 +117,18 @@ func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) { if m.checkPubsub(c, cmd) { return } - - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } script, args := args[0], args[1:] withTx(m, c, func(c *server.Peer, ctx *connCtx) { - ok := m.runLuaScript(c, script, args) + sha := sha1Hex(script) + ok := m.runLuaScript(c, sha, script, args) if ok { - sha := sha1Hex(script) m.scripts[sha] = script } }) @@ -146,8 +146,9 @@ func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) { if m.checkPubsub(c, cmd) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -160,7 +161,7 @@ func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) { return } - m.runLuaScript(c, script, args) + m.runLuaScript(c, sha, script, args) }) } @@ -177,28 +178,62 @@ func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } - subcmd, args := args[0], args[1:] + var opts struct { + subcmd string + script string + } - withTx(m, c, func(c *server.Peer, ctx *connCtx) { - switch strings.ToLower(subcmd) { - case "load": - if len(args) != 1 { - c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD")) - return + opts.subcmd, args = args[0], args[1:] + + switch strings.ToLower(opts.subcmd) { + case "load": + if len(args) != 1 { + setDirty(c) + c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD")) + return + } + opts.script = args[0] + case "exists": + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber("script|exists")) + return + } + case "flush": + if len(args) == 1 { + switch strings.ToUpper(args[0]) { + case "SYNC", "ASYNC": + args = args[1:] + default: } - script := args[0] + } + if len(args) != 0 { + setDirty(c) + c.WriteError(msgScriptFlush) + return + } - if _, err := parse.Parse(strings.NewReader(script), "user_script"); err != nil { + default: + setDirty(c) + c.WriteError(fmt.Sprintf(msgFScriptUsageSimple, strings.ToUpper(opts.subcmd))) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + switch strings.ToLower(opts.subcmd) { + case "load": + if _, err := parse.Parse(strings.NewReader(opts.script), "user_script"); err != nil { c.WriteError(errLuaParseError(err)) return } - sha := sha1Hex(script) - m.scripts[sha] = script + sha := sha1Hex(opts.script) + m.scripts[sha] = opts.script c.WriteBulk(sha) case "exists": @@ -212,23 +247,9 @@ func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) { } case "flush": - if len(args) == 1 { - switch strings.ToUpper(args[0]) { - case "SYNC", "ASYNC": - args = args[1:] - default: - } - } - if 
len(args) != 0 { - c.WriteError(msgScriptFlush) - return - } - m.scripts = map[string]string{} c.WriteOK() - default: - c.WriteError(fmt.Sprintf(msgFScriptUsage, strings.ToUpper(subcmd))) } }) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go index 178fcc13e42..ab25c257c2d 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go @@ -1494,10 +1494,9 @@ func (m *Miniredis) cmdZpopmax(reverse bool) server.Cmd { var err error if len(args) > 1 { count, err = strconv.Atoi(args[1]) - - if err != nil { + if err != nil || count < 0 { setDirty(c) - c.WriteError(msgInvalidInt) + c.WriteError(msgInvalidRange) return } } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go index be7067b3784..7735a6bafd2 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go @@ -307,25 +307,25 @@ func (m *Miniredis) cmdXgroup(c *server.Peer, cmd string, args []string) { return } - subCmd, args := strings.ToUpper(args[0]), args[1:] + subCmd, args := strings.ToLower(args[0]), args[1:] switch subCmd { - case "CREATE": + case "create": m.cmdXgroupCreate(c, cmd, args) - case "DESTROY": + case "destroy": m.cmdXgroupDestroy(c, cmd, args) - case "CREATECONSUMER": + case "createconsumer": m.cmdXgroupCreateconsumer(c, cmd, args) - case "DELCONSUMER": + case "delconsumer": m.cmdXgroupDelconsumer(c, cmd, args) - case "HELP", - "SETID": + case "help", + "setid": err := fmt.Sprintf("ERR 'XGROUP %s' not supported", subCmd) setDirty(c) c.WriteError(err) default: setDirty(c) c.WriteError(fmt.Sprintf( - "ERR Unknown subcommand or wrong number of arguments for '%s'. Try XGROUP HELP.", + "ERR unknown subcommand '%s'. Try XGROUP HELP.", subCmd, )) } @@ -508,7 +508,7 @@ func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) { default: setDirty(c) c.WriteError(fmt.Sprintf( - "ERR Unknown subcommand or wrong number of arguments for '%s'. Try XINFO HELP.", + "ERR unknown subcommand or wrong number of arguments for '%s'. 
Try XINFO HELP.", subCmd, )) } @@ -567,19 +567,20 @@ func (m *Miniredis) cmdXinfoGroups(c *server.Peer, args []string) { c.WriteLen(len(s.groups)) for name, g := range s.groups { - c.WriteMapLen(4) + c.WriteMapLen(6) c.WriteBulk("name") c.WriteBulk(name) - c.WriteBulk("consumers") c.WriteInt(len(g.consumers)) - c.WriteBulk("pending") - c.WriteInt(len(g.pending)) - + c.WriteInt(len(g.activePending())) c.WriteBulk("last-delivered-id") c.WriteBulk(g.lastID) + c.WriteBulk("entries-read") + c.WriteNull() + c.WriteBulk("lag") + c.WriteInt(len(g.stream.entries)) } }) } @@ -1132,7 +1133,8 @@ func (m *Miniredis) cmdXpending(c *server.Peer, cmd string, args []string) { } func writeXpendingSummary(c *server.Peer, g streamGroup) { - if len(g.pending) == 0 { + pend := g.activePending() + if len(pend) == 0 { c.WriteLen(4) c.WriteInt(0) c.WriteNull() @@ -1147,9 +1149,9 @@ func writeXpendingSummary(c *server.Peer, g streamGroup) { // - highest ID // - all consumers with > 0 pending items c.WriteLen(4) - c.WriteInt(len(g.pending)) - c.WriteBulk(g.pending[0].id) - c.WriteBulk(g.pending[len(g.pending)-1].id) + c.WriteInt(len(pend)) + c.WriteBulk(pend[0].id) + c.WriteBulk(pend[len(pend)-1].id) cons := map[string]int{} for id := range g.consumers { cnt := g.pendingCount(id) @@ -1481,7 +1483,7 @@ func xautoclaim( } func writeXautoclaim(c *server.Peer, nextCallId string, res []StreamEntry, justId bool) { - c.WriteLen(2) + c.WriteLen(3) c.WriteBulk(nextCallId) c.WriteLen(len(res)) for _, entry := range res { @@ -1497,6 +1499,8 @@ func writeXautoclaim(c *server.Peer, nextCallId string, res []StreamEntry, justI c.WriteBulk(v) } } + // TODO: see "Redis 7" note + c.WriteLen(0) } // XCLAIM @@ -1650,6 +1654,11 @@ func (m *Miniredis) xclaim( } pelEntry.lastDelivery = newLastDelivery + // redis7: don't report entries which are deleted by now + if _, e := group.stream.get(id); e == nil { + continue + } + claimedEntryIDs = append(claimedEntryIDs, id) } if len(claimedEntryIDs) == 0 { diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go b/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go index 9cbcaf3b3c8..94729e00411 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go @@ -30,7 +30,7 @@ func (m *Miniredis) cmdMulti(c *server.Peer, cmd string, args []string) { ctx := getCtx(c) if ctx.nested { - c.WriteError(msgNotFromScripts) + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } if inTx(ctx) { @@ -59,7 +59,7 @@ func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) { ctx := getCtx(c) if ctx.nested { - c.WriteError(msgNotFromScripts) + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } if !inTx(ctx) { @@ -137,7 +137,7 @@ func (m *Miniredis) cmdWatch(c *server.Peer, cmd string, args []string) { ctx := getCtx(c) if ctx.nested { - c.WriteError(msgNotFromScripts) + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } if inTx(ctx) { diff --git a/vendor/github.com/alicebob/miniredis/v2/lua.go b/vendor/github.com/alicebob/miniredis/v2/lua.go index bc86eed7d46..da705676d61 100644 --- a/vendor/github.com/alicebob/miniredis/v2/lua.go +++ b/vendor/github.com/alicebob/miniredis/v2/lua.go @@ -18,7 +18,7 @@ var luaRedisConstants = map[string]lua.LValue{ "LOG_WARNING": lua.LNumber(3), } -func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[string]lua.LValue) { +func mkLua(srv *server.Server, c *server.Peer, sha string) (map[string]lua.LGFunction, map[string]lua.LValue) { 
mkCall := func(failFast bool) func(l *lua.LState) int { // one server.Ctx for a single Lua run pCtx := &connCtx{} @@ -26,12 +26,13 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s pCtx.authenticated = true } pCtx.nested = true + pCtx.nestedSHA = sha pCtx.selectedDB = getCtx(c).selectedDB return func(l *lua.LState) int { top := l.GetTop() if top == 0 { - l.Error(lua.LString("Please specify at least one argument for redis.call()"), 1) + l.Error(lua.LString(fmt.Sprintf("Please specify at least one argument for this redis lib call script: %s, &c.", sha)), 1) return 0 } var args []string @@ -42,12 +43,12 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s case lua.LString: args = append(args, string(a)) default: - l.Error(lua.LString("Lua redis() command arguments must be strings or integers"), 1) + l.Error(lua.LString(fmt.Sprintf("Lua redis lib command arguments must be strings or integers script: %s, &c.", sha)), 1) return 0 } } if len(args) == 0 { - l.Error(lua.LString(msgNotFromScripts), 1) + l.Error(lua.LString(msgNotFromScripts(sha)), 1) return 0 } @@ -63,7 +64,7 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s if failFast { // call() mode if strings.Contains(err.Error(), "ERR unknown command") { - l.Error(lua.LString("Unknown Redis command called from Lua script"), 1) + l.Error(lua.LString(fmt.Sprintf("Unknown Redis command called from script script: %s, &c.", sha)), 1) } else { l.Error(lua.LString(err.Error()), 1) } @@ -112,7 +113,7 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s return 0 } res := &lua.LTable{} - res.RawSetString("err", lua.LString(msg)) + res.RawSetString("err", lua.LString("ERR "+msg)) l.Push(res) return 1 }, diff --git a/vendor/github.com/alicebob/miniredis/v2/miniredis.go b/vendor/github.com/alicebob/miniredis/v2/miniredis.go index c86e2fe8b30..4eb7cb60c8b 100644 --- a/vendor/github.com/alicebob/miniredis/v2/miniredis.go +++ b/vendor/github.com/alicebob/miniredis/v2/miniredis.go @@ -76,6 +76,7 @@ type dbKey struct { } // connCtx has all state for a single connection. +// (this struct was named before context.Context existed) type connCtx struct { selectedDB int // selected DB authenticated bool // auth enabled and a valid AUTH seen @@ -84,6 +85,7 @@ type connCtx struct { watch map[dbKey]uint // WATCHed keys subscriber *Subscriber // client is in PUBSUB mode if not nil nested bool // this is called via Lua + nestedSHA string // set to the SHA of the nesting function } // NewMiniRedis makes a new, non-started, Miniredis object. 
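(Aside, illustrative only; not part of this patch.) The cmd_generic.go hunk earlier in this commit implements the Redis 7 NX/XX/GT/LT options for [P]EXPIRE[AT] that the miniredis 2.30.0 changelog entry mentions. Below is a minimal sketch of the resulting behaviour, assuming a go-redis v9 client (github.com/redis/go-redis/v9 and its ExpireNX/ExpireGT helpers, which are an assumption here since no client is part of this change) pointed at a miniredis test server:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func main() {
	// Start an in-process miniredis server to talk to.
	s, err := miniredis.Run()
	if err != nil {
		panic(err)
	}
	defer s.Close()

	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	if err := rdb.Set(ctx, "k", "v", 0).Err(); err != nil {
		panic(err)
	}

	// NX: only set a TTL when the key has none yet.
	fmt.Println(rdb.ExpireNX(ctx, "k", time.Minute).Val()) // true
	fmt.Println(rdb.ExpireNX(ctx, "k", time.Hour).Val())   // false: a TTL is already set

	// GT: only replace the TTL when the new one is strictly greater.
	fmt.Println(rdb.ExpireGT(ctx, "k", time.Hour).Val())   // true
	fmt.Println(rdb.ExpireGT(ctx, "k", time.Second).Val()) // false: shorter than the current TTL

}

The expected output (true, false, true, false) mirrors the nx/gt branches added to makeCmdExpire in the hunk above.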
diff --git a/vendor/github.com/alicebob/miniredis/v2/redis.go b/vendor/github.com/alicebob/miniredis/v2/redis.go index 7e78ee47066..5cfca355121 100644 --- a/vendor/github.com/alicebob/miniredis/v2/redis.go +++ b/vendor/github.com/alicebob/miniredis/v2/redis.go @@ -20,6 +20,7 @@ const ( msgInvalidMinMax = "ERR min or max is not a float" msgInvalidRangeItem = "ERR min or max not valid string range item" msgInvalidTimeout = "ERR timeout is not a float or out of range" + msgInvalidRange = "ERR value is out of range, must be positive" msgSyntaxError = "ERR syntax error" msgKeyNotFound = "ERR no such key" msgOutOfRange = "ERR index out of range" @@ -31,8 +32,10 @@ const ( msgInvalidPSETEXTime = "ERR invalid expire time in psetex" msgInvalidKeysNumber = "ERR Number of keys can't be greater than number of args" msgNegativeKeysNumber = "ERR Number of keys can't be negative" - msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP." - msgFPubsubUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP." + msgFScriptUsage = "ERR unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP." + msgFScriptUsageSimple = "ERR unknown subcommand '%s'. Try SCRIPT HELP." + msgFPubsubUsage = "ERR unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP." + msgFPubsubUsageSimple = "ERR unknown subcommand '%s'. Try PUBSUB HELP." msgScriptFlush = "ERR SCRIPT FLUSH only support SYNC|ASYNC option" msgSingleElementPair = "ERR INCR option supports a single increment-element pair" msgGTLTandNX = "ERR GT, LT, and/or NX options at the same time are not compatible" @@ -41,7 +44,6 @@ const ( msgStreamIDZero = "ERR The ID specified in XADD must be greater than 0-0" msgNoScriptFound = "NOSCRIPT No matching script. Please use EVAL." msgUnsupportedUnit = "ERR unsupported unit provided. please use m, km, ft, mi" - msgNotFromScripts = "This Redis command is not allowed from scripts" msgXreadUnbalanced = "ERR Unbalanced XREAD list of streams: for each stream key an ID or '$' must be specified." msgXgroupKeyNotFound = "ERR The XGROUP subcommand requires the key to exist. Note that for CREATE you may want to use the MKSTREAM option to create an empty stream automatically." msgXtrimInvalidStrategy = "ERR unsupported XTRIM strategy. Please use MAXLEN, MINID" @@ -70,6 +72,10 @@ func errXreadgroup(key, group string) error { return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s' in XREADGROUP with GROUP option", key, group) } +func msgNotFromScripts(sha string) string { + return fmt.Sprintf("This Redis command is not allowed from script script: %s, &c", sha) +} + // withTx wraps the non-argument-checking part of command handling code in // transaction logic. func withTx( diff --git a/vendor/github.com/alicebob/miniredis/v2/stream.go b/vendor/github.com/alicebob/miniredis/v2/stream.go index 75848d02803..f59820d55ce 100644 --- a/vendor/github.com/alicebob/miniredis/v2/stream.go +++ b/vendor/github.com/alicebob/miniredis/v2/stream.go @@ -394,6 +394,10 @@ func (g *streamGroup) ack(ids []string) (int, error) { consumer.numPendingEntries-- g.pending = append(g.pending[:pos], g.pending[pos+1:]...) 
+ // don't count deleted entries + if _, e := g.stream.get(id); e == nil { + continue + } count++ } return count, nil @@ -426,7 +430,7 @@ func (g *streamGroup) pendingAfter(id string) []pendingEntry { func (g *streamGroup) pendingCount(consumer string) int { n := 0 - for _, p := range g.pending { + for _, p := range g.activePending() { if p.consumer == consumer { n++ } @@ -434,6 +438,20 @@ func (g *streamGroup) pendingCount(consumer string) int { return n } +// pending entries without the entries deleted from the group +func (g *streamGroup) activePending() []pendingEntry { + var pe []pendingEntry + for _, p := range g.pending { + // drop deleted ones + if _, e := g.stream.get(p.id); e == nil { + continue + } + p := p + pe = append(pe, p) + } + return pe +} + func (g *streamGroup) copy() *streamGroup { cns := map[string]*consumer{} for k, v := range g.consumers { diff --git a/vendor/modules.txt b/vendor/modules.txt index e0c197ba8c3..065c0a4bce2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -89,7 +89,7 @@ github.com/alecthomas/units # github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a ## explicit github.com/alicebob/gopher-json -# github.com/alicebob/miniredis/v2 v2.23.1 +# github.com/alicebob/miniredis/v2 v2.30.0 ## explicit; go 1.14 github.com/alicebob/miniredis/v2 github.com/alicebob/miniredis/v2/geohash From 18df1e87e27535a326c109dad63f31b2306eb4a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jan 2023 10:04:13 -0800 Subject: [PATCH 042/133] Bump golang.org/x/time from 0.1.0 to 0.3.0 (#5088) Bumps [golang.org/x/time](https://github.com/golang/time) from 0.1.0 to 0.3.0. - [Release notes](https://github.com/golang/time/releases) - [Commits](https://github.com/golang/time/compare/v0.1.0...v0.3.0) --- updated-dependencies: - dependency-name: golang.org/x/time dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- vendor/golang.org/x/time/rate/rate.go | 20 +++---- vendor/golang.org/x/time/rate/sometimes.go | 67 ++++++++++++++++++++++ vendor/modules.txt | 2 +- 5 files changed, 79 insertions(+), 16 deletions(-) create mode 100644 vendor/golang.org/x/time/rate/sometimes.go diff --git a/go.mod b/go.mod index 8348ca17dfe..ff5b6e8b933 100644 --- a/go.mod +++ b/go.mod @@ -65,7 +65,7 @@ require ( go.uber.org/atomic v1.10.0 golang.org/x/net v0.4.0 golang.org/x/sync v0.1.0 - golang.org/x/time v0.1.0 + golang.org/x/time v0.3.0 google.golang.org/grpc v1.51.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/go.sum b/go.sum index d31abc6a274..dd8d743fb64 100644 --- a/go.sum +++ b/go.sum @@ -2051,8 +2051,8 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f7c29f156a..f0e0cf3cb1d 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -83,7 +83,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, _, tokens := lim.advance(t) // does not mutute lim + _, tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -183,7 +183,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, _, tokens := r.lim.advance(t) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -304,7 +304,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, _, tokens := lim.advance(t) + t, tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -321,7 +321,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, _, tokens := lim.advance(t) + t, tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -356,7 +356,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, last, tokens := lim.advance(t) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
tokens -= float64(n) @@ -379,15 +379,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) if ok { r.tokens = n r.timeToAct = t.Add(waitDuration) - } - // Update state - if ok { + // Update state lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -396,7 +392,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -409,7 +405,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, new if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 00000000000..6ba99ddb67b --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. 
+func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 065c0a4bce2..005578ee3d1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1102,7 +1102,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.1.0 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate # golang.org/x/tools v0.4.0 From b3e9aa01c5a09d8bc683ba55089d9da555fba0f6 Mon Sep 17 00:00:00 2001 From: Sesha Sendhil S Date: Thu, 12 Jan 2023 10:57:17 +0530 Subject: [PATCH 043/133] Support Zstd compression for gRPC (#5092) * add zstd compression to grpcclient Signed-off-by: Sesha Sendhil * fix use of deprecated ioutil pkg Signed-off-by: Sesha Sendhil * update docs Signed-off-by: Sesha Sendhil * update CHANGELOG Signed-off-by: Sesha Sendhil * mark as feature Signed-off-by: Sesha Sendhil Signed-off-by: Sesha Sendhil Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/configuration/config-file-reference.md | 10 +- go.mod | 2 +- go.sum | 4 +- pkg/util/grpcclient/grpcclient.go | 5 +- pkg/util/grpcencoding/zstd/zstd.go | 82 + .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 141 + .../github.com/klauspost/compress/README.md | 593 +++ .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 168 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 +++ .../klauspost/compress/fse/decompress.go | 374 ++ .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 233 + .../klauspost/compress/huff0/bitwriter.go | 95 + .../klauspost/compress/huff0/bytereader.go | 44 + .../klauspost/compress/huff0/compress.go | 730 +++ .../klauspost/compress/huff0/decompress.go | 1167 +++++ .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 846 ++++ .../compress/huff0/decompress_generic.go | 299 ++ .../klauspost/compress/huff0/huff0.go | 337 ++ .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 ++ .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 ++ .../compress/internal/snapref/encode_other.go | 240 + .../compress/internal/snapref/snappy.go | 98 + .../klauspost/compress/s2/README.md | 283 +- .../klauspost/compress/s2/decode.go | 6 +- .../klauspost/compress/s2/decode_other.go | 34 +- .../klauspost/compress/s2/encode_all.go | 3 +- .../klauspost/compress/s2/encode_amd64.go | 12 +- .../klauspost/compress/s2/encode_best.go | 35 +- .../klauspost/compress/s2/encode_better.go | 105 +- .../klauspost/compress/s2/encode_go.go | 9 +- .../compress/s2/encodeblock_amd64.go | 23 +- .../klauspost/compress/s2/encodeblock_amd64.s | 919 ++-- vendor/github.com/klauspost/compress/s2sx.mod | 4 + 
vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 140 + .../klauspost/compress/zstd/bitwriter.go | 113 + .../klauspost/compress/zstd/blockdec.go | 721 +++ .../klauspost/compress/zstd/blockenc.go | 871 ++++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 229 + .../klauspost/compress/zstd/decoder.go | 955 ++++ .../compress/zstd/decoder_options.go | 149 + .../klauspost/compress/zstd/dict.go | 121 + .../klauspost/compress/zstd/enc_base.go | 172 + .../klauspost/compress/zstd/enc_best.go | 566 +++ .../klauspost/compress/zstd/enc_better.go | 1242 +++++ .../klauspost/compress/zstd/enc_dfast.go | 1123 +++++ .../klauspost/compress/zstd/enc_fast.go | 898 ++++ .../klauspost/compress/zstd/encoder.go | 676 +++ .../compress/zstd/encoder_options.go | 318 ++ .../klauspost/compress/zstd/framedec.go | 436 ++ .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 ++ .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 72 + .../klauspost/compress/zstd/fse_encoder.go | 701 +++ .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/seqdec.go | 509 ++ .../klauspost/compress/zstd/seqdec_amd64.go | 379 ++ .../klauspost/compress/zstd/seqdec_amd64.s | 4079 +++++++++++++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 435 ++ .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 144 + vendor/modules.txt | 11 +- 97 files changed, 26588 insertions(+), 657 deletions(-) create mode 100644 pkg/util/grpcencoding/zstd/zstd.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create 
mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create 
mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 397fb9d8366..9218890fb8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ * [FEATURE] Query Frontend/Scheduler: Add a new counter metric `cortex_request_queue_requests_total` for total requests going to queue. #5030 * [FEATURE] Build ARM docker images. #5041 * [FEATURE] Query-frontend/Querier: Create spans to measure time to merge promql responses. #5041 +* [FEATURE] Added zstd as an option for grpc compression #5092 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. #5044 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 2bd0d36cc1a..285b6fcede3 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -198,7 +198,7 @@ query_scheduler: [max_send_msg_size: | default = 16777216] # Use compression when sending messages. Supported values are: 'gzip', - # 'snappy' and '' (disable compression) + # 'snappy', 'zstd' and '' (disable compression) # CLI flag: -query-scheduler.grpc-client-config.grpc-compression [grpc_compression: | default = ""] @@ -963,7 +963,7 @@ grpc_client_config: [max_send_msg_size: | default = 16777216] # Use compression when sending messages. Supported values are: 'gzip', - # 'snappy' and '' (disable compression) + # 'snappy', 'zstd' and '' (disable compression) # CLI flag: -frontend.grpc-client-config.grpc-compression [grpc_compression: | default = ""] @@ -1126,7 +1126,7 @@ ruler_client: [max_send_msg_size: | default = 16777216] # Use compression when sending messages. Supported values are: 'gzip', - # 'snappy' and '' (disable compression) + # 'snappy', 'zstd' and '' (disable compression) # CLI flag: -ruler.client.grpc-compression [grpc_compression: | default = ""] @@ -2118,7 +2118,7 @@ grpc_client_config: [max_send_msg_size: | default = 16777216] # Use compression when sending messages. 
Supported values are: 'gzip', - # 'snappy' and '' (disable compression) + # 'snappy', 'zstd' and '' (disable compression) # CLI flag: -ingester.client.grpc-compression [grpc_compression: | default = ""] @@ -2225,7 +2225,7 @@ grpc_client_config: [max_send_msg_size: | default = 16777216] # Use compression when sending messages. Supported values are: 'gzip', - # 'snappy' and '' (disable compression) + # 'snappy', 'zstd' and '' (disable compression) # CLI flag: -querier.frontend-client.grpc-compression [grpc_compression: | default = ""] diff --git a/go.mod b/go.mod index ff5b6e8b933..b3808d2a748 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,7 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.15.14 github.com/lib/pq v1.10.7 github.com/minio/minio-go/v7 v7.0.46 github.com/mitchellh/go-wordwrap v1.0.1 @@ -150,7 +151,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/compress v1.15.9 // indirect github.com/klauspost/cpuid/v2 v2.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect diff --git a/go.sum b/go.sum index dd8d743fb64..7ec92c47305 100644 --- a/go.sum +++ b/go.sum @@ -1022,8 +1022,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= +github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= diff --git a/pkg/util/grpcclient/grpcclient.go b/pkg/util/grpcclient/grpcclient.go index 9e848ab06eb..c1cd4bf5d07 100644 --- a/pkg/util/grpcclient/grpcclient.go +++ b/pkg/util/grpcclient/grpcclient.go @@ -13,6 +13,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/backoff" "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy" + "github.com/cortexproject/cortex/pkg/util/grpcencoding/zstd" "github.com/cortexproject/cortex/pkg/util/tls" ) @@ -40,7 +41,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") - f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)") + f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy', 'zstd' and '' (disable compression)") f.Float64Var(&cfg.RateLimit, prefix+".grpc-client-rate-limit", 0., "Rate limit for gRPC client; 0 means disabled.") f.IntVar(&cfg.RateLimitBurst, prefix+".grpc-client-rate-limit-burst", 0, "Rate limit burst for gRPC client.") f.BoolVar(&cfg.BackoffOnRatelimits, prefix+".backoff-on-ratelimits", false, "Enable backoff and retry when we hit ratelimits.") @@ -53,7 +54,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { func (cfg *Config) Validate(log log.Logger) error { switch cfg.GRPCCompression { - case gzip.Name, snappy.Name, "": + case gzip.Name, snappy.Name, zstd.Name, "": // valid default: return errors.Errorf("unsupported compression type: %s", cfg.GRPCCompression) diff --git a/pkg/util/grpcencoding/zstd/zstd.go b/pkg/util/grpcencoding/zstd/zstd.go new file mode 100644 index 00000000000..7a6803c7e7e --- /dev/null +++ b/pkg/util/grpcencoding/zstd/zstd.go @@ -0,0 +1,82 @@ +package zstd + +import ( + "bytes" + "io" + + "github.com/klauspost/compress/zstd" + "google.golang.org/grpc/encoding" +) + +const Name = "zstd" + +type compressor struct { + encoder *zstd.Encoder + decoder *zstd.Decoder +} + +func init() { + enc, _ := zstd.NewWriter(nil) + dec, _ := zstd.NewReader(nil) + c := &compressor{ + encoder: enc, + decoder: dec, + } + encoding.RegisterCompressor(c) +} + +// SetLevel updates the registered compressor to use a particular compression +// level. NOTE: this function must only be called from an init function, and +// is not threadsafe. +func SetLevel(level zstd.EncoderLevel) error { + c := encoding.GetCompressor(Name).(*compressor) + + enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) + if err != nil { + return err + } + + c.encoder = enc + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + return &zstdWriteCloser{ + enc: c.encoder, + writer: w, + }, nil +} + +type zstdWriteCloser struct { + enc *zstd.Encoder + writer io.Writer // Compressed data will be written here. + buf bytes.Buffer // Buffer uncompressed data here, compress on Close. 
+} + +func (z *zstdWriteCloser) Write(p []byte) (int, error) { + return z.buf.Write(p) +} + +func (z *zstdWriteCloser) Close() error { + compressed := z.enc.EncodeAll(z.buf.Bytes(), nil) + _, err := io.Copy(z.writer, bytes.NewReader(compressed)) + return err +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + compressed, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + uncompressed, err := c.decoder.DecodeAll(compressed, nil) + if err != nil { + return nil, err + } + + return bytes.NewReader(uncompressed), nil +} + +func (c *compressor) Name() string { + return Name +} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000000..402433593c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 00000000000..d31b3781527 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 00000000000..a2bf06e94f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@v0.7.2 + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 00000000000..d73fb86e4fb --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,593 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
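The zstd sub-package listed above is what the new gRPC codec in pkg/util/grpcencoding/zstd builds on. As a minimal, illustrative sketch (not part of the patch), the same two calls that codec relies on, EncodeAll for compression and DecodeAll for decompression, round-trip a byte slice:

```go
// Illustrative only: block-level zstd round trip using the calls the new
// gRPC codec uses (EncodeAll / DecodeAll). Error handling kept minimal.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}

	src := []byte("hello, hello, hello, cortex")
	compressed := enc.EncodeAll(src, nil)      // stateless block compression
	out, err := dec.DecodeAll(compressed, nil) // stateless block decompression
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, out)) // true
}
```

On the Cortex side, the codec is then selected per client through the documented `grpc-compression` flags, for example `-querier.frontend-client.grpc-compression=zstd`.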
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
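A minimal sketch (not part of the patch) of the synchronous stream mode described above, forcing concurrency to 1 so no extra goroutines are spawned; the WithEncoderConcurrency and WithDecoderConcurrency options are taken from the zstd package documentation:

```go
// Illustrative only: streaming zstd with concurrency forced to 1, so both
// encoder and decoder run synchronously in the calling goroutine.
package main

import (
	"bytes"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("some stream payload")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flush and finish the frame
		panic(err)
	}

	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := io.Copy(os.Stdout, dec); err != nil { // prints the payload
		panic(err)
	}
}
```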
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +``` + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. + +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. 
+
+# Performance Update 2018
+
+It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" means "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS and CSS.
+
+Since levels 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible.
Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. + +The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. + +So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. + + +## Huffman only compression + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. + +This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. + +The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). + +The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). 
So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 00000000000..ea5a692d513 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 00000000000..ea7324da671 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. 
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface for compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `(error)` | An internal error occurred. |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are the primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
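+
+To make the usage above concrete, here is a minimal round-trip sketch. It is not taken from the package examples: the `Decompress` signature is assumed to mirror `Compress`, `input` stands for the caller's block, and the documented `Scratch` caveat is handled by copying the compressed output before the scratch is re-used:
+
+```
+	var s fse.Scratch
+
+	// 'input' is the block you want to compress.
+	comp, err := fse.Compress(input, &s)
+	switch err {
+	case nil:
+		// ok, 'comp' holds the compressed block.
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// Expected under normal operation: store the block uncompressed / as RLE instead.
+		return nil
+	default:
+		return err
+	}
+
+	// 'comp' aliases s.Out, so copy it before re-using the scratch.
+	block := append([]byte(nil), comp...)
+
+	// Decompress needs exactly the block returned by Compress.
+	out, err := fse.Decompress(block, &s)
+	if err != nil {
+		return err
+	}
+	// Remember: there are no integrity checks - verify 'out' yourself if needed.
+	_ = out
+```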
+ +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000000..f65eb3909cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000000..43e463611b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000000..abade2d6052 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000000..6f341914c67 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000000..535cbadfdea --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh new file mode 100644 index 00000000000..aff942205f1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gen.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cd s2/cmd/_s2sx/ || exit 1 +go generate . diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 00000000000..b3d262958f8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 00000000000..8b6e5c66383 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,89 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. | + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. 
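+
+As a rough sketch (the exact signatures are in the godoc linked above; this assumes `Compress1X` returns the output, a table re-use indicator and an error, and `input` stands for the caller's block), handling the error values from the table could look like this:
+
+```
+	var s huff0.Scratch
+
+	// 'input' is the block to compress (at most 128 KiB).
+	out, reUsed, err := huff0.Compress1X(input, &s)
+	switch err {
+	case nil:
+		// ok, 'out' holds the compressed block.
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		// Expected under normal operation: store the block uncompressed / as RLE instead.
+	case huff0.ErrTooBig:
+		// Split the input into smaller blocks and retry.
+	default:
+		// Internal error.
+	}
+
+	// Record 'reUsed' so the decoder knows whether a new table was emitted.
+	_ = reUsed
+	_ = out
+```
+
+See the note below about re-using the `Scratch` object and its `Out` buffer.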
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used; if you are still using the
+previous output, you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object retains state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between blocks.
+
+Do note, however, that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable`, and it will return the data part of the block,
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 00000000000..504a7be9dae
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,233 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000000..ec71f7a349a --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,95 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000000..4dcab8d2327 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,44 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000000..d9223a91efc --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,730 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. 
+ return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. 
+ err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000000..42a237eac4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 00000000000..ba7e8e6b027 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
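For orientation, the exported entry point below is normally reached through the table parser. A minimal caller-side sketch (not part of the vendored file), assuming the block was encoded with Compress4X, the Huffman table header is parsed with huff0.ReadTable, and dstSize is the known uncompressed size; all other names are illustrative:

	// Sketch only, assuming a Compress4X-encoded block.
	func decodeHuff0Block(block []byte, dstSize int) ([]byte, error) {
		// ReadTable consumes the table description; the 4X streams follow in remain.
		s, remain, err := huff0.ReadTable(block, nil)
		if err != nil {
			return nil, err
		}
		// Decompress4X sizes its output from cap(dst), so pre-allocate the full uncompressed length.
		return s.Decoder().Decompress4X(make([]byte, 0, dstSize), remain)
	}
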
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000000..8d2187a2ce6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,846 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 + + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := 
br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 + + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), R15 + + // b.value |= 
uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 48(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, 
DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ 
CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000000..908c17de63f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic 
implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000000..e8ad17ad08e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. +package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. 
+ // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + br byteReader + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. + prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. 
+ hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. 
+func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000000..3954c51219b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000000..e802579c4f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000000..4465fbe9e90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 00000000000..40796a49d65 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. 
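The block-format contract above lends itself to a short caller sketch (not part of the vendored file). DecodedLen and Decode are the functions defined in this file; the wrapper and its input are illustrative:

	// Sketch only: size dst up front so Decode can return a sub-slice of it.
	func decodeSnappyBlock(block []byte) ([]byte, error) {
		n, err := snapref.DecodedLen(block)
		if err != nil {
			return nil, err
		}
		return snapref.Decode(make([]byte, n), block)
	}
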
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. 
Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 00000000000..77395a6b8b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 00000000000..13c6040a5de --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. 
+const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. 
+func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 00000000000..298c4f8e97d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,240 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 00000000000..34d01f4aa63 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. 
+// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md index 73c0c462de0..1d80c42a530 100644 --- a/vendor/github.com/klauspost/compress/s2/README.md +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -325,35 +325,35 @@ The content compressed in this mode is fully compatible with the standard decode Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): -| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | -|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| -| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% | -| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - | -| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% | -| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - | -| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% | -| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - | -| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% | -| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - | -| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% | -| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - | -| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% | -| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - | -| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% | -| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - | -| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% | -| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - | -| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% | -| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - | -| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% | -| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - | -| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% | -| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - | +| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" 
| "better" throughput | "better" % smaller | +|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| +| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% | +| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - | +| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% | +| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - | +| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% | +| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - | +| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% | +| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - | +| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% | +| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - | +| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% | +| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - | +| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% | +| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - | +| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% | +| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - | +| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% | +| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - | +| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% | +| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - | +| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% | +| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - | ### Legend -* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. -* `S2 throughput`: Throughput of S2 in MB/s. +* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. +* `S2 Throughput`: Throughput of S2 in MB/s. * `S2 % smaller`: How many percent of the Snappy output size is S2 better. * `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy. * `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy. @@ -361,7 +361,7 @@ Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all th There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads. -Machine generated data gets by far the biggest compression boost, with size being being reduced by up to 45% of Snappy size. +Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size. The "better" compression mode sees a good improvement in all cases, but usually at a performance cost. 
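For reference, a minimal sketch (not from this patch) of how the concurrent stream compression measured above could be driven from Go, assuming the `s2.NewWriter`, `s2.WriterConcurrency` and `s2.WriterBetterCompression` options of `github.com/klauspost/compress/s2`; the file names are placeholders:

```
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

// compressFile compresses in to out using the S2 stream format, splitting the
// input into blocks that are compressed concurrently, as in the table above.
func compressFile(in, out string, better bool) error {
	src, err := os.Open(in)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create(out)
	if err != nil {
		return err
	}
	defer dst.Close()

	opts := []s2.WriterOption{s2.WriterConcurrency(16)}
	if better {
		// "better" trades some speed for smaller output.
		opts = append(opts, s2.WriterBetterCompression())
	}
	enc := s2.NewWriter(dst, opts...)
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	// Close flushes any buffered blocks; its error must be checked.
	return enc.Close()
}

func main() {
	if err := compressFile("silesia.tar", "silesia.tar.s2", true); err != nil {
		log.Fatal(err)
	}
}
```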
@@ -404,15 +404,15 @@ The "better" compression mode will actively look for shorter matches, which is w Without assembly decompression is also very fast; single goroutine decompression speed. No assembly: | File | S2 Throughput | S2 throughput | -|--------------------------------|--------------|---------------| -| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | -| 10gb.tar.s2 | 1.30x | 867.07 MB/s | -| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | -| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | -| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | -| enwik9.s2 | 1.67x | 681.53 MB/s | -| adresser.json.s2 | 3.41x | 4230.53 MB/s | -| silesia.tar.s2 | 1.52x | 811.58 | +|--------------------------------|---------------|---------------| +| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | +| 10gb.tar.s2 | 1.30x | 867.07 MB/s | +| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | +| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | +| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | +| enwik9.s2 | 1.67x | 681.53 MB/s | +| adresser.json.s2 | 3.41x | 4230.53 MB/s | +| silesia.tar.s2 | 1.52x | 811.58 | Even though S2 typically compresses better than Snappy, decompression speed is always better. @@ -450,14 +450,14 @@ The most reliable is a wide dataset. For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), 53927 files, total input size: 4,014,735,833 bytes. Single goroutine used. -| * | Input | Output | Reduction | MB/s | -|-------------------|------------|------------|-----------|--------| -| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** | -| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 | -| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 | -| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 | -| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 | -| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 | +| * | Input | Output | Reduction | MB/s | +|-------------------|------------|------------|------------|------------| +| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** | +| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 | +| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 | +| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 | +| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 | +| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 | S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best". "Better" mode provides the same compression speed as LZ4 with better compression ratio. @@ -489,42 +489,23 @@ AMD64 assembly is use for both S2 and Snappy. 
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec | |-----------------------|-------------|---------|--------------|-------------|-------------|-------------| -| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s | -| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s | -| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s | -| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s | -| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s | -| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s | -| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s | -| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s | -| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s | -| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s | -| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s | -| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s | -| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s | -| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s | -| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s | -| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s | - - -| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed | -|-----------------------|-------------|------------------|----------|--------------| -| html | 22.31% | 7.58% | 1.07x | 1.20x | -| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x | -| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x | -| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x | -| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x | -| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x | -| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x | -| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x | -| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x | -| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x | -| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x | -| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x | -| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x | -| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x | -| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x | -| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x | +| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s | +| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s | +| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s | +| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s | +| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s | +| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s | +| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s | +| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s | +| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s | +| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s | +| kppkn.gtb | 
69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s | +| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s | +| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s | +| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s | +| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s | + Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size. @@ -543,42 +524,23 @@ So individual benchmarks should only be seen as a guideline and the overall pict | Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec | |-----------------------|-------------|-------------|--------------|--------------|-------------|-------------| -| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s | -| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s | -| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s | -| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s | -| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s | -| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s | -| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s | -| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s | -| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s | -| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s | -| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s | -| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s | -| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s | -| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s | -| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s | -| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s | - - -| Relative Perf | Snappy size | Better size | Better Speed | Better dec | -|-----------------------|-------------|-------------|--------------|------------| -| html | 22.31% | 13.18% | 0.48x | 0.98x | -| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x | -| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x | -| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x | -| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x | -| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x | -| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x | -| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x | -| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x | -| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x | -| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x | -| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x | -| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x | -| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x | -| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x | -| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x | +| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s | +| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s | +| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 
33691 MB/s | 30101 MB/s | +| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s | +| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s | +| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s | +| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s | +| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s | +| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s | +| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s | +| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s | +| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s | +| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s | +| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s | +| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s | + Except for the mostly incompressible JPEG image compression is better and usually in the double digits in terms of percentage reduction over Snappy. @@ -605,29 +567,29 @@ Some examples compared on 16 core CPU, amd64 assembly used: ``` * enwik10 -Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s -Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s -Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s +Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s +Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s +Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s * github-june-2days-2019.json -Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s -Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s -Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s +Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s +Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s +Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s * nyc-taxi-data-10M.csv -Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s -Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s -Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s +Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s +Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s +Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s * 10gb.tar -Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s -Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s -Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/ +Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s +Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s +Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/ * consensus.db.10gb -Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s -Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s -Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s +Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s +Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s +Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s ``` Decompression speed should be around the same as using the 'better' compression mode. 
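The block-mode counterparts of these levels are `s2.Encode`, `s2.EncodeBetter` and `s2.EncodeBest`. A minimal sketch (not from this patch) comparing the three on a single buffer, with `enwik9` used only as a placeholder input:

```
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	src, err := os.ReadFile("enwik9") // placeholder input
	if err != nil {
		log.Fatal(err)
	}
	// Passing nil as dst lets each encoder allocate a buffer of
	// s2.MaxEncodedLen(len(src)) itself.
	report := func(name string, enc []byte) {
		fmt.Printf("%-8s %d -> %d bytes (%.2f%%)\n",
			name, len(src), len(enc), 100*float64(len(enc))/float64(len(src)))
	}
	report("default", s2.Encode(nil, src))
	report("better", s2.EncodeBetter(nil, src))
	report("best", s2.EncodeBest(nil, src))
}
```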
@@ -648,10 +610,10 @@ If you would like more control, you can use the s2 package as described below: Snappy compatible blocks can be generated with the S2 encoder. Compression and speed is typically a bit better `MaxEncodedLen` is also smaller for smaller memory usage. Replace -| Snappy | S2 replacement | -|----------------------------|-------------------------| -| snappy.Encode(...) | s2.EncodeSnappy(...) | -| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | +| Snappy | S2 replacement | +|---------------------------|-----------------------| +| snappy.Encode(...) | s2.EncodeSnappy(...) | +| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | `s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output. @@ -660,12 +622,12 @@ Compression and speed is typically a bit better `MaxEncodedLen` is also smaller Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), 53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used: -| Encoder | Size | MB/s | Reduction | -|-----------------------|------------|------------|------------ -| snappy.Encode | 1128706759 | 725.59 | 71.89% | -| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | -| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | -| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%**| +| Encoder | Size | MB/s | Reduction | +|-----------------------|------------|------------|------------| +| snappy.Encode | 1128706759 | 725.59 | 71.89% | +| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | +| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | +| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** | ## Streams @@ -835,6 +797,13 @@ This is done using the regular "Skip" function: This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset. +# Compact storage + +For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from +a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load). + +This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors. + ## Index Format: Each block is structured as a snappy skippable block, with the chunk ID 0x99. @@ -844,20 +813,20 @@ The block can be read from the front, but contains information so it can be read Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding), with un-encoded value length of 64 bits, unless other limits are specified. -| Content | Format | -|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| -| ID, `[1]byte` | Always 0x99. | -| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | -| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". 
| -| UncompressedSize, Varint | Total Uncompressed size. | -| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | -| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | -| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | -| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | -| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | -| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | -| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | -| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | +| Content | Format | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| +| ID, `[1]byte` | Always 0x99. | +| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | +| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". | +| UncompressedSize, Varint | Total Uncompressed size. | +| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | +| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | +| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | +| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | +| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | +| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | +| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | +| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | For regular streams the uncompressed offsets are fully predictable, so `HasUncompressedOffsets` allows to specify that compressed blocks all have @@ -929,6 +898,7 @@ To decode from any given uncompressed offset `(wantOffset)`: See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface. + # Format Extensions * Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`. @@ -951,10 +921,11 @@ The length is specified by reading the 3-bit length specified in the tag and dec | 7 | 65540 + read 3 bytes | This allows any repeat offset + length to be represented by 2 to 5 bytes. +It also allows to emit matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies. Lengths are stored as little endian values. -The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams. +The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams. Default streaming block size is 1MB. 
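A minimal sketch (not from this patch) of seeking in an indexed stream with `Reader.ReadSeeker`, whose signature appears in the `decode.go` hunk below; it assumes the stream carries an index at its end so a nil index can be passed, and `data.s2` is a placeholder file name:

```
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	f, err := os.Open("data.s2") // placeholder: an S2 stream written with an index
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dec := s2.NewReader(f)
	// random=true allows seeking backwards as well as forwards; with a nil
	// index the reader is assumed to locate the index at the end of the stream.
	rs, err := dec.ReadSeeker(true, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Seek to an uncompressed offset; subsequent reads from dec start there.
	if _, err := rs.Seek(1<<20, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 64)
	if _, err := io.ReadFull(dec, buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes at uncompressed offset 1 MiB\n", len(buf))
}
```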
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go index 27c0f3c2c45..00c5cc72c24 100644 --- a/vendor/github.com/klauspost/compress/s2/decode.go +++ b/vendor/github.com/klauspost/compress/s2/decode.go @@ -952,7 +952,11 @@ func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { // Seek allows seeking in compressed data. func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { if r.err != nil { - return 0, r.err + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil } if offset == 0 && whence == io.SeekCurrent { return r.blockStart + int64(r.i), nil diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go index 1074ebd215e..11300c3a810 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_other.go +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -28,6 +28,9 @@ func s2Decode(dst, src []byte) int { // As long as we can read at least 5 bytes... for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 switch src[s] & 0x03 { case tagLiteral: x := uint32(src[s] >> 2) @@ -38,14 +41,19 @@ func s2Decode(dst, src []byte) int { s += 2 x = uint32(src[s-1]) case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 s += 3 - x = uint32(src[s-2]) | uint32(src[s-1])<<8 case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. + x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 s += 4 - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 s += 5 - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 } length = int(x) + 1 if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { @@ -62,8 +70,8 @@ func s2Decode(dst, src []byte) int { case tagCopy1: s += 2 - length = int(src[s-2]) >> 2 & 0x7 toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 if toffset == 0 { if debug { fmt.Print("(repeat) ") @@ -71,14 +79,16 @@ func s2Decode(dst, src []byte) int { // keep last offset switch length { case 5: + length = int(src[s]) + 4 s += 1 - length = int(uint32(src[s-1])) + 4 case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) s += 2 - length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) s += 3 - length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) default: // 0-> 4 } } else { @@ -86,14 +96,16 @@ func s2Decode(dst, src []byte) int { } length += 4 case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 s += 3 - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 s += 5 - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) } if offset <= 0 || d < offset || length > len(dst)-d { diff --git 
a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go index 8b16c38a68f..54c71d3b5d9 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_all.go +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -58,8 +58,9 @@ func encodeGo(dst, src []byte) []byte { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockGo(dst, src []byte) (d int) { // Initialize the hash table. const ( diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index e612225f4d3..6b93daa5ae6 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -8,8 +8,9 @@ package s2 // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -43,8 +44,9 @@ func encodeBlock(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetter(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -78,8 +80,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockSnappy(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -112,8 +115,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappy(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go index 4bc80bc6a70..1b7ea394fae 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_best.go +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -15,8 +15,9 @@ import ( // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBest(dst, src []byte) (d int) { // Initialize the hash tables. const ( @@ -176,14 +177,21 @@ func encodeBlockBest(dst, src []byte) (d int) { best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) } // Search for a match at best match end, see if that is better. - if sAt := best.s + best.length; sAt < sLimit { - sBack := best.s - backL := best.length + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. 
+ // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning // Load initial values cv = load64(src, sBack) - // Search for mismatch + + // Grab candidates... next := lTable[hash8(load64(src, sAt), lTableBits)] - //next := sTable[hash4(load64(src, sAt), sTableBits)] if checkAt := getCur(next) - backL; checkAt > 0 { best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) @@ -191,6 +199,16 @@ func encodeBlockBest(dst, src []byte) (d int) { if checkAt := getPrev(next) - backL; checkAt > 0 { best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } } } } @@ -288,8 +306,9 @@ emitRemainder: // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBestSnappy(dst, src []byte) (d int) { // Initialize the hash tables. const ( @@ -546,6 +565,7 @@ emitRemainder: // emitCopySize returns the size to encode the offset+length // // It assumes that: +// // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 func emitCopySize(offset, length int) int { @@ -584,6 +604,7 @@ func emitCopySize(offset, length int) int { // emitCopyNoRepeatSize returns the size to encode the offset+length // // It assumes that: +// // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 func emitCopyNoRepeatSize(offset, length int) int { diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go index 943215b8ae8..3b66ba42bf3 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_better.go +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -42,8 +42,9 @@ func hash8(u uint64, h uint8) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterGo(dst, src []byte) (d int) { // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are @@ -56,7 +57,7 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { // Initialize the hash tables. const ( // Long hash matches. - lTableBits = 16 + lTableBits = 17 maxLTableSize = 1 << lTableBits // Short hash matches. @@ -97,9 +98,26 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { lTable[hashL] = uint32(s) sTable[hashS] = uint32(s) + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + // Check repeat at offset checkRep. 
const checkRep = 1 - if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { base := s + checkRep // Extend back for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { @@ -109,8 +127,8 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { d += emitLiteral(dst[d:], src[nextEmit:base]) // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep for s < len(src) { if len(src)-s < 8 { if src[s] == src[candidate] { @@ -127,28 +145,40 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { s += 8 candidate += 8 } - if nextEmit > 0 { - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - } else { - // First match, cannot be repeat. - d += emitCopy(dst[d:], repeat, s-base) - } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) nextEmit = s if s >= sLimit { goto emitRemainder } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } cv = load64(src, s) continue } - if uint32(cv) == load32(src, candidateL) { + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { break } // Check our short candidate - if uint32(cv) == load32(src, candidateS) { + if uint32(cv) == uint32(valShort) { // Try a long candidate at s+1 hashL = hash7(cv>>8, lTableBits) candidateL = int(lTable[hashL]) @@ -227,21 +257,29 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { // Do we have space for more, if not bail. return 0 } - // Index match start+1 (long) and start+2 (short) + + // Index short & long index0 := base + 1 - // Index match end-2 (long) and end-1 (short) index1 := s - 2 cv0 := load64(src, index0) cv1 := load64(src, index1) - cv = load64(src, s) lTable[hash7(cv0, lTableBits)] = uint32(index0) - lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) - lTable[hash7(cv1, lTableBits)] = uint32(index1) - lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } } emitRemainder: @@ -260,8 +298,9 @@ emitRemainder: // been written. 
// // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are @@ -402,21 +441,29 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { // Do we have space for more, if not bail. return 0 } - // Index match start+1 (long) and start+2 (short) + + // Index short & long index0 := base + 1 - // Index match end-2 (long) and end-1 (short) index1 := s - 2 cv0 := load64(src, index0) cv1 := load64(src, index1) - cv = load64(src, s) lTable[hash7(cv0, lTableBits)] = uint32(index0) - lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) - lTable[hash7(cv1, lTableBits)] = uint32(index1) - lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } } emitRemainder: diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go index 94784b82ad6..db08fc355e1 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_go.go +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -12,6 +12,7 @@ import ( // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlock(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { @@ -25,6 +26,7 @@ func encodeBlock(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetter(dst, src []byte) (d int) { return encodeBlockBetterGo(dst, src) @@ -35,6 +37,7 @@ func encodeBlockBetter(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetterSnappy(dst, src []byte) (d int) { return encodeBlockBetterSnappyGo(dst, src) @@ -45,6 +48,7 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockSnappy(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { @@ -56,6 +60,7 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 0 <= len(lit) && len(lit) <= math.MaxUint32 func emitLiteral(dst, lit []byte) int { @@ -146,6 +151,7 @@ func emitRepeat(dst []byte, offset, length int) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 @@ -214,6 +220,7 @@ func emitCopy(dst []byte, offset, length int) int { // emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. 
// // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 @@ -273,8 +280,8 @@ func emitCopyNoRepeat(dst []byte, offset, length int) int { // matchLen returns how many bytes match in a and b // // It assumes that: -// len(a) <= len(b) // +// len(a) <= len(b) func matchLen(a []byte, b []byte) int { b = b[:len(a)] var checked int diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go index 88f27c09900..7e00bac3eae 100644 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go @@ -1,7 +1,6 @@ // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. //go:build !appengine && !noasm && gc && !noasm -// +build !appengine,!noasm,gc,!noasm package s2 @@ -150,8 +149,9 @@ func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: -// dst is long enough to hold the encoded bytes with margin of 0 bytes -// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 // //go:noescape func emitLiteral(dst []byte, lit []byte) int @@ -165,9 +165,10 @@ func emitRepeat(dst []byte, offset int, length int) int // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 // //go:noescape func emitCopy(dst []byte, offset int, length int) int @@ -175,9 +176,10 @@ func emitCopy(dst []byte, offset int, length int) int // emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. // // It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 // //go:noescape func emitCopyNoRepeat(dst []byte, offset int, length int) int @@ -185,7 +187,8 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int // matchLen returns how many bytes match in a and b // // It assumes that: -// len(a) <= len(b) +// +// len(a) <= len(b) // //go:noescape func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s index 36915d9495c..81a487d6def 100644 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -1,7 +1,6 @@ // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. 
//go:build !appengine && !noasm && gc && !noasm -// +build !appengine,!noasm,gc,!noasm #include "textflag.h" @@ -5743,9 +5742,9 @@ emit_literal_done_emit_remainder_encodeBlockAsm8B: // func encodeBetterBlockAsm(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm(SB), $327704-56 +TEXT ·encodeBetterBlockAsm(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -5797,27 +5796,37 @@ check_maxskip_cont_encodeBetterBlockAsm: MOVQ DI, R11 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ SI, R11 SHRQ $0x32, R11 MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 + MOVL 524312(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVL CX, 524312(SP)(R11*4) + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm + +no_short_found_encodeBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm candidateS_match_encodeBetterBlockAsm: SHRQ $0x08, DI MOVQ DI, R10 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 MOVL 24(SP)(R10*4), SI INCL CX MOVL CX, 24(SP)(R10*4) @@ -6590,52 +6599,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm: match_nolit_dst_ok_encodeBetterBlockAsm: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 524312(SP)(R11*4) + MOVL R14, 524312(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x2f, R8 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm emit_remainder_encodeBetterBlockAsm: MOVQ src_len+32(FP), CX @@ -6815,9 +6821,9 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm: // func encodeBetterBlockAsm4MB(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm4MB(SB), 
$327704-56 +TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -6869,27 +6875,37 @@ check_maxskip_cont_encodeBetterBlockAsm4MB: MOVQ DI, R11 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ SI, R11 SHRQ $0x32, R11 MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 + MOVL 524312(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVL CX, 524312(SP)(R11*4) + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm4MB - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm4MB - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm4MB + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm4MB + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm4MB + +no_short_found_encodeBetterBlockAsm4MB: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB candidateS_match_encodeBetterBlockAsm4MB: SHRQ $0x08, DI MOVQ DI, R10 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 MOVL 24(SP)(R10*4), SI INCL CX MOVL CX, 24(SP)(R10*4) @@ -7600,52 +7616,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: match_nolit_dst_ok_encodeBetterBlockAsm4MB: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 524312(SP)(R11*4) + MOVL R14, 524312(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm4MB: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm4MB + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x2f, R8 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm4MB + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm4MB emit_remainder_encodeBetterBlockAsm4MB: MOVQ src_len+32(FP), CX @@ -7871,12 +7884,22 @@ search_loop_encodeBetterBlockAsm12B: MOVL 65560(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 65560(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm12B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm12B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm12B + CMPQ R11, DI + JNE 
no_short_found_encodeBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm12B + +no_short_found_encodeBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B candidateS_match_encodeBetterBlockAsm12B: SHRQ $0x08, DI @@ -8447,52 +8470,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm12B: match_nolit_dst_ok_encodeBetterBlockAsm12B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x34, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x34, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 65560(SP)(R11*4) - MOVL R15, 65560(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 65560(SP)(R11*4) + MOVL R14, 65560(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm12B: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x32, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 65560(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm12B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm12B emit_remainder_encodeBetterBlockAsm12B: MOVQ src_len+32(FP), CX @@ -8707,12 +8727,22 @@ search_loop_encodeBetterBlockAsm10B: MOVL 16408(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 16408(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm10B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm10B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B candidateS_match_encodeBetterBlockAsm10B: SHRQ $0x08, DI @@ -9283,52 +9313,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm10B: match_nolit_dst_ok_encodeBetterBlockAsm10B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 SHLQ $0x20, R11 IMULQ R8, R11 
SHRQ $0x36, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x36, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 16408(SP)(R11*4) - MOVL R15, 16408(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 16408(SP)(R11*4) + MOVL R14, 16408(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm10B: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x34, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 16408(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm10B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm10B emit_remainder_encodeBetterBlockAsm10B: MOVQ src_len+32(FP), CX @@ -9543,12 +9570,22 @@ search_loop_encodeBetterBlockAsm8B: MOVL 4120(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 4120(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeBetterBlockAsm8B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm8B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B candidateS_match_encodeBetterBlockAsm8B: SHRQ $0x08, DI @@ -10105,52 +10142,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm8B: match_nolit_dst_ok_encodeBetterBlockAsm8B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x38, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x38, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 4120(SP)(R11*4) - MOVL R15, 4120(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 4120(SP)(R11*4) + MOVL R14, 4120(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm8B: + CMPQ DI, R9 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x36, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 4120(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm8B + 
ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeBetterBlockAsm8B emit_remainder_encodeBetterBlockAsm8B: MOVQ src_len+32(FP), CX @@ -14287,9 +14321,9 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: // func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56 +TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -14341,27 +14375,37 @@ check_maxskip_cont_encodeSnappyBetterBlockAsm: MOVQ DI, R11 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ SI, R11 SHRQ $0x32, R11 MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 + MOVL 524312(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVL CX, 524312(SP)(R11*4) + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm candidateS_match_encodeSnappyBetterBlockAsm: SHRQ $0x08, DI MOVQ DI, R10 SHLQ $0x08, R10 IMULQ R9, R10 - SHRQ $0x30, R10 + SHRQ $0x2f, R10 MOVL 24(SP)(R10*4), SI INCL CX MOVL CX, 24(SP)(R10*4) @@ -14685,52 +14729,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: match_nolit_dst_ok_encodeSnappyBetterBlockAsm: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 524312(SP)(R11*4) + MOVL R14, 524312(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x2f, R8 SHLQ $0x08, R10 IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + SHRQ $0x2f, R10 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm emit_remainder_encodeSnappyBetterBlockAsm: MOVQ src_len+32(FP), CX @@ -14964,12 +15005,22 @@ 
search_loop_encodeSnappyBetterBlockAsm64K: MOVL 262168(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm64K - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm64K - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm64K + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K candidateS_match_encodeSnappyBetterBlockAsm64K: SHRQ $0x08, DI @@ -15248,52 +15299,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: MOVQ $0x00cf1bbcdcbfa563, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x08, R10 IMULQ SI, R10 SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x32, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x32, R12 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x30, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 262168(SP)(R11*4) + MOVL R14, 262168(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x08, R8 + IMULQ SI, R8 + SHRQ $0x30, R8 SHLQ $0x08, R10 IMULQ SI, R10 SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm64K + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm64K emit_remainder_encodeSnappyBetterBlockAsm64K: MOVQ src_len+32(FP), CX @@ -15508,12 +15556,22 @@ search_loop_encodeSnappyBetterBlockAsm12B: MOVL 65560(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 65560(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm12B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm12B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm12B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm12B + +no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B candidateS_match_encodeSnappyBetterBlockAsm12B: SHRQ $0x08, DI @@ -15792,52 +15850,49 @@ 
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x34, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x34, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 65560(SP)(R11*4) - MOVL R15, 65560(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 65560(SP)(R11*4) + MOVL R14, 65560(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x32, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 65560(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm12B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm12B emit_remainder_encodeSnappyBetterBlockAsm12B: MOVQ src_len+32(FP), CX @@ -16052,12 +16107,22 @@ search_loop_encodeSnappyBetterBlockAsm10B: MOVL 16408(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 16408(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm10B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm10B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm10B + +no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B candidateS_match_encodeSnappyBetterBlockAsm10B: SHRQ $0x08, DI @@ -16336,52 +16401,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x36, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x36, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 
16408(SP)(R11*4) - MOVL R15, 16408(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 16408(SP)(R11*4) + MOVL R14, 16408(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x34, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 16408(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm10B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm10B emit_remainder_encodeSnappyBetterBlockAsm10B: MOVQ src_len+32(FP), CX @@ -16596,12 +16658,22 @@ search_loop_encodeSnappyBetterBlockAsm8B: MOVL 4120(SP)(R11*4), R8 MOVL CX, 24(SP)(R10*4) MOVL CX, 4120(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + CMPQ R10, DI JEQ candidate_match_encodeSnappyBetterBlockAsm8B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm8B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B candidateS_match_encodeSnappyBetterBlockAsm8B: SHRQ $0x08, DI @@ -16878,52 +16950,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: MOVQ $0x0000cf1bbcdcbf9b, SI MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 + LEAQ 1(DI), DI + LEAQ -2(CX), R9 + MOVQ (DX)(DI*1), R10 + MOVQ 1(DX)(DI*1), R11 + MOVQ (DX)(R9*1), R12 + MOVQ 1(DX)(R9*1), R13 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 SHLQ $0x20, R11 IMULQ R8, R11 SHRQ $0x38, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x38, R12 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 4120(SP)(R11*4) - MOVL R15, 4120(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI + MOVL R9, 24(SP)(R12*4) + MOVL R8, 4120(SP)(R11*4) + MOVL R14, 4120(SP)(R13*4) + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ DI, R9 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(DI*1), R8 + MOVQ (DX)(R9*1), R10 + SHLQ $0x10, R8 + IMULQ SI, R8 + SHRQ $0x36, R8 SHLQ $0x10, R10 IMULQ SI, R10 SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 + MOVL DI, 24(SP)(R8*4) MOVL R9, 24(SP)(R10*4) - MOVL DI, 4120(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm8B + ADDQ $0x02, DI + SUBQ $0x02, R9 + JMP index_loop_encodeSnappyBetterBlockAsm8B emit_remainder_encodeSnappyBetterBlockAsm8B: MOVQ src_len+32(FP), 
CX diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 00000000000..2263853fcad --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000000..65b38abed80 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. 
The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Reader.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. + +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
+as it is being requested only.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The used dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be aware of this and test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. 
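+
+A minimal sketch of what per-archive registration could look like, assuming the package's `ZipCompressor`, `ZipDecompressor` and `ZipMethodWinZip` helpers:
+
+```Go
+import (
+	"archive/zip"
+	"bytes"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// One compressor/decompressor pair can serve many archives concurrently.
+var zipComp = zstd.ZipCompressor()
+var zipDecomp = zstd.ZipDecompressor()
+
+// RoundTrip writes data into an in-memory zip as a zstd-compressed entry and reads it back.
+func RoundTrip(name string, data []byte) ([]byte, error) {
+	var buf bytes.Buffer
+
+	// Register zstd on this writer only, not globally.
+	zw := zip.NewWriter(&buf)
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zipComp)
+	w, err := zw.CreateHeader(&zip.FileHeader{Name: name, Method: zstd.ZipMethodWinZip})
+	if err != nil {
+		return nil, err
+	}
+	if _, err := w.Write(data); err != nil {
+		return nil, err
+	}
+	if err := zw.Close(); err != nil {
+		return nil, err
+	}
+
+	// Register the decompressor on this reader only.
+	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		return nil, err
+	}
+	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zipDecomp)
+	rc, err := zr.File[0].Open()
+	if err != nil {
+		return nil, err
+	}
+	defer rc.Close()
+	return io.ReadAll(rc)
+}
+```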
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000000..97299d499cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,140 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000000..78b3c61be3e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,113 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. 
+// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000000..6b9929ddf5e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,721 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. 
+ WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if cap(b.dataStorage) < cSize { + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. 
+func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + 
} + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000000..12e8f6f0b61 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... 
+ // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... 
+ if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. 
+ outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<= b.size { + // Maybe even add a bigger margin. + b.litEnc.Reuse = huff0.ReusePolicyNone + return errIncompressible + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debugEncoder { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i := range b.sequences { + seq := &b.sequences[i] + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000000..01a01e486e1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000000..176788f2597 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000000..0e59a242d8d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000000..f6a240970d4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,229 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000000..30459cd3fb8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,955 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. 
+// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. 
+ d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(&dict) + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. 
+// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && 
d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000000..f42448e69c9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,149 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
+func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// If several dictionaries with the same ID is provided the last one will be used. +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, *d) + } + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. 
+func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000000..b2725f77b5b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,121 @@ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// DictContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) DictContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 00000000000..bfb2e146c33 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,172 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. 
+func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 00000000000..830f5ba74a2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,566 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. 
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + bestOf := func(a, b *match) *match { + if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) + best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... 
+ if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + // Long at s+1, s+2 + m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + if false { + // Short at s+3. + // Too often worse... + m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + best = bestOf(best, &m) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd := bestOf(best, &m) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd = bestOf(bestEnd, &m) + } + best = bestEnd + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... 
+ off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000000..8582f31a7cc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1242 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
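+ // The sequence's literal length covers everything since the previous emit point.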
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
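+ // Rebase stored offsets so they remain valid once e.cur is wound back to
+ // maxMatchOff; entries outside the match window are zeroed (effectively evicted).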
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
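+ // Each insertion also flags the owning table shard as dirty so Reset can
+ // restore just the shards that changed instead of copying the whole table.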
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
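+ // The forward length is already known in matched; only backwards extension remains.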
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
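+ // Whatever follows the last emitted sequence is carried as trailing literals.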
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000000..7d425109adb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
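+ // Convert the stored absolute offset back to a position within the history buffer.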
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000000..315b1a8f2f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,898 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000000..65c6c36dc13 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,676 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
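// ---- Editorial sketch (not part of the vendored patch) ----
// The doc comment above describes the two ways an Encoder is normally driven:
// as a streaming io.WriteCloser, or via EncodeAll for independent buffers.
// Below is a minimal usage sketch built only on the public API shown in this
// file (NewWriter, Write/Close, EncodeAll); error handling is abbreviated and
// the surrounding function names are illustrative only:
//
//	func compressStream(dst io.Writer, src io.Reader) error {
//		enc, err := zstd.NewWriter(dst) // streaming mode
//		if err != nil {
//			return err
//		}
//		if _, err := io.Copy(enc, src); err != nil {
//			enc.Close()
//			return err
//		}
//		return enc.Close() // flushes buffered data and writes the CRC if enabled
//	}
//
//	func compressBuffer(enc *zstd.Encoder, in []byte) []byte {
//		// One-shot path recommended above for smaller payloads; the result is
//		// appended to the second argument (nil here).
//		return enc.EncodeAll(in, nil)
//	}
// ------------------------------------------------------------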
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. 
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. 
+// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000000..6015f498afa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,318 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: true, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. 
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. 
+func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + o.pad = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. 
+// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// The encoder *may* choose to use no dictionary instead for certain payloads. +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 00000000000..65984bf07ca --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
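// ---- Editorial sketch (not part of the vendored patch) ----
// The EOption constructors defined in encoder_options.go above are passed to
// NewWriter. A hedged sketch of typical option wiring, using only options that
// appear in that file; the "better" level string and the concurrency value are
// illustrative choices, not defaults:
//
//	level := zstd.SpeedDefault
//	if ok, l := zstd.EncoderLevelFromString("better"); ok {
//		level = l
//	}
//	enc, err := zstd.NewWriter(out,
//		zstd.WithEncoderLevel(level),
//		zstd.WithEncoderConcurrency(4),
//		zstd.WithWindowSize(1<<20), // power of two in [MinWindowSize, MaxWindowSize]
//	)
// ------------------------------------------------------------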
+ +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. 
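+			// (Skippable frames use magic numbers 0x184D2A50 through 0x184D2A5F,
+			// which is why only the high nibble of the first byte is checked above.)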
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. 
+ d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if d.o.ignoreChecksum { + return nil + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + } + + return nil +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. 
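+	// crcStart marks where this frame's output begins in dst, so the CRC and
+	// size checks below only cover bytes produced by this frame.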
+ crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000000..4ef7f5a3e3d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) 
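+	// Build the Frame_Header_Descriptor byte: bits 0-1 hold the Dictionary_ID
+	// field size, bit 2 the Content_Checksum flag, bit 5 the Single_Segment flag,
+	// and bits 6-7 the Frame_Content_Size field size.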
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are nto stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst, nil +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 00000000000..2f8860a722b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
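To make the skippable-frame padding above concrete: calcSkippableFrame returns how many bytes to add so the running total becomes a multiple of the requested size, always leaving room for the 8-byte skippable-frame header. A hand-worked sketch as a hypothetical test inside package zstd (values derived from the function above):

package zstd

import "testing"

func TestCalcSkippableFrameExamples(t *testing.T) {
	// 1000 bytes written, 4096-byte alignment: pad with a 3096-byte skippable frame.
	if got := calcSkippableFrame(1000, 4096); got != 3096 {
		t.Fatalf("got %d, want 3096", got)
	}
	// Already aligned: nothing to add.
	if got := calcSkippableFrame(4096, 4096); got != 0 {
		t.Fatalf("got %d, want 0", got)
	}
	// Only 2 bytes short of the boundary: below the 8-byte header,
	// so pad all the way to the next multiple (2 + 4096 = 4098).
	if got := calcSkippableFrame(4094, 4096); got != 4098 {
		t.Fatalf("got %d, want 4098", got)
	}
}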
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return s.buildDtable() +} + +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. 
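+// In RLE mode every state decodes to the same symbol, so a single table entry
+// with tableLog 0 is enough.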
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 00000000000..d04a829b0a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 00000000000..bcde3986953 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 00000000000..332e51fe44f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
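+// This is the pure Go fallback selected by the build tags above; on amd64
+// (without the noasm tag) the assembly implementation in fse_decoder_amd64.s
+// is used instead.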
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000000..ab26326a8ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
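+// Symbols that are present but rarer than one table cell get the special
+// count -1, and any rounding remainder is absorbed by the most probable
+// symbol (falling back to normalizeCount2 when that would skew it too far).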
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000000..474cb77d2b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000000..5d73c21ebdd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000000..09164856d22 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000000..24b53065f40 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000000..777290d44ce --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
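+
+A minimal usage sketch of the API above, written against the upstream
+`github.com/cespare/xxhash/v2` module (this vendored copy lives in an internal
+package and is not importable directly):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+func main() {
+	// One-shot hashing of a byte slice or a string.
+	fmt.Println(xxhash.Sum64([]byte("hello")))
+	fmt.Println(xxhash.Sum64String("hello"))
+
+	// Streaming: Digest implements hash.Hash64.
+	d := xxhash.New()
+	d.WriteString("hello ")
+	d.WriteString("world")
+	fmt.Println(d.Sum64())
+}
+```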
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000000..fc40c820016 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
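+		// Copy just enough of b to complete the 32-byte block buffered in
+		// d.mem, mix that block into the four accumulators, and then carry
+		// on with the remainder of b below.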
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000000..ddb63aa91b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
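+	// v1 = prime1 + prime2, v2 = prime2, v3 = 0, v4 = -prime1,
+	// matching Digest.Reset in the pure Go implementation.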
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000000..17901e08040 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
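+// Each iteration loads two 16-byte pairs with LDP and feeds one 8-byte lane
+// into each of the four accumulators.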
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 00000000000..d4221edf4fd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000000..0be16cefc7f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000000..6f3b0cb1026 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000000..f833d1541f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,509 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
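+	// llCode, mlCode and ofCode are the symbol codes (not the raw values)
+	// for the literal length, match length and offset of this sequence.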
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
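+// It first tries decodeSyncSimple (the assembly fast path); if that path
+// reports it is unsupported, the generic Go loop below is used instead.
+// Decoded output is appended to s.out.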
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + if size-startSize == 424242 { + panic("here") + } + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
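+	// They are therefore read back as offset first, then match length, then
+	// literal length. When more than 32 bits may be needed in total, the
+	// bit reader is refilled between the offset and the length codes.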
+ br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000000..191384adfd0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
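+// The returned bool reports whether this fast path could be used at all;
+// it is false when a dictionary is set or the output buffer is too small,
+// in which case the caller falls back to the generic decoder.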
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
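+// It only fills seqs with the decoded (ll, ml, mo) values; no output bytes
+// are written here. The sequences are applied to the output separately,
+// e.g. by executeSimple.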
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
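+		// The append above only grows the capacity; the slice is truncated
+		// back to its original length on the next line.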
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000000..b94993a0727 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4079 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
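+	// When at least 8 bytes of input remain, refill the bit buffer with a
+	// single 8-byte load; otherwise fall back to the byte-by-byte loop.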
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
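+	// Descriptive note: unlike the general variant above, the _56 form performs
+	// only this single refill per sequence. It assumes the offset, match length
+	// and literal length together never need more than the 56 bits guaranteed
+	// after one refill (a refill can leave up to 7 already-consumed bits).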
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
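+	// Descriptive note: a repeat offset that resolves to zero is not valid, so
+	// it is forced to 1 before being committed back to the offset history.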
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ 
R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + 
+copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB 
(R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
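+	// Descriptive note: the refill below mirrors the plain decode loops.
+	// decodeSync additionally executes each sequence in this same iteration,
+	// copying literals and the match straight into s.out, with the decoded
+	// offset, match length and literal length staged in 8(SP), 16(SP), 24(SP).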
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
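+	// Descriptive note: the small-copy paths above move exactly the requested
+	// number of bytes, dispatching on size (1-2, 3, 4-7, 8-16), so the safe
+	// variant never writes past the end of the copied literals.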
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
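+	// Descriptive note: the safe_bmi2 form combines BMI2 bit extraction
+	// (BEXTR/BZHI/SHRX instead of variable CL shifts) with the bounds-exact
+	// copy paths, so it stays correct when the destination has no slack for
+	// the 16-byte over-copy used by the fast variants.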
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000000..ac2a80d2911 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000000..8014174a771 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000000..9e1baad73be --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,435 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 00000000000..29c15c8c4ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000000..b1886f7c742 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,144 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. 
+func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/modules.txt b/vendor/modules.txt index 005578ee3d1..0c814832999 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -504,9 +504,16 @@ github.com/json-iterator/go # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.15.9 -## explicit; go 1.16 +# github.com/klauspost/compress v1.15.14 +## explicit; go 1.17 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/s2 +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/cpuid/v2 v2.1.0 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 From f71c3494c4e20618804393585367275c5a01c2a9 Mon Sep 17 00:00:00 2001 From: Kama Huang <121007071+kama910@users.noreply.github.com> Date: Thu, 12 Jan 2023 10:09:49 -0800 Subject: [PATCH 044/133] remove go.mod replacement for weaveworks/common (#5094) Signed-off-by: Kama Huang Signed-off-by: Kama Huang Signed-off-by: Alex Le --- go.mod | 4 - go.sum | 68 ++- .../golang/protobuf/jsonpb/decode.go | 524 ++++++++++++++++ .../golang/protobuf/jsonpb/encode.go | 559 ++++++++++++++++++ .../github.com/golang/protobuf/jsonpb/json.go | 69 +++ .../grpc/attributes/attributes.go | 2 +- vendor/google.golang.org/grpc/backoff.go | 2 +- .../grpc/balancer/balancer.go | 73 +-- .../grpc/balancer/base/balancer.go | 8 +- .../grpc/balancer/conn_state_evaluator.go | 74 +++ .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 + .../grpc_lb_v1/load_balancer_grpc.pb.go | 19 + .../grpc/balancer/grpclb/grpclb.go | 24 +- .../balancer/grpclb/grpclb_remote_balancer.go | 5 +- .../grpc/balancer/roundrobin/roundrobin.go | 16 +- .../grpc/balancer_conn_wrappers.go | 389 ++++++++---- .../grpc_binarylog_v1/binarylog.pb.go | 13 +- .../grpc/channelz/channelz.go | 36 ++ vendor/google.golang.org/grpc/clientconn.go | 484 +++++++-------- .../alts/internal/handshaker/handshaker.go | 4 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 + .../proto/grpc_gcp/handshaker_grpc.pb.go | 17 + .../grpc/credentials/credentials.go | 20 +- .../grpc/credentials/google/xds.go | 55 +- .../google.golang.org/grpc/credentials/tls.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 101 ++-- .../grpc/encoding/encoding.go | 7 +- .../grpc/encoding/gzip/gzip.go | 2 +- .../grpc/grpclog/loggerv2.go | 2 +- .../health/grpc_health_v1/health_grpc.pb.go | 17 + .../balancer/gracefulswitch/gracefulswitch.go | 384 ++++++++++++ .../grpc/internal/binarylog/binarylog.go | 107 ++-- .../grpc/internal/binarylog/env_config.go | 26 +- .../grpc/internal/binarylog/method_logger.go | 29 +- .../grpc/internal/channelz/funcs.go | 159 +++-- .../grpc/internal/channelz/id.go | 75 +++ .../grpc/internal/channelz/logging.go | 91 ++- .../grpc/internal/channelz/types.go | 39 +- 
.../grpc/internal/envconfig/envconfig.go | 8 +- .../grpc/internal/envconfig/observability.go | 36 ++ .../grpc/internal/envconfig/xds.go | 8 +- .../grpc/internal/googlecloud/googlecloud.go | 76 +-- .../grpc/internal/googlecloud/manufacturer.go | 26 + .../googlecloud/manufacturer_linux.go | 27 + .../googlecloud/manufacturer_windows.go | 50 ++ .../grpc/internal/grpclog/grpclog.go | 2 +- .../grpc/internal/grpcrand/grpcrand.go | 7 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcutil/compressor.go | 47 ++ .../grpc/internal/grpcutil/method.go | 6 +- .../grpc/internal/internal.go | 70 +++ .../grpc/internal/metadata/metadata.go | 46 ++ .../grpc/internal/pretty/pretty.go | 82 +++ .../grpc/internal/resolver/unix/unix.go | 5 +- .../internal/serviceconfig/serviceconfig.go | 8 +- .../grpc/internal/status/status.go | 10 + .../grpc/internal/transport/controlbuf.go | 10 +- .../grpc/internal/transport/handler_server.go | 30 +- .../grpc/internal/transport/http2_client.go | 296 ++++++---- .../grpc/internal/transport/http2_server.go | 153 ++--- .../grpc/internal/transport/http_util.go | 34 +- .../grpc/internal/transport/transport.go | 25 +- .../grpc/metadata/metadata.go | 69 ++- .../google.golang.org/grpc/picker_wrapper.go | 15 +- vendor/google.golang.org/grpc/pickfirst.go | 126 ++-- vendor/google.golang.org/grpc/preloader.go | 2 +- vendor/google.golang.org/grpc/regenerate.sh | 7 +- vendor/google.golang.org/grpc/resolver/map.go | 55 +- .../grpc/resolver/resolver.go | 22 +- .../grpc/resolver_conn_wrapper.go | 23 +- vendor/google.golang.org/grpc/rpc_util.go | 39 +- vendor/google.golang.org/grpc/server.go | 314 ++++++---- .../google.golang.org/grpc/service_config.go | 10 +- .../grpc/serviceconfig/serviceconfig.go | 2 +- .../google.golang.org/grpc/status/status.go | 12 +- vendor/google.golang.org/grpc/stream.go | 432 ++++++++------ vendor/google.golang.org/grpc/tap/tap.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 7 +- vendor/modules.txt | 9 +- 80 files changed, 4289 insertions(+), 1463 deletions(-) create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go diff --git a/go.mod b/go.mod index b3808d2a748..834f5745e8d 100644 --- a/go.mod +++ b/go.mod @@ -229,10 +229,6 @@ replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0- // Replace memberlist with Grafana's fork which includes some fixes that haven't been merged upstream yet replace github.com/hashicorp/memberlist => 
github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b -// Replace grpc as weaveworks common needs to update this first -// vendor/github.com/weaveworks/common/httpgrpc/server/server.go:137:3: undefined: grpc.WithBalancerName -replace google.golang.org/grpc => google.golang.org/grpc v1.45.0 - // Same version being used by thanos replace github.com/vimeo/galaxycache => github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e diff --git a/go.sum b/go.sum index 7ec92c47305..acdddf1c27c 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -311,8 +312,13 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -512,8 +518,17 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= @@ -700,6 +715,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -1064,6 +1080,7 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1723,6 +1740,7 @@ golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+o golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1752,6 +1770,7 @@ golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1876,6 +1895,7 @@ golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2055,9 +2075,11 @@ golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2198,6 +2220,7 @@ google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3p google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2303,8 +2326,50 @@ google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljW google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2375,6 +2440,7 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 00000000000..60e82caa9a2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. 
+func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. 
+ opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + 
case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok 
:= nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 00000000000..685c80a62bc --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. 
+ AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 00000000000..480e2448de6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac14e..02f5dc53189 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 542594f5cc5..29475e31c97 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c9..392b21fb2d8 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -109,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -192,7 +198,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. @@ -243,7 +249,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -371,55 +377,20 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. 
-// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else there are no subconns and the aggregated state is Transient Failure -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - if cse.numTransientFailure > 0 { - return connectivity.TransientFailure - } - if cse.numIdle > 0 { - return connectivity.Idle - } - return connectivity.TransientFailure +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a67074a3ad0..3929c26d31e 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } @@ -153,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 00000000000..c3341358109 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index c393d7ffd3b..bf4c3cb4449 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -52,6 +52,7 @@ type LoadBalanceRequest struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceRequestType: + // // *LoadBalanceRequest_InitialRequest // *LoadBalanceRequest_ClientStats LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` @@ -340,6 +341,7 @@ type LoadBalanceResponse struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceResponseType: + // // *LoadBalanceResponse_InitialResponse // *LoadBalanceResponse_ServerList // *LoadBalanceResponse_FallbackResponse diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index cb4b3c203c5..cf1034830d5 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,3 +1,22 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 6c3402e36c6..dd15810d0ae 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -19,7 +19,8 @@ // Package grpclb defines a grpclb balancer. // // To install grpclb balancer, import this package as: -// import _ "google.golang.org/grpc/balancer/grpclb" +// +// import _ "google.golang.org/grpc/balancer/grpclb" package grpclb import ( @@ -229,8 +230,9 @@ type lbBalancer struct { // regeneratePicker takes a snapshot of the balancer, and generates a picker from // it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// // Caller must hold lb.mu. 
func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { @@ -290,14 +292,14 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // fallback and grpclb). lb.scState contains states for all SubConns, including // those in cache (SubConns are cached for 10 seconds after remove). // -// The aggregated state is: -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between the overall -// state is reported, and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure. +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; +// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, +// they start to connect immediately. But there's a race between the overall +// state is reported, and when the new SubConn state arrives. And SubConns +// never go back to IDLE. +// - Else the aggregated state is TransientFailure. func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { var numConnecting uint64 diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index 805bbbb789a..dab1959418e 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -240,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 274eb2f8580..f7031ad2251 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. 
The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea6174682..0359956d36f 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,131 +19,182 @@ package grpc import ( + "context" "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. 
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. 
- for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -161,44 +212,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
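The rewritten ccBalancerWrapper above funnels every call from the ClientConn into typed update structs pushed onto a single update channel, and one watcher goroutine drains that channel and invokes the balancer, so the balancer sees calls one at a time and in arrival order; updateClientConnState additionally blocks on a result channel so the balancer's error can be handed back to the resolver synchronously. A rough, self-contained sketch of that shape, using plain buffered channels where grpc-go uses its internal buffer.Unbounded (names and types here are illustrative only):

package main

import "fmt"

// Each update kind gets its own type; the watcher switches on the type.
type scStateUpdate struct{ state string }
type ccStateUpdate struct {
	addrs  []string
	result chan error // lets the caller wait for the balancer's answer
}

type wrapper struct {
	updateCh chan interface{}
}

func newWrapper() *wrapper {
	w := &wrapper{updateCh: make(chan interface{}, 16)}
	go w.watcher()
	return w
}

// watcher is the only goroutine that touches the (imaginary) balancer, so the
// balancer needs no locking and sees updates in arrival order.
func (w *wrapper) watcher() {
	for u := range w.updateCh {
		switch u := u.(type) {
		case *scStateUpdate:
			fmt.Println("balancer: subconn state ->", u.state)
		case *ccStateUpdate:
			fmt.Println("balancer: new addresses ->", u.addrs)
			u.result <- nil // report the balancer's result to the caller
		}
	}
}

// updateClientConnState blocks until the watcher has run the update, mirroring
// the synchronous error return described above.
func (w *wrapper) updateClientConnState(addrs []string) error {
	res := make(chan error, 1)
	w.updateCh <- &ccStateUpdate{addrs: addrs, result: res}
	return <-res
}

func main() {
	w := newWrapper()
	w.updateCh <- &scStateUpdate{state: "READY"}
	_ = w.updateClientConnState([]string{"10.0.0.1:443"})
}
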
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. 
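handleSwitchTo above skips the switch when the requested LB policy name matches the current one case-insensitively, otherwise looks the builder up in the balancer registry and falls back to pick_first when the name is unknown, before handing the builder to the gracefulswitch balancer. The lookup-and-fallback portion boils down to something like this toy registry (not grpc-go's balancer package):

package main

import (
	"fmt"
	"strings"
)

// A toy builder registry keyed by lower-cased policy name, mirroring the
// "unknown name falls back to pick_first" behaviour described above.
var builders = map[string]string{
	"pick_first":  "pick_first builder",
	"round_robin": "round_robin builder",
}

const defaultPolicy = "pick_first"

// pickBuilder returns the builder to switch to and whether a switch is needed.
func pickBuilder(current, requested string) (string, bool) {
	// Nothing to do if the policy is unchanged; the comparison is
	// case-insensitive, like strings.EqualFold in the hunk above.
	if strings.EqualFold(current, requested) {
		return "", false
	}
	b, ok := builders[strings.ToLower(requested)]
	if !ok {
		// Unknown policy: warn and fall back to the default.
		fmt.Printf("policy %q not registered, using %q\n", requested, defaultPolicy)
		return builders[defaultPolicy], true
	}
	return b, true
}

func main() {
	if _, changed := pickBuilder("round_robin", "ROUND_ROBIN"); !changed {
		fmt.Println("same policy, no switch")
	}
	if b, changed := pickBuilder("round_robin", "does_not_exist"); changed {
		fmt.Println("switching to:", b)
	}
}
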
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +342,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -235,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -290,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. 
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf3..64a232f2811 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -261,6 +261,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +695,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. 
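Stepping back to the balancer_conn_wrappers.go hunk above: GetOrBuildProducer keeps one producer per builder on the SubConn, ref-counts it, and wraps the returned cleanup in grpcsync.OnceFunc so calling it twice cannot under-count; the producer is only closed and evicted when the last reference is released. A generic sketch of that pattern using the standard library's sync.Once in place of grpcsync.OnceFunc (all names here are made up for illustration):

package main

import (
	"fmt"
	"sync"
)

type resource struct{ name string }

type refCounted struct {
	res  *resource
	refs int
}

type cache struct {
	mu      sync.Mutex
	entries map[string]*refCounted
}

// acquire returns the shared resource for key, building it on first use, plus
// a release function that is safe to call more than once per acquire but only
// decrements the count the first time.
func (c *cache) acquire(key string) (*resource, func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e := c.entries[key]
	if e == nil {
		e = &refCounted{res: &resource{name: key}}
		c.entries[key] = e
	}
	e.refs++

	var once sync.Once
	release := func() {
		once.Do(func() {
			c.mu.Lock()
			defer c.mu.Unlock()
			e.refs--
			if e.refs == 0 {
				delete(c.entries, key)
				fmt.Println("closing", e.res.name)
			}
		})
	}
	return e.res, release
}

func main() {
	c := &cache{entries: map[string]*refCounted{}}
	r1, rel1 := c.acquire("builderA")
	r2, rel2 := c.acquire("builderA")
	fmt.Println(r1 == r2) // true: second acquire reuses the cached resource
	rel1()
	rel1() // no-op thanks to sync.Once
	rel2() // last reference: resource is closed and evicted
}
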
// Practically speaking it will only be visible on server side because diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 00000000000..32b7fa5794e --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f9af7891371..422639c79db 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } @@ -159,23 +163,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +282,7 @@ func DialContext(ctx 
context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +290,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +399,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +465,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -500,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. 
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -519,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -531,19 +534,12 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +619,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -653,16 +647,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +658,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +672,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. 
+// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. - cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -764,23 +712,26 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -810,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -853,21 +804,36 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
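Per the updated tryUpdateAddrs contract above, an address list identical to the current one is now a no-op that returns true, so a redundant resolver update no longer forces the addrConn to be torn down and rebuilt. The comparison itself is a plain element-wise walk; a simplified stand-in for resolver.Address (which has its own Equal method) shows the idea:

package main

import "fmt"

// addr is a stand-in for resolver.Address, reduced to the fields that matter
// for the comparison.
type addr struct {
	host string
	port int
}

// equalAddresses reports whether two address lists are identical, element by
// element and in the same order.
func equalAddresses(a, b []addr) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}

func main() {
	cur := []addr{{"10.0.0.1", 443}, {"10.0.0.2", 443}}
	upd := []addr{{"10.0.0.1", 443}, {"10.0.0.2", 443}}
	if equalAddresses(cur, upd) {
		fmt.Println("same addresses: keep the existing connection")
	}
}
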
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -879,6 +845,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -959,14 +929,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -991,35 +957,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1041,7 +998,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1070,11 +1027,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. 
cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1042,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1087,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1271,38 +1228,33 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. - prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := grpcsync.OnceFunc(func() { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. return } hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. 
ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + }) onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) @@ -1312,71 +1264,44 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. 
Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error. - select { - case <-prefaceReceived.Done(): - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1497,19 +1422,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1628,7 +1552,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget return rb, nil @@ -1649,9 +1573,9 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 8bc7ceee0af..7b953a520e5 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -158,7 +158,7 @@ type altsHandshaker struct { // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. 
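Back in the clientconn.go hunk above, parseTargetAndFindResolver now reads the scheme from parsedTarget.URL.Scheme, i.e. the dial target is treated as a URI and its scheme selects the resolver, with a second parse against the default scheme when the first attempt finds no registered resolver. The scheme-extraction half of that is ordinary net/url parsing (the targets below are hypothetical examples):

package main

import (
	"fmt"
	"net/url"
)

// schemeOf parses a dial target as a URI and returns its scheme, which is what
// the client uses to look up a resolver ("dns", "unix", "passthrough", ...).
func schemeOf(target string) string {
	u, err := url.Parse(target)
	if err != nil {
		return ""
	}
	return u.Scheme
}

func main() {
	for _, t := range []string{
		"dns:///example.com:443",
		"unix:///tmp/grpc.sock",
		"passthrough:///10.0.0.1:50051",
	} {
		fmt.Printf("%-35s -> scheme %q\n", t, schemeOf(t))
	}
}
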
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } @@ -174,7 +174,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 40570e9bf2d..383c5fb97a7 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -216,6 +216,7 @@ type Identity struct { unknownFields protoimpl.UnknownFields // Types that are assignable to IdentityOneof: + // // *Identity_ServiceAccount // *Identity_Hostname IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` @@ -664,6 +665,7 @@ type HandshakerReq struct { unknownFields protoimpl.UnknownFields // Types that are assignable to ReqOneof: + // // *HandshakerReq_ClientStart // *HandshakerReq_ServerStart // *HandshakerReq_Next diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index fd55176b9b6..d3562c6d5e6 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 96ff1877e75..5feac3aa0e4 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index b8c2e8f9204..2c5c8b9eee1 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -21,6 +21,7 @@ package google import ( "context" "net" + "net/url" "strings" "google.golang.org/grpc/credentials" @@ -28,13 +29,18 @@ import ( ) const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", use TLS +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS // - otherwise, use ALTS +// // - else, do TLS // // On the server, ServerHandshake always does TLS. @@ -50,18 +56,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust } } -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +// clusterName returns the xDS cluster name stored in the attributes in the +// context. 
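The credentials/google/xds.go hunk above widens the TLS-versus-ALTS decision: no cluster attribute means plain TLS; a cluster name prefixed with google_cfe_, or an xdstp URI whose authority is traffic-director-c2p.xds.googleapis.com and whose path starts with /envoy.config.cluster.v3.Cluster/google_cfe_, is treated as CFE and also uses TLS; every other xDS cluster is a directpath backend that handshakes with ALTS. A sketch of just that decision, following the rules as documented in the hunk (not the vendored function itself):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

const (
	cfePrefix       = "google_cfe_"
	cfeAuthority    = "traffic-director-c2p.xds.googleapis.com"
	cfeResourcePath = "/envoy.config.cluster.v3.Cluster/google_cfe_"
)

// useALTS reports whether the handshake for the given xDS cluster name should
// use ALTS (directpath backends) instead of TLS (CFE or non-xDS traffic).
func useALTS(cluster string) bool {
	if cluster == "" {
		return false // no cluster attribute: not xDS, use TLS
	}
	if strings.HasPrefix(cluster, cfePrefix) {
		return false // legacy CFE cluster name: TLS
	}
	if !strings.HasPrefix(cluster, "xdstp:") {
		return true // other plain cluster name: directpath, ALTS
	}
	u, err := url.Parse(cluster)
	if err != nil {
		return true // unparsable xdstp name: assume directpath
	}
	// xdstp CFE clusters are identified by authority plus resource path.
	return u.Host != cfeAuthority || !strings.HasPrefix(u.Path, cfeResourcePath)
}

func main() {
	for _, c := range []string{
		"",
		"google_cfe_my-service",
		"xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_foo",
		"some_backend_cluster",
	} {
		fmt.Printf("%q -> ALTS=%v\n", c, useALTS(c))
	}
}
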
+func clusterName(ctx context.Context) string { chi := credentials.ClientHandshakeInfoFromContext(ctx) if chi.Attributes == nil { - return c.tls.ClientHandshake(ctx, authority, rawConn) + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false } - cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { - return c.tls.ClientHandshake(ctx, authority, rawConn) + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) } - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. - return c.alts.ClientHandshake(ctx, authority, rawConn) + return c.tls.ClientHandshake(ctx, authority, rawConn) } func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 784822d0560..ce2bbc10a14 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index c4bf09f9e94..9372dc322e8 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,32 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) 
+ } + internal.ClearGlobalDialOptions = func() { + extraDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -45,19 +55,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -73,10 +82,12 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -195,25 +206,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -286,7 +278,7 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -304,8 +296,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -315,7 +307,7 @@ func WithInsecure() DialOption { // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. 
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -346,7 +338,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -402,7 +394,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -414,7 +420,7 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -494,11 +500,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -539,9 +545,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently enabled by default, but may be disabled by -// setting the environment variable "GRPC_GO_RETRY" to "off". func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -559,7 +562,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -606,7 +609,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
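Not part of the patch: the dial-option changes above remove grpc.WithBalancerName and make grpc.WithStatsHandler append to a list of handlers while ignoring nil. A minimal caller-side migration sketch, assuming a round_robin balancer was previously selected by name; the target address and service-config string below are illustrative only.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "localhost:9095" is only a placeholder target for this sketch.
	conn, err := grpc.Dial(
		"localhost:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Previously: grpc.WithBalancerName("round_robin"); removed in this gRPC version.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}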
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d0..711763d54fb 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,7 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) } // GetCompressor returns Compressor for the given compressor name. @@ -108,7 +111,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go index ce2f15ed288..ca820bd47d4 100644 --- a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -19,7 +19,7 @@ // Package gzip implements and registers the gzip compressor // during the initialization. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f6640903..b5560b47ec4 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 69f525d1bae..a332dfd7b54 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 00000000000..08666f62a7c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. 
+func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. 
+ balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. 
bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb21..809d73ccafb 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,35 +31,42 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. 
The map fields need to be filled in @@ -88,83 +107,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602fd..f9e80e27ab6 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. 
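Not part of the patch: a sketch of how the filter-string grammar documented above maps onto the newly exported LoggerConfig and MethodLoggerConfig types. It is written as if it lived inside the internal binarylog package (which cannot be imported from outside grpc-go), and the helper name exampleLoggerFromConfig is hypothetical.

// Equivalent of the filter string "Foo/*,-Foo/Bar,Baz/Qux{m:256}", built
// directly with the exported config types instead of NewLoggerFromConfigString.
func exampleLoggerFromConfig() Logger {
	return NewLoggerFromConfig(LoggerConfig{
		Services: map[string]*MethodLoggerConfig{
			"Foo": {Header: maxUInt, Message: maxUInt}, // log every method in service Foo in full
		},
		Methods: map[string]*MethodLoggerConfig{
			"Baz/Qux": {Header: 0, Message: 256}, // only the first 256 bytes of each message
		},
		Blacklist: map[string]struct{}{
			"Foo/Bar": {}, // never log /Foo/Bar
		},
	})
}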
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb4183150..179f4a26d13 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,13 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +63,9 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +76,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +94,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. 
+func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +132,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index ea660a147cf..777cbcd7921 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -25,6 +25,7 @@ package channelz import ( "context" + "errors" "fmt" "sort" "sync" @@ -184,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -240,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. 
-func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 00000000000..c9a27acd371 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. 
Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c886..8e13a3d2ce7 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. 
-func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154bd..7b2f350e2e6 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
+ RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6f027254311..7edd196bd3d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,15 @@ import ( ) const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" ) var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 00000000000..821dd0a7c19 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 7d996e51b5c..af09711a3e8 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -77,16 +77,16 @@ var ( // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". 
- XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". - XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index d6c9e03fc4c..6717b757f80 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. - runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v") + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP. 
-func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go new file mode 100644 index 00000000000..ffa0f1ddee5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -0,0 +1,26 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 00000000000..e53b8ffc837 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "io/ioutil" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 00000000000..2d7aaaaa70f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258fc..b68e26a3649 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b76..517ea70642a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 00000000000..6635f7bca96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 00000000000..9f409096798 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060c1..ec62b4775e5 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") @@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 20fb880f344..fd0ee3dcaf1 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -63,6 +63,70 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. 
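Illustrative sketch (standalone restatement, since grpcutil and envconfig are internal packages and cannot be imported from outside the grpc module): how the new RegisteredCompressors helper is expected to behave. The compressor name here is an example; in the real code the list is filled when a compressor is registered through the encoding package, and the returned string is what gets advertised in the grpc-accept-encoding header.

package main

import (
	"fmt"
	"os"
	"strings"
)

// registeredCompressorNames stands in for grpcutil.RegisteredCompressorNames.
var registeredCompressorNames = []string{"gzip"}

func registeredCompressors() string {
	// Mirrors envconfig.AdvertiseCompressors: advertising is enabled unless
	// GRPC_GO_ADVERTISE_COMPRESSORS is explicitly set to "false".
	if strings.EqualFold(os.Getenv("GRPC_GO_ADVERTISE_COMPRESSORS"), "false") {
		return ""
	}
	return strings.Join(registeredCompressorNames, ",")
}

func main() {
	fmt.Println(registeredCompressors()) // "gzip" when the env var is unset
}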
DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + ClearGlobalDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -85,3 +149,9 @@ const ( // that supports backend returned by grpclb balancer. 
CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf340..b2980f8ac44 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 00000000000..0177af4b511 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. 
+package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 20852e59df2..7f1a702cacb 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv } addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf597f..51e733e495a 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. 
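Illustrative sketch (standalone, assuming the two-space indent defined as jsonIndent above): the fallback behavior of the pretty.FormatJSON helper added earlier in this hunk, namely indent valid JSON and return anything else unchanged.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// formatJSON restates pretty.FormatJSON: indent valid JSON, otherwise return
// the input unchanged.
func formatJSON(b []byte) string {
	var out bytes.Buffer
	if err := json.Indent(&out, b, "", "  "); err != nil {
		return string(b)
	}
	return out.String()
}

func main() {
	fmt.Println(formatJSON([]byte(`{"a":1,"b":[2,3]}`))) // printed indented
	fmt.Println(formatJSON([]byte("not json")))          // printed as-is
}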
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513edd1..b0ead4f54f8 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df0..409769f48fd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } @@ -880,9 +886,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b4c..fb272235d81 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -49,7 +49,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. 
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } @@ -138,7 +138,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } func (ht *serverHandlerTransport) Close() { @@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, @@ -442,10 +442,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d33710..d518b07e16f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,8 +38,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -78,6 +80,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -90,7 +93,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -98,17 +101,15 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -132,7 +133,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -192,7 +193,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
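Illustrative sketch (toy types, not the real stats package): the recurring change in the transport files below replaces a single optional stats.Handler with a slice of handlers, so each callout becomes a plain range loop that is naturally a no-op when no handlers are configured.

package main

import "fmt"

type statsHandler interface{ HandleConn(event string) }

type printHandler struct{ name string }

func (p printHandler) HandleConn(event string) { fmt.Println(p.name, event) }

func main() {
	handlers := []statsHandler{printHandler{"metrics"}, printHandler{"tracing"}}
	// Before: if h != nil { h.HandleConn(...) }
	// After: range over all configured handlers.
	for _, sh := range handlers {
		sh.HandleConn("conn begin")
	}
}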
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -214,12 +215,35 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if connectCtx.Err() != nil { + // connectCtx expired before exiting the function. Hard close the connection. + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -251,15 +275,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -297,6 +313,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. 
cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -311,19 +328,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } + // Add peer information to the http2client context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -341,38 +359,50 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -392,14 +422,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. 
if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -466,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -502,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -584,7 +625,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -613,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -629,18 +681,17 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. -// 2. If the credentials errored when requesting their headers. 
In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +700,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -685,7 +736,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea cleanup(err) return err } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -719,6 +769,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.nextID += 2 s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -744,22 +801,17 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. 
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +819,32 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -872,19 +927,15 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. t.onClose() t.state = closing streams := t.activeStreams @@ -898,9 +949,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +966,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -1001,13 +1050,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1213,7 +1262,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. @@ -1226,18 +1275,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. 
+ for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } @@ -1433,7 +1493,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1441,14 +1501,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } @@ -1465,33 +1525,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. 
for { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59c..3dd15647bc8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -82,7 +82,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -117,7 +117,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -252,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -260,6 +265,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + // Add peer information to the http2server context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -267,20 +275,20 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -443,6 +451,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -479,14 +488,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -516,14 +518,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -544,6 +548,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -561,8 +566,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -571,7 +576,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -925,12 +930,27 @@ func (t *http2Server) 
checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -939,10 +959,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -973,14 +991,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -990,17 +1008,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -1029,7 +1049,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -1041,10 +1061,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1076,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. 
if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1091,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1210,25 +1214,19 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1248,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1272,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1404,6 +1412,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf69..2c601a864d9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,7 +20,6 @@ package transport import ( "bufio" - "bytes" "encoding/base64" "fmt" "io" @@ -45,14 +44,8 @@ import ( const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -257,13 +250,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -272,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
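The http_util.go hunks above switch the grpc-message percent-encoding helpers from bytes.Buffer to strings.Builder without changing the encoding itself: printable ASCII other than '%' passes through, every other byte is emitted as %XX. A minimal standalone sketch of that scheme, assuming nothing beyond the standard library (the helper name is illustrative, not part of the gRPC API):

package main

import (
	"fmt"
	"strings"
)

// percentEncode keeps printable ASCII (except '%') as-is and percent-encodes
// every other byte, mirroring the behaviour of encodeGrpcMessageUnchecked in
// the hunk above; multi-byte UTF-8 runes end up fully percent-encoded because
// each of their bytes falls outside the printable ASCII range.
func percentEncode(msg string) string {
	var sb strings.Builder
	for i := 0; i < len(msg); i++ {
		b := msg[i]
		if b >= ' ' && b <= '~' && b != '%' {
			sb.WriteByte(b)
		} else {
			fmt.Fprintf(&sb, "%%%02X", b)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(percentEncode("tab\tand ümlaut")) // tab%09and %C3%BCmlaut
}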
@@ -297,23 +290,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -322,8 +315,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -360,9 +351,6 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) w.offset = 0 return w.err diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 0c43efaa649..2e615ee20cc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -42,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -365,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -522,14 +533,14 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -552,8 +563,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -563,7 +574,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. 
@@ -572,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819fd..fb4a88f59bd 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,16 +41,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,7 +76,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -182,17 +184,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -220,13 +256,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index e8367cb8993..a5d5516ee06 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -129,9 +130,13 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +180,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b078..fb7a99e0a27 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. 
- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. + b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = 
state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad91..cd45547854f 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 978b89f37a4..99db79fafcf 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -68,7 +68,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -80,8 +79,7 @@ SOURCES=( # Note that the protos listed here are all for testing purposes. All protos to # be used externally should have a go_package option (and they don't need to be # listed here). 
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ @@ -121,9 +119,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config - # grpc/testing does not have a go_package option. mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index e87ecd0eeb3..efcb7f3efd8 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int { // Get returns the value for the address in the map, if present. 
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { // Set updates or adds the value to the address in the map. func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b6802606..967cbc7373a 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -95,7 +96,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. 
type BuildOptions struct { @@ -230,12 +236,12 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. Scheme string diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f07..05a9d4e0bac 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 
0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004b0..934fc1aa015 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
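The rpc_util.go hunks above only reflow the doc comments for the per-call options; their behaviour is unchanged. A hedged usage sketch of a few of them on a unary call, assuming the generated Greeter client from the standard helloworld example and a local server address (both placeholders, not part of this patch):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // importing registers the gzip compressor
	"google.golang.org/grpc/metadata"

	pb "google.golang.org/grpc/examples/helloworld/helloworld" // placeholder proto package
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var header, trailer metadata.MD
	client := pb.NewGreeterClient(conn)
	// Per-call options: compress the request with gzip and collect the
	// response header and trailer metadata after the RPC completes.
	_, err = client.SayHello(ctx, &pb.HelloRequest{Name: "world"},
		grpc.UseCompressor(gzip.Name),
		grpc.Header(&header),
		grpc.Trailer(&trailer),
	)
	if err != nil {
		log.Fatalf("rpc: %v", err)
	}
	log.Printf("header: %v trailer: %v", header, trailer)
}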
@@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +548,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index b24b6d53958..f4dde72b41f 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -73,6 +73,14 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + extraServerOptions = append(extraServerOptions, opt...) 
+ } + internal.ClearGlobalServerOptions = func() { + extraServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -134,7 +142,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -149,8 +157,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -174,6 +183,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -183,7 +193,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -207,6 +217,22 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The default value for this buffer is 32KB. @@ -298,7 +324,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -419,7 +445,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -435,7 +461,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. 
+func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -462,7 +502,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +523,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -498,7 +538,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -560,6 +600,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +627,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +754,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +766,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +800,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +809,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -866,7 +910,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -887,7 +931,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) 
transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -945,24 +989,24 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1075,8 +1119,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1123,13 +1169,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1160,7 +1206,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1180,9 +1226,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1202,7 +1255,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1242,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1259,7 +1314,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1268,10 +1323,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Length: len(d), }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1295,18 +1353,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return appErr } @@ -1332,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1363,11 +1435,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return err } @@ -1417,16 +1492,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1438,10 +1515,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1455,7 +1532,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1463,7 +1540,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1476,8 +1555,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1496,7 +1582,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1562,11 +1650,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } // TODO: Should we log an error from WriteStatus here and below? return appErr @@ -1577,11 +1668,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } return err } @@ -1657,7 +1751,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1672,7 +1766,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1687,7 +1781,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
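The server.go hunks above replace the single statsHandler with a statsHandlers slice and fan every connection and RPC event out to each handler, so grpc.StatsHandler can now be passed more than once. A minimal sketch of attaching two handlers, assuming a trivial logging handler (rpcLogger and the listen address are illustrative, not part of the patch):

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// rpcLogger is a trivial stats.Handler that just logs the type of every
// connection and RPC event it receives.
type rpcLogger struct{ name string }

func (l *rpcLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (l *rpcLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("[%s] rpc event: %T", l.name, s)
}
func (l *rpcLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (l *rpcLogger) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("[%s] conn event: %T", l.name, s)
}

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Each grpc.StatsHandler option appends to the server's handler slice,
	// so both handlers observe every connection and RPC.
	srv := grpc.NewServer(
		grpc.StatsHandler(&rpcLogger{name: "metrics"}),
		grpc.StatsHandler(&rpcLogger{name: "tracing"}),
	)
	log.Fatal(srv.Serve(lis))
}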
@@ -1709,11 +1803,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1751,11 +1841,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1808,12 +1894,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1825,8 +1925,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1840,6 +1946,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. 
func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 6926a06dc52..01bbb2025ae 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -57,10 +57,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. If @@ -381,6 +380,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f926613..35e7a20a04b 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 6d163b6e384..623be39f26b 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. 
func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8cdd652e037..960c3e33dfd 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,8 +36,10 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -139,13 +141,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -166,6 +168,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -189,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -295,20 +309,35 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -322,7 +351,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } if desc != unaryStreamDesc { @@ -343,14 +374,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -379,27 +416,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. 
- newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. @@ -407,16 +423,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -425,12 +457,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -456,7 +497,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -506,8 +547,13 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -527,41 +573,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. 
- if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -573,14 +599,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -597,10 +623,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -645,19 +671,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -667,7 +698,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -681,6 +715,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. 
return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) @@ -697,7 +743,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -706,17 +752,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -725,10 +779,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -746,10 +802,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -797,47 +852,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -850,7 +906,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } } return err @@ -871,10 +929,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(chc) + } } // We never returned an error here for reasons. return nil @@ -907,10 +968,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -943,8 +1007,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -954,7 +1018,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -982,6 +1046,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -991,8 +1056,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1051,7 +1116,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1059,7 +1124,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1364,8 +1429,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1426,9 +1493,9 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). 
@@ -1448,17 +1515,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } return err } @@ -1467,6 +1546,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1512,20 +1594,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1559,13 +1649,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } } return err } @@ -1574,20 +1667,25 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+ Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } } return nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb5f..bfa5dfa40e4 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5bd4f534c1d..2198e7098d7 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.45.0" +const Version = "1.51.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index ceb436c6ce4..bd8e0cdb33c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -67,7 +67,9 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +83,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. 
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" @@ -147,7 +149,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer diff --git a/vendor/modules.txt b/vendor/modules.txt index 0c814832999..e9f7f7f79ed 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -392,6 +392,7 @@ github.com/golang/groupcache/lru github.com/golang/groupcache/singleflight # github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any @@ -1175,8 +1176,8 @@ google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.51.0 => google.golang.org/grpc v1.45.0 -## explicit; go 1.14 +# google.golang.org/grpc v1.51.0 +## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1187,6 +1188,7 @@ google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -1207,6 +1209,7 @@ google.golang.org/grpc/grpclog google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -1219,6 +1222,7 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -1295,7 +1299,6 @@ sigs.k8s.io/yaml # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b -# google.golang.org/grpc => google.golang.org/grpc v1.45.0 # github.com/vimeo/galaxycache => github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e # github.com/go-openapi/spec => github.com/go-openapi/spec v0.20.6 # github.com/ionos-cloud/sdk-go/v6 => github.com/ionos-cloud/sdk-go/v6 v6.0.4 From b56b1481f09612f7cf9a9ffe9b424d096c222a8f Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 12 Jan 2023 13:44:31 -0800 Subject: [PATCH 045/133] Add support for new thanos promql engine (#5093) * add support for new thanos promql engine Signed-off-by: Ben Ye * add some e2e tests Signed-off-by: Ben Ye * update changelog Signed-off-by: Ben Ye * add more tests in ut Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/blocks-storage/querier.md | 6 + docs/configuration/config-file-reference.md | 6 + go.mod | 2 + go.sum | 4 + integration/querier_remote_read_test.go | 196 +++-- integration/querier_test.go | 610 ++++++------- integration/ruler_test.go | 
142 +-- pkg/cortex/modules.go | 19 +- pkg/querier/blocks_store_queryable_test.go | 190 +++-- pkg/querier/chunk_store_queryable_test.go | 38 +- pkg/querier/error_translate_queryable_test.go | 64 +- pkg/querier/querier.go | 22 +- pkg/querier/querier_test.go | 657 +++++++------- .../thanos-community/promql-engine/COPYRIGHT | 2 + .../thanos-community/promql-engine/LICENSE | 201 +++++ .../promql-engine/api/remote.go | 31 + .../promql-engine/engine/engine.go | 442 ++++++++++ .../execution/aggregate/hashaggregate.go | 232 +++++ .../execution/aggregate/khashaggregate.go | 233 +++++ .../execution/aggregate/scalar_table.go | 335 ++++++++ .../execution/aggregate/vector_table.go | 96 +++ .../promql-engine/execution/binary/index.go | 35 + .../promql-engine/execution/binary/scalar.go | 188 ++++ .../promql-engine/execution/binary/table.go | 212 +++++ .../promql-engine/execution/binary/vector.go | 342 ++++++++ .../execution/exchange/coalesce.go | 186 ++++ .../execution/exchange/concurrent.go | 97 +++ .../promql-engine/execution/execution.go | 338 ++++++++ .../execution/function/functions.go | 668 +++++++++++++++ .../execution/function/histogram.go | 212 +++++ .../execution/function/operator.go | 281 ++++++ .../execution/function/quantile.go | 165 ++++ .../promql-engine/execution/model/operator.go | 27 + .../promql-engine/execution/model/pool.go | 68 ++ .../promql-engine/execution/model/vector.go | 19 + .../promql-engine/execution/parse/errors.go | 21 + .../execution/remote/operator.go | 102 +++ .../execution/scan/literal_selector.go | 99 +++ .../execution/scan/matrix_selector.go | 281 ++++++ .../execution/scan/vector_selector.go | 195 +++++ .../step_invariant/step_invariant.go | 150 ++++ .../promql-engine/execution/storage/filter.go | 60 ++ .../execution/storage/filtered_selector.go | 61 ++ .../promql-engine/execution/storage/pool.go | 83 ++ .../execution/storage/series_selector.go | 97 +++ .../promql-engine/execution/unary/unary.go | 104 +++ .../promql-engine/logicalplan/distribute.go | 133 +++ .../promql-engine/logicalplan/filter.go | 28 + .../logicalplan/merge_selects.go | 156 ++++ .../promql-engine/logicalplan/plan.go | 150 ++++ .../logicalplan/propagate_selectors.go | 120 +++ .../logicalplan/sort_matchers.go | 29 + .../promql-engine/query/options.go | 36 + .../promql-engine/worker/worker.go | 97 +++ vendor/gonum.org/v1/gonum/AUTHORS | 121 +++ vendor/gonum.org/v1/gonum/CONTRIBUTORS | 124 +++ vendor/gonum.org/v1/gonum/LICENSE | 23 + vendor/gonum.org/v1/gonum/floats/README.md | 4 + vendor/gonum.org/v1/gonum/floats/doc.go | 11 + vendor/gonum.org/v1/gonum/floats/floats.go | 807 ++++++++++++++++++ .../gonum.org/v1/gonum/floats/scalar/doc.go | 6 + .../v1/gonum/floats/scalar/scalar.go | 171 ++++ .../v1/gonum/internal/asm/f64/abssum_amd64.s | 82 ++ .../gonum/internal/asm/f64/abssuminc_amd64.s | 90 ++ .../v1/gonum/internal/asm/f64/add_amd64.s | 66 ++ .../gonum/internal/asm/f64/addconst_amd64.s | 53 ++ .../v1/gonum/internal/asm/f64/axpy.go | 62 ++ .../v1/gonum/internal/asm/f64/axpyinc_amd64.s | 142 +++ .../gonum/internal/asm/f64/axpyincto_amd64.s | 148 ++++ .../internal/asm/f64/axpyunitary_amd64.s | 134 +++ .../internal/asm/f64/axpyunitaryto_amd64.s | 140 +++ .../v1/gonum/internal/asm/f64/cumprod_amd64.s | 71 ++ .../v1/gonum/internal/asm/f64/cumsum_amd64.s | 64 ++ .../v1/gonum/internal/asm/f64/div_amd64.s | 67 ++ .../v1/gonum/internal/asm/f64/divto_amd64.s | 73 ++ .../v1/gonum/internal/asm/f64/doc.go | 6 + .../v1/gonum/internal/asm/f64/dot.go | 38 + .../v1/gonum/internal/asm/f64/dot_amd64.s | 145 ++++ 
.../v1/gonum/internal/asm/f64/ge_amd64.go | 29 + .../v1/gonum/internal/asm/f64/ge_noasm.go | 125 +++ .../v1/gonum/internal/asm/f64/gemvN_amd64.s | 685 +++++++++++++++ .../v1/gonum/internal/asm/f64/gemvT_amd64.s | 745 ++++++++++++++++ .../v1/gonum/internal/asm/f64/ger_amd64.s | 591 +++++++++++++ .../v1/gonum/internal/asm/f64/l1norm_amd64.s | 58 ++ .../v1/gonum/internal/asm/f64/l2norm_amd64.s | 109 +++ .../v1/gonum/internal/asm/f64/l2norm_noasm.go | 93 ++ .../gonum/internal/asm/f64/l2normdist_amd64.s | 115 +++ .../gonum/internal/asm/f64/l2norminc_amd64.s | 110 +++ .../gonum/internal/asm/f64/linfnorm_amd64.s | 57 ++ .../v1/gonum/internal/asm/f64/scal.go | 62 ++ .../v1/gonum/internal/asm/f64/scalinc_amd64.s | 113 +++ .../gonum/internal/asm/f64/scalincto_amd64.s | 122 +++ .../internal/asm/f64/scalunitary_amd64.s | 112 +++ .../internal/asm/f64/scalunitaryto_amd64.s | 113 +++ .../v1/gonum/internal/asm/f64/stubs_amd64.go | 277 ++++++ .../v1/gonum/internal/asm/f64/stubs_noasm.go | 182 ++++ .../v1/gonum/internal/asm/f64/sum_amd64.s | 99 +++ vendor/modules.txt | 24 + 99 files changed, 13953 insertions(+), 885 deletions(-) create mode 100644 vendor/github.com/thanos-community/promql-engine/COPYRIGHT create mode 100644 vendor/github.com/thanos-community/promql-engine/LICENSE create mode 100644 vendor/github.com/thanos-community/promql-engine/api/remote.go create mode 100644 vendor/github.com/thanos-community/promql-engine/engine/engine.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/aggregate/hashaggregate.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/aggregate/khashaggregate.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/aggregate/scalar_table.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/aggregate/vector_table.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/binary/index.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/binary/scalar.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/binary/table.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/binary/vector.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/exchange/concurrent.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/execution.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/function/functions.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/function/histogram.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/function/operator.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/function/quantile.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/model/operator.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/model/pool.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/model/vector.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/parse/errors.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/remote/operator.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/scan/literal_selector.go create mode 100644 
vendor/github.com/thanos-community/promql-engine/execution/scan/matrix_selector.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/scan/vector_selector.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/step_invariant/step_invariant.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/storage/filter.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/storage/filtered_selector.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/storage/pool.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/storage/series_selector.go create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/unary/unary.go create mode 100644 vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go create mode 100644 vendor/github.com/thanos-community/promql-engine/logicalplan/filter.go create mode 100644 vendor/github.com/thanos-community/promql-engine/logicalplan/merge_selects.go create mode 100644 vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go create mode 100644 vendor/github.com/thanos-community/promql-engine/logicalplan/propagate_selectors.go create mode 100644 vendor/github.com/thanos-community/promql-engine/logicalplan/sort_matchers.go create mode 100644 vendor/github.com/thanos-community/promql-engine/query/options.go create mode 100644 vendor/github.com/thanos-community/promql-engine/worker/worker.go create mode 100644 vendor/gonum.org/v1/gonum/AUTHORS create mode 100644 vendor/gonum.org/v1/gonum/CONTRIBUTORS create mode 100644 vendor/gonum.org/v1/gonum/LICENSE create mode 100644 vendor/gonum.org/v1/gonum/floats/README.md create mode 100644 vendor/gonum.org/v1/gonum/floats/doc.go create mode 100644 vendor/gonum.org/v1/gonum/floats/floats.go create mode 100644 vendor/gonum.org/v1/gonum/floats/scalar/doc.go create mode 100644 vendor/gonum.org/v1/gonum/floats/scalar/scalar.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s create mode 100644 
vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l2normdist_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l2norminc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s diff --git a/CHANGELOG.md b/CHANGELOG.md index 9218890fb8c..9cd9b10e917 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ * [FEATURE] Query Frontend/Scheduler: Add a new counter metric `cortex_request_queue_requests_total` for total requests going to queue. #5030 * [FEATURE] Build ARM docker images. #5041 * [FEATURE] Query-frontend/Querier: Create spans to measure time to merge promql responses. #5041 +* [FEATURE] Querier/Ruler: Support the new thanos promql engine. This is an experimental feature and might change in the future. #5093 * [FEATURE] Added zstd as an option for grpc compression #5092 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. #5044 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 1e84886d514..9387292364d 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -218,6 +218,12 @@ querier: # sharding on read path is disabled). # CLI flag: -querier.shuffle-sharding-ingesters-lookback-period [shuffle_sharding_ingesters_lookback_period: | default = 0s] + + # Experimental. Use Thanos promql engine + # https://github.com/thanos-community/promql-engine rather than the Prometheus + # promql engine. + # CLI flag: -querier.thanos-engine + [thanos_engine: | default = false] ``` ### `blocks_storage_config` diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 285b6fcede3..feecef027fe 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -906,6 +906,12 @@ store_gateway_client: # is disabled). # CLI flag: -querier.shuffle-sharding-ingesters-lookback-period [shuffle_sharding_ingesters_lookback_period: | default = 0s] + +# Experimental. Use Thanos promql engine +# https://github.com/thanos-community/promql-engine rather than the Prometheus +# promql engine. 
+# CLI flag: -querier.thanos-engine +[thanos_engine: | default = false] ``` ### `query_frontend_config` diff --git a/go.mod b/go.mod index 834f5745e8d..4567fd3f33d 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 + github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389 github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a github.com/uber/jaeger-client-go v2.30.0+incompatible @@ -209,6 +210,7 @@ require ( golang.org/x/text v0.5.0 // indirect golang.org/x/tools v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + gonum.org/v1/gonum v0.12.0 // indirect google.golang.org/api v0.102.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect diff --git a/go.sum b/go.sum index acdddf1c27c..d73a45131d2 100644 --- a/go.sum +++ b/go.sum @@ -1507,6 +1507,8 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= +github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389 h1:ZER0Tf+2PnNEd4bcOJFDQ48dOraRyvu5tfYWySMS2S4= +github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389/go.mod h1:GJkKOtKfXos1xbTmHBnX3YTCov8enxdAS1woR6/h4CI= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a h1:oN3VupYNkavPRvdXwq71p54SAFSbOGvL0qL7CeKFrJ0= @@ -2172,6 +2174,8 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNq gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= diff --git a/integration/querier_remote_read_test.go b/integration/querier_remote_read_test.go index 2e3b0ee20ab..99a62ebb631 100644 --- a/integration/querier_remote_read_test.go +++ b/integration/querier_remote_read_test.go @@ -6,8 +6,10 @@ package integration import ( "bytes" "context" + "fmt" "io" "net/http" + "strconv" "testing" "time" @@ -25,99 +27,105 @@ import ( ) func TestQuerierRemoteRead(t *testing.T) { - s, err := e2e.NewScenario(networkName) - require.NoError(t, err) - defer s.Close() - - flags := mergeFlags(BlocksStorageFlags(), map[string]string{}) - - // Start dependencies. 
- minio := e2edb.NewMinio(9000, bucketName) - consul := e2edb.NewConsul() - require.NoError(t, s.StartAndWaitReady(consul, minio)) - - // Start Cortex components for the write path. - distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - require.NoError(t, s.StartAndWaitReady(distributor, ingester)) - - // Wait until the distributor has updated the ring. - require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) - - // Push a series for each user to Cortex. - now := time.Now() - - c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") - require.NoError(t, err) - - series, expectedVectors := generateSeries("series_1", now) - res, err := c.Push(series) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) - - storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - require.NoError(t, s.StartAndWaitReady(storeGateway)) - querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), BlocksStorageFlags(), "") - require.NoError(t, s.StartAndWaitReady(querier)) - - // Wait until the querier has updated the ring. - require.NoError(t, querier.WaitSumMetrics(e2e.Equals(2*512), "cortex_ring_tokens_total")) - - matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "series_1") - require.NoError(t, err) - - startMs := now.Add(-1*time.Minute).Unix() * 1000 - endMs := now.Add(time.Minute).Unix() * 1000 - - q, err := remote.ToQuery(startMs, endMs, []*labels.Matcher{matcher}, &storage.SelectHints{ - Step: 1, - Start: startMs, - End: endMs, - }) - require.NoError(t, err) - - req := &prompb.ReadRequest{ - Queries: []*prompb.Query{q}, - AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS}, + for _, thanosEngine := range []bool{false, true} { + t.Run(fmt.Sprintf("thanosEngine=%t", thanosEngine), func(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-querier.thanos-engine": strconv.FormatBool(thanosEngine), + }) + + // Start dependencies. + minio := e2edb.NewMinio(9000, bucketName) + consul := e2edb.NewConsul() + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Start Cortex components for the write path. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester)) + + // Wait until the distributor has updated the ring. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + // Push a series for each user to Cortex. 
+ now := time.Now() + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") + require.NoError(t, err) + + series, expectedVectors := generateSeries("series_1", now) + res, err := c.Push(series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(storeGateway)) + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(querier)) + + // Wait until the querier has updated the ring. + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(2*512), "cortex_ring_tokens_total")) + + matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "series_1") + require.NoError(t, err) + + startMs := now.Add(-1*time.Minute).Unix() * 1000 + endMs := now.Add(time.Minute).Unix() * 1000 + + q, err := remote.ToQuery(startMs, endMs, []*labels.Matcher{matcher}, &storage.SelectHints{ + Step: 1, + Start: startMs, + End: endMs, + }) + require.NoError(t, err) + + req := &prompb.ReadRequest{ + Queries: []*prompb.Query{q}, + AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS}, + } + + data, err := proto.Marshal(req) + require.NoError(t, err) + compressed := snappy.Encode(nil, data) + + // Call the remote read API endpoint with a timeout. + httpReqCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + httpReq, err := http.NewRequestWithContext(httpReqCtx, "POST", "http://"+querier.HTTPEndpoint()+"/prometheus/api/v1/read", bytes.NewReader(compressed)) + require.NoError(t, err) + httpReq.Header.Set("X-Scope-OrgID", "user-1") + httpReq.Header.Add("Content-Encoding", "snappy") + httpReq.Header.Add("Accept-Encoding", "snappy") + httpReq.Header.Set("Content-Type", "application/x-protobuf") + httpReq.Header.Set("User-Agent", "Prometheus/1.8.2") + httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") + + httpResp, err := http.DefaultClient.Do(httpReq) + require.NoError(t, err) + require.Equal(t, http.StatusOK, httpResp.StatusCode) + + compressed, err = io.ReadAll(httpResp.Body) + require.NoError(t, err) + + uncompressed, err := snappy.Decode(nil, compressed) + require.NoError(t, err) + + var resp prompb.ReadResponse + err = proto.Unmarshal(uncompressed, &resp) + require.NoError(t, err) + + // Validate the returned remote read data matches what was written + require.Len(t, resp.Results, 1) + require.Len(t, resp.Results[0].Timeseries, 1) + require.Len(t, resp.Results[0].Timeseries[0].Labels, 1) + require.Equal(t, "series_1", resp.Results[0].Timeseries[0].Labels[0].GetValue()) + require.Len(t, resp.Results[0].Timeseries[0].Samples, 1) + require.Equal(t, int64(expectedVectors[0].Timestamp), resp.Results[0].Timeseries[0].Samples[0].Timestamp) + require.Equal(t, float64(expectedVectors[0].Value), resp.Results[0].Timeseries[0].Samples[0].Value) + }) } - - data, err := proto.Marshal(req) - require.NoError(t, err) - compressed := snappy.Encode(nil, data) - - // Call the remote read API endpoint with a timeout. 
- httpReqCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - httpReq, err := http.NewRequestWithContext(httpReqCtx, "POST", "http://"+querier.HTTPEndpoint()+"/prometheus/api/v1/read", bytes.NewReader(compressed)) - require.NoError(t, err) - httpReq.Header.Set("X-Scope-OrgID", "user-1") - httpReq.Header.Add("Content-Encoding", "snappy") - httpReq.Header.Add("Accept-Encoding", "snappy") - httpReq.Header.Set("Content-Type", "application/x-protobuf") - httpReq.Header.Set("User-Agent", "Prometheus/1.8.2") - httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") - - httpResp, err := http.DefaultClient.Do(httpReq) - require.NoError(t, err) - require.Equal(t, http.StatusOK, httpResp.StatusCode) - - compressed, err = io.ReadAll(httpResp.Body) - require.NoError(t, err) - - uncompressed, err := snappy.Decode(nil, compressed) - require.NoError(t, err) - - var resp prompb.ReadResponse - err = proto.Unmarshal(uncompressed, &resp) - require.NoError(t, err) - - // Validate the returned remote read data matches what was written - require.Len(t, resp.Results, 1) - require.Len(t, resp.Results[0].Timeseries, 1) - require.Len(t, resp.Results[0].Timeseries[0].Labels, 1) - require.Equal(t, "series_1", resp.Results[0].Timeseries[0].Labels[0].GetValue()) - require.Len(t, resp.Results[0].Timeseries[0].Samples, 1) - require.Equal(t, int64(expectedVectors[0].Timestamp), resp.Results[0].Timeseries[0].Samples[0].Timestamp) - require.Equal(t, float64(expectedVectors[0].Value), resp.Results[0].Timeseries[0].Samples[0].Value) } diff --git a/integration/querier_test.go b/integration/querier_test.go index 7f6f0cb5584..551a15ac465 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -71,189 +71,192 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { blocksShardingStrategy: "shuffle-sharding", tenantShardSize: 1, ingesterStreamingEnabled: true, - indexCacheBackend: tsdb.IndexCacheBackendMemcached, + indexCacheBackend: tsdb.IndexCacheBackendInMemory, bucketIndexEnabled: true, }, } for testName, testCfg := range tests { - t.Run(testName, func(t *testing.T) { - const blockRangePeriod = 5 * time.Second - - s, err := e2e.NewScenario(networkName) - require.NoError(t, err) - defer s.Close() - - // Configure the blocks storage to frequently compact TSDB head - // and ship blocks to the storage. - flags := mergeFlags(BlocksStorageFlags(), map[string]string{ - "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), - "-blocks-storage.tsdb.ship-interval": "1s", - "-blocks-storage.bucket-store.sync-interval": "1s", - "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), - "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, - "-store-gateway.sharding-enabled": strconv.FormatBool(testCfg.blocksShardingStrategy != ""), - "-store-gateway.sharding-strategy": testCfg.blocksShardingStrategy, - "-store-gateway.tenant-shard-size": fmt.Sprintf("%d", testCfg.tenantShardSize), - "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), - "-querier.query-store-for-labels-enabled": "true", - "-blocks-storage.bucket-store.bucket-index.enabled": strconv.FormatBool(testCfg.bucketIndexEnabled), - }) + for _, thanosEngine := range []bool{false, true} { + t.Run(fmt.Sprintf("%s,thanosEngine=%t", testName, thanosEngine), func(t *testing.T) { + const blockRangePeriod = 5 * time.Second - // Start dependencies. 
- consul := e2edb.NewConsul() - minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) - memcached := e2ecache.NewMemcached() - require.NoError(t, s.StartAndWaitReady(consul, minio, memcached)) - - // Add the memcached address to the flags. - flags["-blocks-storage.bucket-store.index-cache.memcached.addresses"] = "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort) - - // Start Cortex components. - distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - storeGateway1 := e2ecortex.NewStoreGateway("store-gateway-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - storeGateway2 := e2ecortex.NewStoreGateway("store-gateway-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - storeGateways := e2ecortex.NewCompositeCortexService(storeGateway1, storeGateway2) - require.NoError(t, s.StartAndWaitReady(distributor, ingester, storeGateway1, storeGateway2)) - - // Start the querier with configuring store-gateway addresses if sharding is disabled. - if testCfg.blocksShardingStrategy == "" { - flags = mergeFlags(flags, map[string]string{ - "-querier.store-gateway-addresses": strings.Join([]string{storeGateway1.NetworkGRPCEndpoint(), storeGateway2.NetworkGRPCEndpoint()}, ","), + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Configure the blocks storage to frequently compact TSDB head + // and ship blocks to the storage. + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, + "-store-gateway.sharding-enabled": strconv.FormatBool(testCfg.blocksShardingStrategy != ""), + "-store-gateway.sharding-strategy": testCfg.blocksShardingStrategy, + "-store-gateway.tenant-shard-size": fmt.Sprintf("%d", testCfg.tenantShardSize), + "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), + "-querier.query-store-for-labels-enabled": "true", + "-querier.thanos-engine": strconv.FormatBool(thanosEngine), + "-blocks-storage.bucket-store.bucket-index.enabled": strconv.FormatBool(testCfg.bucketIndexEnabled), }) - } - querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") - require.NoError(t, s.StartAndWaitReady(querier)) - - // Wait until both the distributor and querier have updated the ring. The querier will also watch - // the store-gateway ring if blocks sharding is enabled. - require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) - if testCfg.blocksShardingStrategy != "" { - require.NoError(t, querier.WaitSumMetrics(e2e.Equals(float64(512+(512*storeGateways.NumInstances()))), "cortex_ring_tokens_total")) - } else { - require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) - } - c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", "user-1") - require.NoError(t, err) + // Start dependencies. 
+ consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) + memcached := e2ecache.NewMemcached() + require.NoError(t, s.StartAndWaitReady(consul, minio, memcached)) + + // Add the memcached address to the flags. + flags["-blocks-storage.bucket-store.index-cache.memcached.addresses"] = "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort) + + // Start Cortex components. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + storeGateway1 := e2ecortex.NewStoreGateway("store-gateway-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + storeGateway2 := e2ecortex.NewStoreGateway("store-gateway-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + storeGateways := e2ecortex.NewCompositeCortexService(storeGateway1, storeGateway2) + require.NoError(t, s.StartAndWaitReady(distributor, ingester, storeGateway1, storeGateway2)) + + // Start the querier with configuring store-gateway addresses if sharding is disabled. + if testCfg.blocksShardingStrategy == "" { + flags = mergeFlags(flags, map[string]string{ + "-querier.store-gateway-addresses": strings.Join([]string{storeGateway1.NetworkGRPCEndpoint(), storeGateway2.NetworkGRPCEndpoint()}, ","), + }) + } + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(querier)) + + // Wait until both the distributor and querier have updated the ring. The querier will also watch + // the store-gateway ring if blocks sharding is enabled. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + if testCfg.blocksShardingStrategy != "" { + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(float64(512+(512*storeGateways.NumInstances()))), "cortex_ring_tokens_total")) + } else { + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + } - // Push some series to Cortex. - series1Timestamp := time.Now() - series2Timestamp := series1Timestamp.Add(blockRangePeriod * 2) - series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"}) - series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"}) + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) - res, err := c.Push(series1) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) + // Push some series to Cortex. + series1Timestamp := time.Now() + series2Timestamp := series1Timestamp.Add(blockRangePeriod * 2) + series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"}) + series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"}) - res, err = c.Push(series2) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) + res, err := c.Push(series1) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) - // Wait until the TSDB head is compacted and shipped to the storage. - // The shipped block contains the 1st series, while the 2ns series in in the head. 
- require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_shipper_uploads_total")) - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series")) - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_memory_series_created_total")) - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series_removed_total")) + res, err = c.Push(series2) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) - // Push another series to further compact another block and delete the first block - // due to expired retention. - series3Timestamp := series2Timestamp.Add(blockRangePeriod * 2) - series3, expectedVector3 := generateSeries("series_3", series3Timestamp, prompb.Label{Name: "series_3", Value: "series_3"}) + // Wait until the TSDB head is compacted and shipped to the storage. + // The shipped block contains the 1st series, while the 2ns series in in the head. + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_shipper_uploads_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_memory_series_created_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series_removed_total")) - res, err = c.Push(series3) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) - - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_shipper_uploads_total")) - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series")) - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(3), "cortex_ingester_memory_series_created_total")) - require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_memory_series_removed_total")) - - if testCfg.bucketIndexEnabled { - // Start the compactor to have the bucket index created before querying. - compactor := e2ecortex.NewCompactor("compactor", consul.NetworkHTTPEndpoint(), flags, "") - require.NoError(t, s.StartAndWaitReady(compactor)) - } else { - // Wait until the querier has discovered the uploaded blocks. - require.NoError(t, querier.WaitSumMetrics(e2e.Equals(2), "cortex_blocks_meta_synced")) - } + // Push another series to further compact another block and delete the first block + // due to expired retention. + series3Timestamp := series2Timestamp.Add(blockRangePeriod * 2) + series3, expectedVector3 := generateSeries("series_3", series3Timestamp, prompb.Label{Name: "series_3", Value: "series_3"}) - // Wait until the store-gateway has synched the new uploaded blocks. When sharding is enabled - // we don't known which store-gateway instance will synch the blocks, so we need to wait on - // metrics extracted from all instances. - if testCfg.blocksShardingStrategy != "" { - // If shuffle sharding is enabled and we have tenant shard size set to 1, - // then the metric only appears in one store gateway instance. - require.NoError(t, storeGateways.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"cortex_bucket_store_blocks_loaded"}, e2e.SkipMissingMetrics)) - } else { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(2*storeGateways.NumInstances())), "cortex_bucket_store_blocks_loaded")) - } + res, err = c.Push(series3) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) - // Check how many tenants have been discovered and synced by store-gateways. 
- require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(1*storeGateways.NumInstances())), "cortex_bucket_stores_tenants_discovered")) - if testCfg.blocksShardingStrategy == "shuffle-sharding" { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(1)), "cortex_bucket_stores_tenants_synced")) - } else { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(1*storeGateways.NumInstances())), "cortex_bucket_stores_tenants_synced")) - } + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_shipper_uploads_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(3), "cortex_ingester_memory_series_created_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_memory_series_removed_total")) - // Query back the series (1 only in the storage, 1 only in the ingesters, 1 on both). - result, err := c.Query("series_1", series1Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector1, result.(model.Vector)) + if testCfg.bucketIndexEnabled { + // Start the compactor to have the bucket index created before querying. + compactor := e2ecortex.NewCompactor("compactor", consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(compactor)) + } else { + // Wait until the querier has discovered the uploaded blocks. + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(2), "cortex_blocks_meta_synced")) + } - result, err = c.Query("series_2", series2Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector2, result.(model.Vector)) + // Wait until the store-gateway has synched the new uploaded blocks. When sharding is enabled + // we don't known which store-gateway instance will synch the blocks, so we need to wait on + // metrics extracted from all instances. + if testCfg.blocksShardingStrategy != "" { + // If shuffle sharding is enabled and we have tenant shard size set to 1, + // then the metric only appears in one store gateway instance. + require.NoError(t, storeGateways.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"cortex_bucket_store_blocks_loaded"}, e2e.SkipMissingMetrics)) + } else { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(2*storeGateways.NumInstances())), "cortex_bucket_store_blocks_loaded")) + } - result, err = c.Query("series_3", series3Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector3, result.(model.Vector)) - - // Check the in-memory index cache metrics (in the store-gateway). 
- require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(7), "thanos_store_index_cache_requests_total")) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty - - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items")) // 2 series both for postings and series cache - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(11), "thanos_memcached_operations_total")) // 7 gets + 4 sets - } + // Check how many tenants have been discovered and synced by store-gateways. + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(1*storeGateways.NumInstances())), "cortex_bucket_stores_tenants_discovered")) + if testCfg.blocksShardingStrategy == "shuffle-sharding" { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(1)), "cortex_bucket_stores_tenants_synced")) + } else { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(float64(1*storeGateways.NumInstances())), "cortex_bucket_stores_tenants_synced")) + } - // Query back again the 1st series from storage. This time it should use the index cache. - result, err = c.Query("series_1", series1Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector1, result.(model.Vector)) + // Query back the series (1 only in the storage, 1 only in the ingesters, 1 on both). + result, err := c.Query("series_1", series1Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector1, result.(model.Vector)) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(7+2), "thanos_store_index_cache_requests_total")) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache + result, err = c.Query("series_2", series2Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector2, result.(model.Vector)) - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items")) // as before - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items_added_total")) // as before - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(11+2), "thanos_memcached_operations_total")) // as before + 2 gets - } + result, err = c.Query("series_3", series3Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector3, result.(model.Vector)) + + // Check the in-memory index cache metrics (in the store-gateway). 
+ require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(7), "thanos_store_index_cache_requests_total")) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty + + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items")) // 2 series both for postings and series cache + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(11), "thanos_memcached_operations_total")) // 7 gets + 4 sets + } - // Query metadata. - testMetadataQueriesWithBlocksStorage(t, c, series1[0], series2[0], series3[0], blockRangePeriod) + // Query back again the 1st series from storage. This time it should use the index cache. + result, err = c.Query("series_1", series1Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector1, result.(model.Vector)) - // Ensure no service-specific metrics prefix is used by the wrong service. - assertServiceMetricsPrefixes(t, Distributor, distributor) - assertServiceMetricsPrefixes(t, Ingester, ingester) - assertServiceMetricsPrefixes(t, Querier, querier) - assertServiceMetricsPrefixes(t, StoreGateway, storeGateway1) - assertServiceMetricsPrefixes(t, StoreGateway, storeGateway2) - }) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(7+2), "thanos_store_index_cache_requests_total")) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache + + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items")) // as before + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2), "thanos_store_index_cache_items_added_total")) // as before + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(11+2), "thanos_memcached_operations_total")) // as before + 2 gets + } + + // Query metadata. + testMetadataQueriesWithBlocksStorage(t, c, series1[0], series2[0], series3[0], blockRangePeriod) + + // Ensure no service-specific metrics prefix is used by the wrong service. + assertServiceMetricsPrefixes(t, Distributor, distributor) + assertServiceMetricsPrefixes(t, Ingester, ingester) + assertServiceMetricsPrefixes(t, Querier, querier) + assertServiceMetricsPrefixes(t, StoreGateway, storeGateway1) + assertServiceMetricsPrefixes(t, StoreGateway, storeGateway2) + }) + } } } @@ -296,168 +299,171 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { } for testName, testCfg := range tests { - t.Run(testName, func(t *testing.T) { - const blockRangePeriod = 5 * time.Second + for _, thanosEngine := range []bool{false, true} { + t.Run(fmt.Sprintf("%s,thanosEngine=%t", testName, thanosEngine), func(t *testing.T) { + const blockRangePeriod = 5 * time.Second - s, err := e2e.NewScenario(networkName) - require.NoError(t, err) - defer s.Close() - - // Start dependencies. 
- consul := e2edb.NewConsul() - minio := e2edb.NewMinio(9000, bucketName) - memcached := e2ecache.NewMemcached() - require.NoError(t, s.StartAndWaitReady(consul, minio, memcached)) - - // Setting the replication factor equal to the number of Cortex replicas - // make sure each replica creates the same blocks, so the total number of - // blocks is stable and easy to assert on. - const seriesReplicationFactor = 2 - - // Configure the blocks storage to frequently compact TSDB head - // and ship blocks to the storage. - flags := mergeFlags(BlocksStorageFlags(), map[string]string{ - "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), - "-blocks-storage.tsdb.ship-interval": "1s", - "-blocks-storage.bucket-store.sync-interval": "1s", - "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), - "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, - "-blocks-storage.bucket-store.index-cache.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), - "-blocks-storage.bucket-store.bucket-index.enabled": strconv.FormatBool(testCfg.bucketIndexEnabled), - "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), - "-querier.query-store-for-labels-enabled": "true", - // Ingester. - "-ring.store": "consul", - "-consul.hostname": consul.NetworkHTTPEndpoint(), - // Distributor. - "-distributor.replication-factor": strconv.FormatInt(seriesReplicationFactor, 10), - // Store-gateway. - "-store-gateway.sharding-enabled": strconv.FormatBool(testCfg.blocksShardingEnabled), - "-store-gateway.sharding-ring.store": "consul", - "-store-gateway.sharding-ring.consul.hostname": consul.NetworkHTTPEndpoint(), - "-store-gateway.sharding-ring.replication-factor": "1", - }) + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, bucketName) + memcached := e2ecache.NewMemcached() + require.NoError(t, s.StartAndWaitReady(consul, minio, memcached)) + + // Setting the replication factor equal to the number of Cortex replicas + // make sure each replica creates the same blocks, so the total number of + // blocks is stable and easy to assert on. + const seriesReplicationFactor = 2 + + // Configure the blocks storage to frequently compact TSDB head + // and ship blocks to the storage. + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, + "-blocks-storage.bucket-store.index-cache.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), + "-blocks-storage.bucket-store.bucket-index.enabled": strconv.FormatBool(testCfg.bucketIndexEnabled), + "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), + "-querier.query-store-for-labels-enabled": "true", + "-querier.thanos-engine": strconv.FormatBool(thanosEngine), + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": strconv.FormatInt(seriesReplicationFactor, 10), + // Store-gateway. 
+ "-store-gateway.sharding-enabled": strconv.FormatBool(testCfg.blocksShardingEnabled), + "-store-gateway.sharding-ring.store": "consul", + "-store-gateway.sharding-ring.consul.hostname": consul.NetworkHTTPEndpoint(), + "-store-gateway.sharding-ring.replication-factor": "1", + }) - // Start Cortex replicas. - cortex1 := e2ecortex.NewSingleBinary("cortex-1", flags, "") - cortex2 := e2ecortex.NewSingleBinary("cortex-2", flags, "") - cluster := e2ecortex.NewCompositeCortexService(cortex1, cortex2) - require.NoError(t, s.StartAndWaitReady(cortex1, cortex2)) + // Start Cortex replicas. + cortex1 := e2ecortex.NewSingleBinary("cortex-1", flags, "") + cortex2 := e2ecortex.NewSingleBinary("cortex-2", flags, "") + cluster := e2ecortex.NewCompositeCortexService(cortex1, cortex2) + require.NoError(t, s.StartAndWaitReady(cortex1, cortex2)) - // Wait until Cortex replicas have updated the ring state. - for _, replica := range cluster.Instances() { - numTokensPerInstance := 512 // Ingesters ring. - if testCfg.blocksShardingEnabled { - numTokensPerInstance += 512 * 2 // Store-gateway ring (read both by the querier and store-gateway). - } + // Wait until Cortex replicas have updated the ring state. + for _, replica := range cluster.Instances() { + numTokensPerInstance := 512 // Ingesters ring. + if testCfg.blocksShardingEnabled { + numTokensPerInstance += 512 * 2 // Store-gateway ring (read both by the querier and store-gateway). + } - require.NoError(t, replica.WaitSumMetrics(e2e.Equals(float64(numTokensPerInstance*cluster.NumInstances())), "cortex_ring_tokens_total")) - } + require.NoError(t, replica.WaitSumMetrics(e2e.Equals(float64(numTokensPerInstance*cluster.NumInstances())), "cortex_ring_tokens_total")) + } - c, err := e2ecortex.NewClient(cortex1.HTTPEndpoint(), cortex2.HTTPEndpoint(), "", "", "user-1") - require.NoError(t, err) + c, err := e2ecortex.NewClient(cortex1.HTTPEndpoint(), cortex2.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) - // Push some series to Cortex. - series1Timestamp := time.Now() - series2Timestamp := series1Timestamp.Add(blockRangePeriod * 2) - series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"}) - series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"}) + // Push some series to Cortex. + series1Timestamp := time.Now() + series2Timestamp := series1Timestamp.Add(blockRangePeriod * 2) + series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"}) + series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"}) - res, err := c.Push(series1) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) + res, err := c.Push(series1) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) - res, err = c.Push(series2) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) + res, err = c.Push(series2) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) - // Wait until the TSDB head is compacted and shipped to the storage. - // The shipped block contains the 1st series, while the 2ns series in in the head. 
- require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_shipper_uploads_total")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_memory_series")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*cluster.NumInstances())), "cortex_ingester_memory_series_created_total")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_memory_series_removed_total")) + // Wait until the TSDB head is compacted and shipped to the storage. + // The shipped block contains the 1st series, while the 2ns series in in the head. + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_shipper_uploads_total")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_memory_series")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*cluster.NumInstances())), "cortex_ingester_memory_series_created_total")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_memory_series_removed_total")) - // Push another series to further compact another block and delete the first block - // due to expired retention. - series3Timestamp := series2Timestamp.Add(blockRangePeriod * 2) - series3, expectedVector3 := generateSeries("series_3", series3Timestamp, prompb.Label{Name: "series_3", Value: "series_3"}) + // Push another series to further compact another block and delete the first block + // due to expired retention. + series3Timestamp := series2Timestamp.Add(blockRangePeriod * 2) + series3, expectedVector3 := generateSeries("series_3", series3Timestamp, prompb.Label{Name: "series_3", Value: "series_3"}) - res, err = c.Push(series3) - require.NoError(t, err) - require.Equal(t, 200, res.StatusCode) - - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*cluster.NumInstances())), "cortex_ingester_shipper_uploads_total")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_memory_series")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(3*cluster.NumInstances())), "cortex_ingester_memory_series_created_total")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*cluster.NumInstances())), "cortex_ingester_memory_series_removed_total")) - - if testCfg.bucketIndexEnabled { - // Start the compactor to have the bucket index created before querying. We need to run the compactor - // as a separate service because it's currently not part of the single binary. - compactor := e2ecortex.NewCompactor("compactor", consul.NetworkHTTPEndpoint(), flags, "") - require.NoError(t, s.StartAndWaitReady(compactor)) - } else { - // Wait until the querier has discovered the uploaded blocks (discovered both by the querier and store-gateway). 
- require.NoError(t, cluster.WaitSumMetricsWithOptions(e2e.Equals(float64(2*cluster.NumInstances()*2)), []string{"cortex_blocks_meta_synced"}, e2e.WithLabelMatchers( - labels.MustNewMatcher(labels.MatchEqual, "component", "querier")))) - } + res, err = c.Push(series3) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*cluster.NumInstances())), "cortex_ingester_shipper_uploads_total")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(1*cluster.NumInstances())), "cortex_ingester_memory_series")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(3*cluster.NumInstances())), "cortex_ingester_memory_series_created_total")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*cluster.NumInstances())), "cortex_ingester_memory_series_removed_total")) + + if testCfg.bucketIndexEnabled { + // Start the compactor to have the bucket index created before querying. We need to run the compactor + // as a separate service because it's currently not part of the single binary. + compactor := e2ecortex.NewCompactor("compactor", consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(compactor)) + } else { + // Wait until the querier has discovered the uploaded blocks (discovered both by the querier and store-gateway). + require.NoError(t, cluster.WaitSumMetricsWithOptions(e2e.Equals(float64(2*cluster.NumInstances()*2)), []string{"cortex_blocks_meta_synced"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "component", "querier")))) + } - // Wait until the store-gateway has synched the new uploaded blocks. The number of blocks loaded - // may be greater than expected if the compactor is running (there may have been compacted). - const shippedBlocks = 2 - if testCfg.blocksShardingEnabled { - require.NoError(t, cluster.WaitSumMetrics(e2e.GreaterOrEqual(float64(shippedBlocks*seriesReplicationFactor)), "cortex_bucket_store_blocks_loaded")) - } else { - require.NoError(t, cluster.WaitSumMetrics(e2e.GreaterOrEqual(float64(shippedBlocks*seriesReplicationFactor*cluster.NumInstances())), "cortex_bucket_store_blocks_loaded")) - } + // Wait until the store-gateway has synched the new uploaded blocks. The number of blocks loaded + // may be greater than expected if the compactor is running (there may have been compacted). + const shippedBlocks = 2 + if testCfg.blocksShardingEnabled { + require.NoError(t, cluster.WaitSumMetrics(e2e.GreaterOrEqual(float64(shippedBlocks*seriesReplicationFactor)), "cortex_bucket_store_blocks_loaded")) + } else { + require.NoError(t, cluster.WaitSumMetrics(e2e.GreaterOrEqual(float64(shippedBlocks*seriesReplicationFactor*cluster.NumInstances())), "cortex_bucket_store_blocks_loaded")) + } - // Query back the series (1 only in the storage, 1 only in the ingesters, 1 on both). - result, err := c.Query("series_1", series1Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector1, result.(model.Vector)) + // Query back the series (1 only in the storage, 1 only in the ingesters, 1 on both). 
+ result, err := c.Query("series_1", series1Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector1, result.(model.Vector)) - result, err = c.Query("series_2", series2Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector2, result.(model.Vector)) + result, err = c.Query("series_2", series2Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector2, result.(model.Vector)) - result, err = c.Query("series_3", series3Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector3, result.(model.Vector)) - - // Check the in-memory index cache metrics (in the store-gateway). - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(7*seriesReplicationFactor)), "thanos_store_index_cache_requests_total")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty - - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items")) // 2 series both for postings and series cache - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(11*seriesReplicationFactor)), "thanos_memcached_operations_total")) // 7 gets + 4 sets - } + result, err = c.Query("series_3", series3Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector3, result.(model.Vector)) + + // Check the in-memory index cache metrics (in the store-gateway). + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(7*seriesReplicationFactor)), "thanos_store_index_cache_requests_total")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty + + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items")) // 2 series both for postings and series cache + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(11*seriesReplicationFactor)), "thanos_memcached_operations_total")) // 7 gets + 4 sets + } - // Query back again the 1st series from storage. This time it should use the index cache. - result, err = c.Query("series_1", series1Timestamp) - require.NoError(t, err) - require.Equal(t, model.ValVector, result.Type()) - assert.Equal(t, expectedVector1, result.(model.Vector)) + // Query back again the 1st series from storage. This time it should use the index cache. 
+ result, err = c.Query("series_1", series1Timestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + assert.Equal(t, expectedVector1, result.(model.Vector)) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64((7+2)*seriesReplicationFactor)), "thanos_store_index_cache_requests_total")) - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*seriesReplicationFactor)), "thanos_store_index_cache_hits_total")) // this time has used the index cache + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64((7+2)*seriesReplicationFactor)), "thanos_store_index_cache_requests_total")) + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*seriesReplicationFactor)), "thanos_store_index_cache_hits_total")) // this time has used the index cache - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items")) // as before - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items_added_total")) // as before - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64((11+2)*seriesReplicationFactor)), "thanos_memcached_operations_total")) // as before + 2 gets - } + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items")) // as before + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64(2*2*seriesReplicationFactor)), "thanos_store_index_cache_items_added_total")) // as before + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, cluster.WaitSumMetrics(e2e.Equals(float64((11+2)*seriesReplicationFactor)), "thanos_memcached_operations_total")) // as before + 2 gets + } - // Query metadata. - testMetadataQueriesWithBlocksStorage(t, c, series1[0], series2[0], series3[0], blockRangePeriod) - }) + // Query metadata. + testMetadataQueriesWithBlocksStorage(t, c, series1[0], series2[0], series3[0], blockRangePeriod) + }) + } } } diff --git a/integration/ruler_test.go b/integration/ruler_test.go index bb974051ee6..b1b6bab1657 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -39,94 +39,102 @@ func TestRulerAPI(t *testing.T) { ) ruleGroup := createTestRuleGroup(t) - s, err := e2e.NewScenario(networkName) - require.NoError(t, err) - defer s.Close() + for _, thanosEngine := range []bool{false, true} { + t.Run(fmt.Sprintf("thanosEngine=%t", thanosEngine), func(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() - // Start dependencies. - consul := e2edb.NewConsul() - minio := e2edb.NewMinio(9000, rulestoreBucketName, bucketName) - require.NoError(t, s.StartAndWaitReady(consul, minio)) + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, rulestoreBucketName, bucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) - // Start Cortex components. 
- ruler := e2ecortex.NewRuler("ruler", consul.NetworkHTTPEndpoint(), mergeFlags(BlocksStorageFlags(), RulerFlags()), "") - require.NoError(t, s.StartAndWaitReady(ruler)) + flags := mergeFlags(BlocksStorageFlags(), RulerFlags(), map[string]string{ + "-querier.thanos-engine": strconv.FormatBool(thanosEngine), + }) - // Create a client with the ruler address configured - c, err := e2ecortex.NewClient("", "", "", ruler.HTTPEndpoint(), "user-1") - require.NoError(t, err) + // Start Cortex components. + ruler := e2ecortex.NewRuler("ruler", consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(ruler)) - // Set the rule group into the ruler - require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceOne)) + // Create a client with the ruler address configured + c, err := e2ecortex.NewClient("", "", "", ruler.HTTPEndpoint(), "user-1") + require.NoError(t, err) - // Wait until the user manager is created - require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(1), "cortex_ruler_managers_total")) + // Set the rule group into the ruler + require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceOne)) - // Check to ensure the rules running in the ruler match what was set - rgs, err := c.GetRuleGroups() - require.NoError(t, err) + // Wait until the user manager is created + require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(1), "cortex_ruler_managers_total")) - retrievedNamespace, exists := rgs[namespaceOne] - require.True(t, exists) - require.Len(t, retrievedNamespace, 1) - require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) + // Check to ensure the rules running in the ruler match what was set + rgs, err := c.GetRuleGroups() + require.NoError(t, err) - // Add a second rule group with a similar namespace - require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceTwo)) - require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(2), "cortex_prometheus_rule_group_rules")) + retrievedNamespace, exists := rgs[namespaceOne] + require.True(t, exists) + require.Len(t, retrievedNamespace, 1) + require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) - // Check to ensure the rules running in the ruler match what was set - rgs, err = c.GetRuleGroups() - require.NoError(t, err) + // Add a second rule group with a similar namespace + require.NoError(t, c.SetRuleGroup(ruleGroup, namespaceTwo)) + require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(2), "cortex_prometheus_rule_group_rules")) - retrievedNamespace, exists = rgs[namespaceOne] - require.True(t, exists) - require.Len(t, retrievedNamespace, 1) - require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) + // Check to ensure the rules running in the ruler match what was set + rgs, err = c.GetRuleGroups() + require.NoError(t, err) - retrievedNamespace, exists = rgs[namespaceTwo] - require.True(t, exists) - require.Len(t, retrievedNamespace, 1) - require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) + retrievedNamespace, exists = rgs[namespaceOne] + require.True(t, exists) + require.Len(t, retrievedNamespace, 1) + require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) - // Test compression by inspecting the response Headers - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/api/prom/rules", ruler.HTTPEndpoint()), nil) - require.NoError(t, err) + retrievedNamespace, exists = rgs[namespaceTwo] + require.True(t, exists) + require.Len(t, retrievedNamespace, 1) + require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) - req.Header.Set("X-Scope-OrgID", "user-1") - req.Header.Set("Accept-Encoding", "gzip") + // Test 
compression by inspecting the response Headers + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/api/prom/rules", ruler.HTTPEndpoint()), nil) + require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() + req.Header.Set("X-Scope-OrgID", "user-1") + req.Header.Set("Accept-Encoding", "gzip") - // Execute HTTP request - res, err := http.DefaultClient.Do(req.WithContext(ctx)) - require.NoError(t, err) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() - defer res.Body.Close() - // We assert on the Vary header as the minimum response size for enabling compression is 1500 bytes. - // This is enough to know whenever the handler for compression is enabled or not. - require.Equal(t, "Accept-Encoding", res.Header.Get("Vary")) + // Execute HTTP request + res, err := http.DefaultClient.Do(req.WithContext(ctx)) + require.NoError(t, err) - // Delete the set rule groups - require.NoError(t, c.DeleteRuleGroup(namespaceOne, ruleGroup.Name)) - require.NoError(t, c.DeleteRuleNamespace(namespaceTwo)) + defer res.Body.Close() + // We assert on the Vary header as the minimum response size for enabling compression is 1500 bytes. + // This is enough to know whenever the handler for compression is enabled or not. + require.Equal(t, "Accept-Encoding", res.Header.Get("Vary")) - // Get the rule group and ensure it returns a 404 - resp, err := c.GetRuleGroup(namespaceOne, ruleGroup.Name) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, http.StatusNotFound, resp.StatusCode) + // Delete the set rule groups + require.NoError(t, c.DeleteRuleGroup(namespaceOne, ruleGroup.Name)) + require.NoError(t, c.DeleteRuleNamespace(namespaceTwo)) - // Wait until the users manager has been terminated - require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(0), "cortex_ruler_managers_total")) + // Get the rule group and ensure it returns a 404 + resp, err := c.GetRuleGroup(namespaceOne, ruleGroup.Name) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, http.StatusNotFound, resp.StatusCode) + + // Wait until the users manager has been terminated + require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(0), "cortex_ruler_managers_total")) - // Check to ensure the rule groups are no longer active - _, err = c.GetRuleGroups() - require.Error(t, err) + // Check to ensure the rule groups are no longer active + _, err = c.GetRuleGroups() + require.Error(t, err) - // Ensure no service-specific metrics prefix is used by the wrong service. - assertServiceMetricsPrefixes(t, Ruler, ruler) + // Ensure no service-specific metrics prefix is used by the wrong service. 
+ assertServiceMetricsPrefixes(t, Ruler, ruler) + }) + } } func TestRulerAPISingleBinary(t *testing.T) { diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 189ee6604b1..9470b86042b 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -15,6 +15,9 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" prom_storage "github.com/prometheus/prometheus/storage" + v1 "github.com/prometheus/prometheus/web/api/v1" + "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/logicalplan" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/querysharding" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" @@ -538,7 +541,9 @@ func (t *Cortex) initRuler() (serv services.Service, err error) { if t.Cfg.ExternalPusher != nil && t.Cfg.ExternalQueryable != nil { rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) - engine := promql.NewEngine(promql.EngineOpts{ + + var queryEngine v1.QueryEngine + opts := promql.EngineOpts{ Logger: util_log.Logger, Reg: rulerRegisterer, ActiveQueryTracker: createActiveQueryTracker(t.Cfg.Querier, util_log.Logger), @@ -551,9 +556,17 @@ func (t *Cortex) initRuler() (serv services.Service, err error) { NoStepSubqueryIntervalFn: func(int64) int64 { return t.Cfg.Querier.DefaultEvaluationInterval.Milliseconds() }, - }) + } + if t.Cfg.Querier.ThanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } - managerFactory := ruler.DefaultTenantManagerFactory(t.Cfg.Ruler, t.Cfg.ExternalPusher, t.Cfg.ExternalQueryable, engine, t.Overrides, prometheus.DefaultRegisterer) + managerFactory := ruler.DefaultTenantManagerFactory(t.Cfg.Ruler, t.Cfg.ExternalPusher, t.Cfg.ExternalQueryable, queryEngine, t.Overrides, prometheus.DefaultRegisterer) manager, err = ruler.NewDefaultMultiTenantManager(t.Cfg.Ruler, managerFactory, prometheus.DefaultRegisterer, util_log.Logger) } else { rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 67f258baed6..664ba36ca0f 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -22,9 +22,12 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/logicalplan" "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" @@ -1319,6 +1322,13 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) } func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { + logger := log.NewNopLogger() + opts := promql.EngineOpts{ + Logger: logger, + Timeout: 10 * time.Second, + MaxSamples: 1e6, + } + block1 := ulid.MustNew(1, nil) block2 := ulid.MustNew(2, nil) series1 := []labelpb.ZLabel{{Name: "__name__", Value: "metric_1"}} @@ -1341,103 +1351,109 @@ func 
TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { {T: 1589760015000, V: 2}, {T: 1589760030000, V: 2}, } + for _, thanosEngine := range []bool{false, true} { + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } - // Mock the finder to simulate we need to query two blocks. - finder := &blocksFinderMock{ - Service: services.NewIdleService(nil, nil), - } - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ - {ID: block1}, - {ID: block2}, - }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) - - // Mock the store to simulate each block is queried from a different store-gateway. - gateway1 := &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series1, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series1Samples[:3]...), // First half. + t.Run(fmt.Sprintf("thanos engine enabled=%t", thanosEngine), func(t *testing.T) { + // Mock the finder to simulate we need to query two blocks. + finder := &blocksFinderMock{ + Service: services.NewIdleService(nil, nil), + } + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + {ID: block1}, + {ID: block2}, + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) + + // Mock the store to simulate each block is queried from a different store-gateway. + gateway1 := &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series1, + Chunks: []storepb.AggrChunk{ + createAggrChunkWithSamples(series1Samples[:3]...), // First half. + }, + }, }, - }, - }, - }, { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series2, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series2Samples[:3]...), + }, { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series2, + Chunks: []storepb.AggrChunk{ + createAggrChunkWithSamples(series2Samples[:3]...), + }, + }, }, }, - }, - }, - mockHintsResponse(block1), - }} - - gateway2 := &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series1, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series1Samples[3:]...), // Second half. + mockHintsResponse(block1), + }} + + gateway2 := &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series1, + Chunks: []storepb.AggrChunk{ + createAggrChunkWithSamples(series1Samples[3:]...), // Second half. 
+ }, + }, + }, + }, { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series2, + Chunks: []storepb.AggrChunk{ + createAggrChunkWithSamples(series2Samples[3:]...), + }, + }, }, }, - }, - }, { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series2, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series2Samples[3:]...), + mockHintsResponse(block2), + }} + + stores := &blocksStoreSetMock{ + Service: services.NewIdleService(nil, nil), + mockedResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + gateway1: {block1}, + gateway2: {block2}, }, }, - }, - }, - mockHintsResponse(block2), - }} + } - stores := &blocksStoreSetMock{ - Service: services.NewIdleService(nil, nil), - mockedResponses: []interface{}{ - map[BlocksStoreClient][]ulid.ULID{ - gateway1: {block1}, - gateway2: {block2}, - }, - }, + // Instance the querier that will be executed to run the query. + queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, logger, nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) + defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck + + // Run a query. + q, err := queryEngine.NewRangeQuery(queryable, nil, `{__name__=~"metric.*"}`, time.Unix(1589759955, 0), time.Unix(1589760030, 0), 15*time.Second) + require.NoError(t, err) + + ctx := user.InjectOrgID(context.Background(), "user-1") + res := q.Exec(ctx) + require.NoError(t, err) + require.NoError(t, res.Err) + + matrix, err := res.Matrix() + require.NoError(t, err) + require.Len(t, matrix, 2) + + assert.Equal(t, labelpb.ZLabelsToPromLabels(series1), matrix[0].Metric) + assert.Equal(t, labelpb.ZLabelsToPromLabels(series2), matrix[1].Metric) + assert.Equal(t, series1Samples, matrix[0].Points) + assert.Equal(t, series2Samples, matrix[1].Points) + }) } - - // Instance the querier that will be executed to run the query. - logger := log.NewNopLogger() - queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, logger, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) - defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck - - engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, - Timeout: 10 * time.Second, - MaxSamples: 1e6, - }) - - // Run a query. 
- q, err := engine.NewRangeQuery(queryable, nil, `{__name__=~"metric.*"}`, time.Unix(1589759955, 0), time.Unix(1589760030, 0), 15*time.Second) - require.NoError(t, err) - - ctx := user.InjectOrgID(context.Background(), "user-1") - res := q.Exec(ctx) - require.NoError(t, err) - require.NoError(t, res.Err) - - matrix, err := res.Matrix() - require.NoError(t, err) - require.Len(t, matrix, 2) - - assert.Equal(t, labelpb.ZLabelsToPromLabels(series1), matrix[0].Metric) - assert.Equal(t, labelpb.ZLabelsToPromLabels(series2), matrix[1].Metric) - assert.Equal(t, series1Samples, matrix[0].Points) - assert.Equal(t, series2Samples, matrix[1].Points) } type blocksStoreSetMock struct { diff --git a/pkg/querier/chunk_store_queryable_test.go b/pkg/querier/chunk_store_queryable_test.go index 3dea589e419..34d89bf30ca 100644 --- a/pkg/querier/chunk_store_queryable_test.go +++ b/pkg/querier/chunk_store_queryable_test.go @@ -7,9 +7,14 @@ import ( "testing" "time" + "github.com/go-kit/log" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/require" + "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/logicalplan" "github.com/cortexproject/cortex/pkg/chunk" promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" @@ -19,14 +24,31 @@ import ( var _ SeriesWithChunks = &chunkSeries{} func TestChunkQueryable(t *testing.T) { - for _, testcase := range testcases { - for _, encoding := range encodings { - for _, query := range queries { - t.Run(fmt.Sprintf("%s/%s/%s", testcase.name, encoding.name, query.query), func(t *testing.T) { - store, from := makeMockChunkStore(t, 24, encoding.e) - queryable := newMockStoreQueryable(store, testcase.f) - testRangeQuery(t, queryable, from, query) - }) + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + ActiveQueryTracker: promql.NewActiveQueryTracker(t.TempDir(), 10, log.NewNopLogger()), + MaxSamples: 1e6, + Timeout: 1 * time.Minute, + } + for _, thanosEngine := range []bool{false, true} { + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + for _, testcase := range testcases { + for _, encoding := range encodings { + for _, query := range queries { + t.Run(fmt.Sprintf("%s/%s/%s/ thanos engine enabled = %t", testcase.name, encoding.name, query.query, thanosEngine), func(t *testing.T) { + store, from := makeMockChunkStore(t, 24, encoding.e) + queryable := newMockStoreQueryable(store, testcase.f) + testRangeQuery(t, queryable, queryEngine, from, query) + }) + } } } } diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index 3f6b0846b8a..5c1682c6e2c 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -19,6 +19,8 @@ import ( "github.com/prometheus/prometheus/storage" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/require" + "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/logicalplan" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" @@ -100,36 +102,46 @@ func TestApiStatusCodes(t *testing.T) { expectedCode: 422, }, } { - for k, q := range map[string]storage.SampleAndChunkQueryable{ - 
"error from queryable": errorTestQueryable{err: tc.err}, - "error from querier": errorTestQueryable{q: errorTestQuerier{err: tc.err}}, - "error from seriesset": errorTestQueryable{q: errorTestQuerier{s: errorTestSeriesSet{err: tc.err}}}, - } { - t.Run(fmt.Sprintf("%s/%d", k, ix), func(t *testing.T) { - r := createPrometheusAPI(NewErrorTranslateSampleAndChunkQueryable(q)) - rec := httptest.NewRecorder() - - req := httptest.NewRequest("GET", "/api/v1/query?query=up", nil) - req = req.WithContext(user.InjectOrgID(context.Background(), "test org")) - - r.ServeHTTP(rec, req) - - require.Equal(t, tc.expectedCode, rec.Code) - require.Contains(t, rec.Body.String(), tc.expectedString) - }) + for _, thanosEngine := range []bool{false, true} { + for k, q := range map[string]storage.SampleAndChunkQueryable{ + "error from queryable": errorTestQueryable{err: tc.err}, + "error from querier": errorTestQueryable{q: errorTestQuerier{err: tc.err}}, + "error from seriesset": errorTestQueryable{q: errorTestQuerier{s: errorTestSeriesSet{err: tc.err}}}, + } { + t.Run(fmt.Sprintf("%s/%d", k, ix), func(t *testing.T) { + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + Reg: nil, + ActiveQueryTracker: nil, + MaxSamples: 100, + Timeout: 5 * time.Second, + } + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + r := createPrometheusAPI(NewErrorTranslateSampleAndChunkQueryable(q), queryEngine) + rec := httptest.NewRecorder() + + req := httptest.NewRequest("GET", "/api/v1/query?query=up", nil) + req = req.WithContext(user.InjectOrgID(context.Background(), "test org")) + + r.ServeHTTP(rec, req) + + require.Equal(t, tc.expectedCode, rec.Code) + require.Contains(t, rec.Body.String(), tc.expectedString) + }) + } } } } -func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { - engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), - Reg: nil, - ActiveQueryTracker: nil, - MaxSamples: 100, - Timeout: 5 * time.Second, - }) - +func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine v1.QueryEngine) *route.Router { api := v1.NewAPI( engine, q, diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 3e4ceb76e3e..c3ae96d3219 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -17,6 +17,8 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" v1 "github.com/prometheus/prometheus/web/api/v1" + "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/logicalplan" "github.com/thanos-io/thanos/pkg/strutil" "golang.org/x/sync/errgroup" @@ -74,6 +76,10 @@ type Config struct { StoreGatewayClient ClientConfig `yaml:"store_gateway_client"` ShuffleShardingIngestersLookbackPeriod time.Duration `yaml:"shuffle_sharding_ingesters_lookback_period"` + + // Experimental. Use https://github.com/thanos-community/promql-engine rather than + // the Prometheus query engine. + ThanosEngine bool `yaml:"thanos_engine"` } var ( @@ -105,6 +111,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.StoreGatewayAddresses, "querier.store-gateway-addresses", "", "Comma separated list of store-gateway addresses in DNS Service Discovery format. 
This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).") f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") f.DurationVar(&cfg.ShuffleShardingIngestersLookbackPeriod, "querier.shuffle-sharding-ingesters-lookback-period", 0, "When distributor's sharding strategy is shuffle-sharding and this setting is > 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured 'query store after' and 'query ingesters within'. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).") + f.BoolVar(&cfg.ThanosEngine, "querier.thanos-engine", false, "Experimental. Use Thanos promql engine https://github.com/thanos-community/promql-engine rather than the Prometheus promql engine.") } // Validate the config @@ -166,7 +173,8 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor return lazyquery.NewLazyQuerier(querier), nil }) - engine := promql.NewEngine(promql.EngineOpts{ + var queryEngine v1.QueryEngine + opts := promql.EngineOpts{ Logger: logger, Reg: reg, ActiveQueryTracker: createActiveQueryTracker(cfg, logger), @@ -179,8 +187,16 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor NoStepSubqueryIntervalFn: func(int64) int64 { return cfg.DefaultEvaluationInterval.Milliseconds() }, - }) - return NewSampleAndChunkQueryable(lazyQueryable), exemplarQueryable, engine + } + if cfg.ThanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + return NewSampleAndChunkQueryable(lazyQueryable), exemplarQueryable, queryEngine } // NewSampleAndChunkQueryable creates a SampleAndChunkQueryable from a diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 24f57950547..8ed80d03a6c 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strconv" + "strings" "sync" "testing" "time" @@ -16,9 +17,12 @@ import ( "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/thanos-community/promql-engine/engine" + "github.com/thanos-community/promql-engine/logicalplan" "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk" @@ -261,35 +265,46 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { } for _, tc := range tCases { - t.Run(tc.name, func(t *testing.T) { - wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} - var wQueriables []QueryableWithFilter - for _, queriable := range tc.storeQueriables { - wQueriables = append(wQueriables, &wrappedSampleAndChunkQueryable{QueryableWithFilter: queriable}) - } - queryable := NewQueryable(wDistributorQueriable, wQueriables, batch.NewChunkMergeIterator, cfg, overrides, 
purger.NewNoopTombstonesLoader()) - queryTracker := promql.NewActiveQueryTracker(t.TempDir(), 10, log.NewNopLogger()) - - engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), - ActiveQueryTracker: queryTracker, - MaxSamples: 1e6, - Timeout: 1 * time.Minute, - }) + for _, thanosEngine := range []bool{false, true} { + t.Run(tc.name+fmt.Sprintf(", thanos engine: %s", strconv.FormatBool(thanosEngine)), func(t *testing.T) { + wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} + var wQueriables []QueryableWithFilter + for _, queriable := range tc.storeQueriables { + wQueriables = append(wQueriables, &wrappedSampleAndChunkQueryable{QueryableWithFilter: queriable}) + } + queryable := NewQueryable(wDistributorQueriable, wQueriables, batch.NewChunkMergeIterator, cfg, overrides, purger.NewNoopTombstonesLoader()) + queryTracker := promql.NewActiveQueryTracker(t.TempDir(), 10, log.NewNopLogger()) + + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + ActiveQueryTracker: queryTracker, + MaxSamples: 1e6, + Timeout: 1 * time.Minute, + } + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } - query, err := engine.NewRangeQuery(queryable, nil, "foo", start, end, 1*time.Minute) - r := query.Exec(ctx) + query, err := queryEngine.NewRangeQuery(queryable, nil, "foo", start, end, 1*time.Minute) + r := query.Exec(ctx) - require.NoError(t, err) - require.Equal(t, 2, r.Value.(promql.Matrix).Len()) + require.NoError(t, err) + require.Equal(t, 2, r.Value.(promql.Matrix).Len()) - for _, queryable := range append(wQueriables, wDistributorQueriable) { - var wQueryable = queryable.(*wrappedSampleAndChunkQueryable) - if wQueryable.UseQueryable(time.Now(), start.Unix()*1000, end.Unix()*1000) { - require.Equal(t, tc.sorted, wQueryable.queriers[0].selectCallsArgs[0][0]) + for _, queryable := range append(wQueriables, wDistributorQueriable) { + var wQueryable = queryable.(*wrappedSampleAndChunkQueryable) + if wQueryable.UseQueryable(time.Now(), start.Unix()*1000, end.Unix()*1000) { + require.Equal(t, tc.sorted, wQueryable.queriers[0].selectCallsArgs[0][0]) + } } - } - }) + }) + } } } @@ -305,24 +320,41 @@ func TestQuerier(t *testing.T) { } db, _ := mockTSDB(t, []labels.Labels{lset}, model.Time(0), int(chunks*samplesPerChunk), sampleRate, chunkOffset, int(samplesPerChunk)) - for _, query := range queries { - for _, encoding := range encodings { - for _, streaming := range []bool{false, true} { - for _, iterators := range []bool{false, true} { - t.Run(fmt.Sprintf("%s/%s/streaming=%t/iterators=%t", query.query, encoding.name, streaming, iterators), func(t *testing.T) { - cfg.IngesterStreaming = streaming - cfg.Iterators = iterators - - chunkStore, through := makeMockChunkStore(t, chunks, encoding.e) - distributor := mockDistibutorFor(t, chunkStore, through) - - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) - require.NoError(t, err) - - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore)), UseAlwaysQueryable(db)} - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - testRangeQuery(t, queryable, through, query) - }) + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + ActiveQueryTracker: promql.NewActiveQueryTracker(t.TempDir(), 10, 
log.NewNopLogger()), + MaxSamples: 1e6, + Timeout: 1 * time.Minute, + } + for _, thanosEngine := range []bool{false, true} { + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + for _, query := range queries { + for _, encoding := range encodings { + for _, streaming := range []bool{false, true} { + for _, iterators := range []bool{false, true} { + t.Run(fmt.Sprintf("%s/%s/streaming=%t/iterators=%t", query.query, encoding.name, streaming, iterators), func(t *testing.T) { + cfg.IngesterStreaming = streaming + cfg.Iterators = iterators + + chunkStore, through := makeMockChunkStore(t, chunks, encoding.e) + distributor := mockDistibutorFor(t, chunkStore, through) + + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) + require.NoError(t, err) + + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore)), UseAlwaysQueryable(db)} + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + testRangeQuery(t, queryable, queryEngine, through, query) + }) + } } } } @@ -414,43 +446,57 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { } queryTracker := promql.NewActiveQueryTracker(t.TempDir(), 10, log.NewNopLogger()) - - engine := promql.NewEngine(promql.EngineOpts{ + opts := promql.EngineOpts{ Logger: log.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, - }) + } cfg := Config{} for _, ingesterStreaming := range []bool{true, false} { - cfg.IngesterStreaming = ingesterStreaming - for _, c := range testCases { - cfg.QueryIngestersWithin = c.queryIngestersWithin - t.Run(fmt.Sprintf("IngesterStreaming=%t,test=%s", cfg.IngesterStreaming, c.name), func(t *testing.T) { - chunkStore, _ := makeMockChunkStore(t, 24, encodings[0].e) - distributor := &errDistributor{} + for _, thanosEngine := range []bool{true, false} { + cfg.IngesterStreaming = ingesterStreaming + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + for _, c := range testCases { + cfg.QueryIngestersWithin = c.queryIngestersWithin + t.Run(fmt.Sprintf("IngesterStreaming=%t,thanosEngine=%t,test=%s", cfg.IngesterStreaming, thanosEngine, c.name), func(t *testing.T) { + chunkStore, _ := makeMockChunkStore(t, 24, encodings[0].e) + distributor := &errDistributor{} - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) - require.NoError(t, err) + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) + require.NoError(t, err) - queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))}, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - query, err := engine.NewRangeQuery(queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute) - require.NoError(t, err) + queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))}, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + query, err := queryEngine.NewRangeQuery(queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute) + require.NoError(t, err) - ctx := user.InjectOrgID(context.Background(), "0") - r := query.Exec(ctx) - 
_, err = r.Matrix()
+					ctx := user.InjectOrgID(context.Background(), "0")
+					r := query.Exec(ctx)
+					_, err = r.Matrix()
 
-			if c.hitIngester {
-				// If the ingester was hit, the distributor always returns errDistributorError. Prometheus
-				// wrap any Select() error into "expanding series", so we do wrap it as well to have a match.
-				require.Error(t, err)
-				require.Equal(t, errors.Wrap(errDistributorError, "expanding series").Error(), err.Error())
-			} else {
-				// If the ingester was hit, there would have been an error from errDistributor.
-				require.NoError(t, err)
-			}
-		})
+					if c.hitIngester {
+						// If the ingester was hit, the distributor always returns errDistributorError. Prometheus
+						// wraps any Select() error into "expanding series", so we wrap it as well to have a match.
+						require.Error(t, err)
+						if !thanosEngine {
+							require.Equal(t, errors.Wrap(errDistributorError, "expanding series").Error(), err.Error())
+						} else {
+							require.Equal(t, errDistributorError.Error(), err.Error())
+						}
+					} else {
+						// If the ingester was not hit, no error is expected from errDistributor.
+						require.NoError(t, err)
+					}
+				})
+			}
 		}
 	}
 }
@@ -497,53 +543,64 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryIntoFuture(t *testing.T) {
 		},
 	}
 
-	engine := promql.NewEngine(promql.EngineOpts{
+	opts := promql.EngineOpts{
 		Logger: log.NewNopLogger(),
 		MaxSamples: 1e6,
 		Timeout: 1 * time.Minute,
 		LookbackDelta: engineLookbackDelta,
-	})
+	}
 
 	cfg := Config{}
 	flagext.DefaultValues(&cfg)
 
 	for _, ingesterStreaming := range []bool{true, false} {
 		cfg.IngesterStreaming = ingesterStreaming
-		for name, c := range tests {
-			cfg.MaxQueryIntoFuture = c.maxQueryIntoFuture
-			t.Run(fmt.Sprintf("%s (ingester streaming enabled = %t)", name, cfg.IngesterStreaming), func(t *testing.T) {
-				// We don't need to query any data for this test, so an empty store is fine.
-				chunkStore := &emptyChunkStore{}
-				distributor := &MockDistributor{}
-				distributor.On("Query", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(model.Matrix{}, nil)
-				distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil)
+		for _, thanosEngine := range []bool{true, false} {
+			var queryEngine v1.QueryEngine
+			if thanosEngine {
+				queryEngine = engine.New(engine.Opts{
+					EngineOpts: opts,
+					LogicalOptimizers: logicalplan.AllOptimizers,
+				})
+			} else {
+				queryEngine = promql.NewEngine(opts)
+			}
+			for name, c := range tests {
+				cfg.MaxQueryIntoFuture = c.maxQueryIntoFuture
+				t.Run(fmt.Sprintf("%s (ingester streaming enabled = %t, thanos engine enabled = %t)", name, cfg.IngesterStreaming, thanosEngine), func(t *testing.T) {
+					// We don't need to query any data for this test, so an empty store is fine.
+ chunkStore := &emptyChunkStore{} + distributor := &MockDistributor{} + distributor.On("Query", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(model.Matrix{}, nil) + distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil) - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) - require.NoError(t, err) + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) + require.NoError(t, err) - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - query, err := engine.NewRangeQuery(queryable, nil, "dummy", c.queryStartTime, c.queryEndTime, time.Minute) - require.NoError(t, err) + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + query, err := queryEngine.NewRangeQuery(queryable, nil, "dummy", c.queryStartTime, c.queryEndTime, time.Minute) + require.NoError(t, err) - ctx := user.InjectOrgID(context.Background(), "0") - r := query.Exec(ctx) - require.Nil(t, r.Err) + ctx := user.InjectOrgID(context.Background(), "0") + r := query.Exec(ctx) + require.Nil(t, r.Err) - _, err = r.Matrix() - require.Nil(t, err) - - if !c.expectedSkipped { - // Assert on the time range of the actual executed query (5s delta). - delta := float64(5000) - require.Len(t, distributor.Calls, 1) - assert.InDelta(t, util.TimeToMillis(c.expectedStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) - assert.InDelta(t, util.TimeToMillis(c.expectedEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) - } else { - // Ensure no query has been executed executed (because skipped). - assert.Len(t, distributor.Calls, 0) - } - }) + _, err = r.Matrix() + require.Nil(t, err) + + if !c.expectedSkipped { + // Assert on the time range of the actual executed query (5s delta). + delta := float64(5000) + require.Len(t, distributor.Calls, 1) + assert.InDelta(t, util.TimeToMillis(c.expectedStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) + assert.InDelta(t, util.TimeToMillis(c.expectedEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) + } else { + // Ensure no query has been executed (because skipped). + assert.Len(t, distributor.Calls, 0) + } + }) + } } } } @@ -583,44 +640,53 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) { }, } + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + ActiveQueryTracker: nil, + MaxSamples: 1e6, + Timeout: 1 * time.Minute, + } for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - var cfg Config - flagext.DefaultValues(&cfg) - - limits := DefaultLimitsConfig() - limits.MaxQueryLength = model.Duration(maxQueryLength) - overrides, err := validation.NewOverrides(limits, nil) - require.NoError(t, err) + for _, thanosEngine := range []bool{true, false} { + t.Run(fmt.Sprintf("%s, (thanos engine enabled = %t)", testName, thanosEngine), func(t *testing.T) { + var cfg Config + flagext.DefaultValues(&cfg) - // We don't need to query any data for this test, so an empty store is fine. 
- chunkStore := &emptyChunkStore{} - distributor := &emptyDistributor{} + limits := DefaultLimitsConfig() + limits.MaxQueryLength = model.Duration(maxQueryLength) + overrides, err := validation.NewOverrides(limits, nil) + require.NoError(t, err) - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + // We don't need to query any data for this test, so an empty store is fine. + chunkStore := &emptyChunkStore{} + distributor := &emptyDistributor{} - // Create the PromQL engine to execute the query. - engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), - ActiveQueryTracker: nil, - MaxSamples: 1e6, - Timeout: 1 * time.Minute, - }) + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - query, err := engine.NewRangeQuery(queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute) - require.NoError(t, err) + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + query, err := queryEngine.NewRangeQuery(queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute) + require.NoError(t, err) - ctx := user.InjectOrgID(context.Background(), "test") - r := query.Exec(ctx) + ctx := user.InjectOrgID(context.Background(), "test") + r := query.Exec(ctx) - if testData.expected != nil { - require.NotNil(t, r.Err) - assert.Equal(t, testData.expected.Error(), r.Err.Error()) - } else { - assert.Nil(t, r.Err) - } - }) + if testData.expected != nil { + require.NotNil(t, r.Err) + assert.True(t, strings.Contains(testData.expected.Error(), r.Err.Error())) + } else { + assert.Nil(t, r.Err) + } + }) + } } } @@ -705,14 +771,13 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { }, } - // Create the PromQL engine to execute the queries. - engine := promql.NewEngine(promql.EngineOpts{ + opts := promql.EngineOpts{ Logger: log.NewNopLogger(), ActiveQueryTracker: nil, MaxSamples: 1e6, LookbackDelta: engineLookbackDelta, Timeout: 1 * time.Minute, - }) + } for _, ingesterStreaming := range []bool{true, false} { expectedMethodForLabelMatchers := "MetricsForLabelMatchers" @@ -723,178 +788,188 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { expectedMethodForLabelNames = "LabelNamesStream" expectedMethodForLabelValues = "LabelValuesForLabelNameStream" } - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - ctx := user.InjectOrgID(context.Background(), "test") - - var cfg Config - flagext.DefaultValues(&cfg) - cfg.IngesterStreaming = ingesterStreaming - cfg.IngesterMetadataStreaming = ingesterStreaming - - limits := DefaultLimitsConfig() - limits.MaxQueryLookback = testData.maxQueryLookback - overrides, err := validation.NewOverrides(limits, nil) - require.NoError(t, err) - - // We don't need to query any data for this test, so an empty store is fine. 
- chunkStore := &emptyChunkStore{} - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} - - t.Run("query range", func(t *testing.T) { - if testData.query == "" { - return - } - distributor := &MockDistributor{} - distributor.On("Query", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(model.Matrix{}, nil) - distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil) - - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + for _, thanosEngine := range []bool{true, false} { + t.Run(fmt.Sprintf("%s, thanos engine enabled = %t", testName, thanosEngine), func(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "test") + + var cfg Config + flagext.DefaultValues(&cfg) + cfg.IngesterStreaming = ingesterStreaming + cfg.IngesterMetadataStreaming = ingesterStreaming + + limits := DefaultLimitsConfig() + limits.MaxQueryLookback = testData.maxQueryLookback + overrides, err := validation.NewOverrides(limits, nil) require.NoError(t, err) - query, err := engine.NewRangeQuery(queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute) - require.NoError(t, err) + // We don't need to query any data for this test, so an empty store is fine. + chunkStore := &emptyChunkStore{} + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} - r := query.Exec(ctx) - require.Nil(t, r.Err) + t.Run("query range", func(t *testing.T) { + if testData.query == "" { + return + } + distributor := &MockDistributor{} + distributor.On("Query", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(model.Matrix{}, nil) + distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil) - _, err = r.Matrix() - require.Nil(t, err) + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + require.NoError(t, err) - if !testData.expectedSkipped { - // Assert on the time range of the actual executed query (5s delta). - delta := float64(5000) - require.Len(t, distributor.Calls, 1) - assert.InDelta(t, util.TimeToMillis(testData.expectedQueryStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) - assert.InDelta(t, util.TimeToMillis(testData.expectedQueryEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) - } else { - // Ensure no query has been executed (because skipped). 
- assert.Len(t, distributor.Calls, 0) - } - }) + var queryEngine v1.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + query, err := queryEngine.NewRangeQuery(queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute) + require.NoError(t, err) - t.Run("series", func(t *testing.T) { - distributor := &MockDistributor{} - distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]metric.Metric{}, nil) - distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]metric.Metric{}, nil) + r := query.Exec(ctx) + require.Nil(t, r.Err) + + _, err = r.Matrix() + require.Nil(t, err) + + if !testData.expectedSkipped { + // Assert on the time range of the actual executed query (5s delta). + delta := float64(5000) + require.Len(t, distributor.Calls, 1) + assert.InDelta(t, util.TimeToMillis(testData.expectedQueryStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) + assert.InDelta(t, util.TimeToMillis(testData.expectedQueryEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) + } else { + // Ensure no query has been executed (because skipped). + assert.Len(t, distributor.Calls, 0) + } + }) - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) - require.NoError(t, err) + t.Run("series", func(t *testing.T) { + distributor := &MockDistributor{} + distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]metric.Metric{}, nil) + distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]metric.Metric{}, nil) - // We apply the validation here again since when initializing querier we change the start/end time, - // but when querying series we don't validate again. So we should pass correct hints here. - start, end, err := validateQueryTimeRange(ctx, "test", util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime), overrides, 0) - // Skipped query will hit errEmptyTimeRange during validation. - if !testData.expectedSkipped { + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) require.NoError(t, err) - } - - hints := &storage.SelectHints{ - Start: start, - End: end, - Func: "series", - } - matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test") - - set := q.Select(false, hints, matcher) - require.False(t, set.Next()) // Expected to be empty. - require.NoError(t, set.Err()) - if !testData.expectedSkipped { - // Assert on the time range of the actual executed query (5s delta). 
- delta := float64(5000) - require.Len(t, distributor.Calls, 1) - assert.Equal(t, expectedMethodForLabelMatchers, distributor.Calls[0].Method) - assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) - assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) - } else { - // Ensure no query has been executed (because skipped). - assert.Len(t, distributor.Calls, 0) - } - }) + // We apply the validation here again since when initializing querier we change the start/end time, + // but when querying series we don't validate again. So we should pass correct hints here. + start, end, err := validateQueryTimeRange(ctx, "test", util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime), overrides, 0) + // Skipped query will hit errEmptyTimeRange during validation. + if !testData.expectedSkipped { + require.NoError(t, err) + } + + hints := &storage.SelectHints{ + Start: start, + End: end, + Func: "series", + } + matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test") + + set := q.Select(false, hints, matcher) + require.False(t, set.Next()) // Expected to be empty. + require.NoError(t, set.Err()) + + if !testData.expectedSkipped { + // Assert on the time range of the actual executed query (5s delta). + delta := float64(5000) + require.Len(t, distributor.Calls, 1) + assert.Equal(t, expectedMethodForLabelMatchers, distributor.Calls[0].Method) + assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) + assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) + } else { + // Ensure no query has been executed (because skipped). + assert.Len(t, distributor.Calls, 0) + } + }) - t.Run("label names", func(t *testing.T) { - distributor := &MockDistributor{} - distributor.On("LabelNames", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) - distributor.On("LabelNamesStream", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) + t.Run("label names", func(t *testing.T) { + distributor := &MockDistributor{} + distributor.On("LabelNames", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) + distributor.On("LabelNamesStream", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) - require.NoError(t, err) + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) + require.NoError(t, err) - _, _, err = q.LabelNames() - require.NoError(t, err) + _, _, err = q.LabelNames() + require.NoError(t, err) - if !testData.expectedSkipped { - // Assert on the time range of the actual executed query (5s delta). 
- delta := float64(5000) - require.Len(t, distributor.Calls, 1) - assert.Equal(t, expectedMethodForLabelNames, distributor.Calls[0].Method) - assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) - assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) - } else { - // Ensure no query has been executed (because skipped). - assert.Len(t, distributor.Calls, 0) - } - }) + if !testData.expectedSkipped { + // Assert on the time range of the actual executed query (5s delta). + delta := float64(5000) + require.Len(t, distributor.Calls, 1) + assert.Equal(t, expectedMethodForLabelNames, distributor.Calls[0].Method) + assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta) + assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) + } else { + // Ensure no query has been executed (because skipped). + assert.Len(t, distributor.Calls, 0) + } + }) - t.Run("label names with matchers", func(t *testing.T) { - matchers := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchNotEqual, "route", "get_user"), - } - distributor := &MockDistributor{} - distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]metric.Metric{}, nil) - distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]metric.Metric{}, nil) + t.Run("label names with matchers", func(t *testing.T) { + matchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchNotEqual, "route", "get_user"), + } + distributor := &MockDistributor{} + distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]metric.Metric{}, nil) + distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]metric.Metric{}, nil) - queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) - require.NoError(t, err) + queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) + q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) + require.NoError(t, err) - _, _, err = q.LabelNames(matchers...) - require.NoError(t, err) + _, _, err = q.LabelNames(matchers...) + require.NoError(t, err) - if !testData.expectedSkipped { - // Assert on the time range of the actual executed query (5s delta). - delta := float64(5000) - require.Len(t, distributor.Calls, 1) - assert.Equal(t, expectedMethodForLabelMatchers, distributor.Calls[0].Method) - args := distributor.Calls[0].Arguments - assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(args.Get(1).(model.Time)), delta) - assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(args.Get(2).(model.Time)), delta) - assert.Equal(t, matchers, args.Get(3).([]*labels.Matcher)) - } else { - // Ensure no query has been executed executed (because skipped). 
-					assert.Len(t, distributor.Calls, 0)
-				}
-			})
+					if !testData.expectedSkipped {
+						// Assert on the time range of the actual executed query (5s delta).
+						delta := float64(5000)
+						require.Len(t, distributor.Calls, 1)
+						assert.Equal(t, expectedMethodForLabelMatchers, distributor.Calls[0].Method)
+						args := distributor.Calls[0].Arguments
+						assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(args.Get(1).(model.Time)), delta)
+						assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(args.Get(2).(model.Time)), delta)
+						assert.Equal(t, matchers, args.Get(3).([]*labels.Matcher))
+					} else {
+						// Ensure no query has been executed (because skipped).
+						assert.Len(t, distributor.Calls, 0)
+					}
+				})
 
-			t.Run("label values", func(t *testing.T) {
-				distributor := &MockDistributor{}
-				distributor.On("LabelValuesForLabelName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil)
-				distributor.On("LabelValuesForLabelNameStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil)
+				t.Run("label values", func(t *testing.T) {
+					distributor := &MockDistributor{}
+					distributor.On("LabelValuesForLabelName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil)
+					distributor.On("LabelValuesForLabelNameStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil)
 
-				queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger())
-				q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
-				require.NoError(t, err)
+					queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger())
+					q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
+					require.NoError(t, err)
 
-				_, _, err = q.LabelValues(labels.MetricName)
-				require.NoError(t, err)
+					_, _, err = q.LabelValues(labels.MetricName)
+					require.NoError(t, err)
 
-				if !testData.expectedSkipped {
-					// Assert on the time range of the actual executed query (5s delta).
-					delta := float64(5000)
-					require.Len(t, distributor.Calls, 1)
-					assert.Equal(t, expectedMethodForLabelValues, distributor.Calls[0].Method)
-					assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta)
-					assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta)
-				} else {
-					// Ensure no query has been executed executed (because skipped).
-					assert.Len(t, distributor.Calls, 0)
-				}
+					if !testData.expectedSkipped {
+						// Assert on the time range of the actual executed query (5s delta).
+						delta := float64(5000)
+						require.Len(t, distributor.Calls, 1)
+						assert.Equal(t, expectedMethodForLabelValues, distributor.Calls[0].Method)
+						assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataStartTime), int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), delta)
+						assert.InDelta(t, util.TimeToMillis(testData.expectedMetadataEndTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta)
+					} else {
+						// Ensure no query has been executed (because skipped).
+ assert.Len(t, distributor.Calls, 0) + } + }) }) - }) + } } } } @@ -974,17 +1049,9 @@ func mockDistibutorFor(t *testing.T, cs mockChunkStore, through model.Time) *Moc return result } -func testRangeQuery(t testing.TB, queryable storage.Queryable, end model.Time, q query) *promql.Result { - queryTracker := promql.NewActiveQueryTracker(t.TempDir(), 10, log.NewNopLogger()) - +func testRangeQuery(t testing.TB, queryable storage.Queryable, queryEngine v1.QueryEngine, end model.Time, q query) *promql.Result { from, through, step := time.Unix(0, 0), end.Time(), q.step - engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), - ActiveQueryTracker: queryTracker, - MaxSamples: 1e6, - Timeout: 1 * time.Minute, - }) - query, err := engine.NewRangeQuery(queryable, nil, q.query, from, through, step) + query, err := queryEngine.NewRangeQuery(queryable, nil, q.query, from, through, step) require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), "0") diff --git a/vendor/github.com/thanos-community/promql-engine/COPYRIGHT b/vendor/github.com/thanos-community/promql-engine/COPYRIGHT new file mode 100644 index 00000000000..8bd4235c369 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The Thanos Community Authors. +Licensed under the Apache License 2.0. \ No newline at end of file diff --git a/vendor/github.com/thanos-community/promql-engine/LICENSE b/vendor/github.com/thanos-community/promql-engine/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/thanos-community/promql-engine/api/remote.go b/vendor/github.com/thanos-community/promql-engine/api/remote.go new file mode 100644 index 00000000000..2f8c9f2c146 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/api/remote.go @@ -0,0 +1,31 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package api + +import ( + "time" + + "github.com/prometheus/prometheus/promql" +) + +type RemoteEndpoints interface { + Engines() []RemoteEngine +} + +type RemoteEngine interface { + NewInstantQuery(opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) + NewRangeQuery(opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) +} + +type staticEndpoints struct { + engines []RemoteEngine +} + +func (m staticEndpoints) Engines() []RemoteEngine { + return m.engines +} + +func NewStaticEndpoints(engines []RemoteEngine) RemoteEndpoints { + return &staticEndpoints{engines: engines} +} diff --git a/vendor/github.com/thanos-community/promql-engine/engine/engine.go b/vendor/github.com/thanos-community/promql-engine/engine/engine.go new file mode 100644 index 00000000000..399c0c55c3b --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/engine/engine.go @@ -0,0 +1,442 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package engine + +import ( + "context" + + v1 "github.com/prometheus/prometheus/web/api/v1" + + "io" + "math" + "runtime" + "sort" + "time" + + "github.com/thanos-community/promql-engine/api" + + "github.com/efficientgo/core/errors" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/stats" + + "github.com/thanos-community/promql-engine/execution" + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" + "github.com/thanos-community/promql-engine/logicalplan" +) + +type QueryType int + +const ( + InstantQuery QueryType = 1 + RangeQuery QueryType = 2 +) + +type Opts struct { + promql.EngineOpts + + // LogicalOptimizers are optimizers that are run if the value is not nil. If it is nil then the default optimizers are run. Default optimizer list is available in the logicalplan package. + LogicalOptimizers []logicalplan.Optimizer + + // DisableFallback enables mode where engine returns error if some expression of feature is not yet implemented + // in the new engine, instead of falling back to prometheus engine. + DisableFallback bool + + // DebugWriter specifies output for debug (multi-line) information meant for humans debugging the engine. + // If nil, nothing will be printed. + // NOTE: Users will not check the errors, debug writing is best effort. 
+ DebugWriter io.Writer +} + +func (o Opts) getLogicalOptimizers() []logicalplan.Optimizer { + if o.LogicalOptimizers == nil { + return logicalplan.DefaultOptimizers + } + + return o.LogicalOptimizers +} + +type localEngine struct { + q storage.Queryable + engine *compatibilityEngine +} + +func NewLocalEngine(opts Opts, q storage.Queryable) *localEngine { + return &localEngine{ + q: q, + engine: New(opts), + } +} + +func (l localEngine) NewInstantQuery(opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { + return l.engine.NewInstantQuery(l.q, opts, qs, ts) +} + +func (l localEngine) NewRangeQuery(opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { + return l.engine.NewRangeQuery(l.q, opts, qs, start, end, interval) +} + +type distributedEngine struct { + endpoints api.RemoteEndpoints + localEngine *compatibilityEngine +} + +func NewDistributedEngine(opts Opts, endpoints api.RemoteEndpoints) v1.QueryEngine { + opts.LogicalOptimizers = append( + opts.LogicalOptimizers, + logicalplan.DistributedExecutionOptimizer{Endpoints: endpoints}, + ) + return &distributedEngine{ + endpoints: endpoints, + localEngine: New(opts), + } +} + +func (l distributedEngine) SetQueryLogger(log promql.QueryLogger) {} + +func (l distributedEngine) NewInstantQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { + return l.localEngine.NewInstantQuery(q, opts, qs, ts) +} + +func (l distributedEngine) NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { + return l.localEngine.NewRangeQuery(q, opts, qs, start, end, interval) +} + +func New(opts Opts) *compatibilityEngine { + if opts.Logger == nil { + opts.Logger = log.NewNopLogger() + } + if opts.LookbackDelta == 0 { + opts.LookbackDelta = 5 * time.Minute + level.Debug(opts.Logger).Log("msg", "lookback delta is zero, setting to default value", "value", 5*time.Minute) + } + + return &compatibilityEngine{ + prom: promql.NewEngine(opts.EngineOpts), + queries: promauto.With(opts.Reg).NewCounterVec( + prometheus.CounterOpts{ + Name: "promql_engine_queries_total", + Help: "Number of PromQL queries.", + }, []string{"fallback"}, + ), + debugWriter: opts.DebugWriter, + disableFallback: opts.DisableFallback, + logger: opts.Logger, + lookbackDelta: opts.LookbackDelta, + logicalOptimizers: opts.getLogicalOptimizers(), + } +} + +type compatibilityEngine struct { + prom *promql.Engine + queries *prometheus.CounterVec + + debugWriter io.Writer + + disableFallback bool + logger log.Logger + lookbackDelta time.Duration + logicalOptimizers []logicalplan.Optimizer +} + +func (e *compatibilityEngine) SetQueryLogger(l promql.QueryLogger) { + e.prom.SetQueryLogger(l) +} + +func (e *compatibilityEngine) NewInstantQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { + expr, err := parser.ParseExpr(qs) + if err != nil { + return nil, err + } + + lplan := logicalplan.New(expr, ts, ts) + lplan = lplan.Optimize(e.logicalOptimizers) + + exec, err := execution.New(lplan.Expr(), q, ts, ts, 0, e.lookbackDelta) + if e.triggerFallback(err) { + e.queries.WithLabelValues("true").Inc() + return e.prom.NewInstantQuery(q, opts, qs, ts) + } + e.queries.WithLabelValues("false").Inc() + if err != nil { + return nil, err + } + + if e.debugWriter != nil { + explain(e.debugWriter, exec, "", "") + } + + return &compatibilityQuery{ + Query: &Query{exec: exec}, + 
engine: e, + expr: expr, + ts: ts, + t: InstantQuery, + }, nil +} + +func (e *compatibilityEngine) NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, step time.Duration) (promql.Query, error) { + expr, err := parser.ParseExpr(qs) + if err != nil { + return nil, err + } + + // Use same check as Prometheus for range queries. + if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { + return nil, errors.Newf("invalid expression type %q for range Query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) + } + + lplan := logicalplan.New(expr, start, end) + lplan = lplan.Optimize(e.logicalOptimizers) + + exec, err := execution.New(lplan.Expr(), q, start, end, step, e.lookbackDelta) + if e.triggerFallback(err) { + e.queries.WithLabelValues("true").Inc() + return e.prom.NewRangeQuery(q, opts, qs, start, end, step) + } + e.queries.WithLabelValues("false").Inc() + if err != nil { + return nil, err + } + + if e.debugWriter != nil { + explain(e.debugWriter, exec, "", "") + } + + return &compatibilityQuery{ + Query: &Query{exec: exec}, + engine: e, + expr: expr, + t: RangeQuery, + }, nil +} + +type Query struct { + exec model.VectorOperator +} + +// Explain returns human-readable explanation of the created executor. +func (q *Query) Explain() string { + // TODO(bwplotka): Explain plan and steps. + return "not implemented" +} + +func (q *Query) Profile() { + // TODO(bwplotka): Return profile. +} + +type compatibilityQuery struct { + *Query + engine *compatibilityEngine + expr parser.Expr + ts time.Time // Empty for range queries. + t QueryType + + cancel context.CancelFunc +} + +func (q *compatibilityQuery) Exec(ctx context.Context) (ret *promql.Result) { + // Handle case with strings early on as this does not need us to process samples. + // TODO(saswatamcode): Modify models.StepVector to support all types and check during executor creation. + ret = &promql.Result{ + Value: promql.Vector{}, + } + defer recoverEngine(q.engine.logger, q.expr, &ret.Err) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + q.cancel = cancel + + resultSeries, err := q.Query.exec.Series(ctx) + if err != nil { + return newErrResult(ret, err) + } + + series := make([]promql.Series, len(resultSeries)) + for i := 0; i < len(resultSeries); i++ { + series[i].Metric = resultSeries[i] + } +loop: + for { + select { + case <-ctx.Done(): + return newErrResult(ret, ctx.Err()) + default: + r, err := q.Query.exec.Next(ctx) + if err != nil { + return newErrResult(ret, err) + } + if r == nil { + break loop + } + + // Case where Series call might return nil, but samples are present. + // For example scalar(http_request_total) where http_request_total has multiple values. + if len(resultSeries) == 0 && len(r) != 0 { + numSeries := 0 + for i := range r { + numSeries += len(r[i].Samples) + } + + series = make([]promql.Series, numSeries) + + for _, vector := range r { + for i := range vector.Samples { + series[i].Points = append(series[i].Points, promql.Point{ + T: vector.T, + V: vector.Samples[i], + }) + } + q.Query.exec.GetPool().PutStepVector(vector) + } + q.Query.exec.GetPool().PutVectors(r) + continue + } + + for _, vector := range r { + for i, s := range vector.SampleIDs { + if len(series[s].Points) == 0 { + series[s].Points = make([]promql.Point, 0, 121) // Typically 1h of data. 
+ } + series[s].Points = append(series[s].Points, promql.Point{ + T: vector.T, + V: vector.Samples[i], + }) + } + q.Query.exec.GetPool().PutStepVector(vector) + } + q.Query.exec.GetPool().PutVectors(r) + } + } + + // For range Query we expect always a Matrix value type. + if q.t == RangeQuery { + resultMatrix := make(promql.Matrix, 0, len(series)) + for _, s := range series { + if len(s.Points) == 0 { + continue + } + resultMatrix = append(resultMatrix, s) + } + sort.Sort(resultMatrix) + ret.Value = resultMatrix + return ret + } + + var result parser.Value + switch q.expr.Type() { + case parser.ValueTypeMatrix: + result = promql.Matrix(series) + case parser.ValueTypeVector: + // Convert matrix with one value per series into vector. + vector := make(promql.Vector, 0, len(resultSeries)) + for i := range series { + if len(series[i].Points) == 0 { + continue + } + // Point might have a different timestamp, force it to the evaluation + // timestamp as that is when we ran the evaluation. + vector = append(vector, promql.Sample{ + Metric: series[i].Metric, + Point: promql.Point{ + V: series[i].Points[0].V, + T: q.ts.UnixMilli(), + }, + }) + } + result = vector + case parser.ValueTypeScalar: + v := math.NaN() + if len(series) != 0 { + v = series[0].Points[0].V + } + result = promql.Scalar{V: v, T: q.ts.UnixMilli()} + default: + panic(errors.Newf("new.Engine.exec: unexpected expression type %q", q.expr.Type())) + } + + ret.Value = result + return ret +} + +func newErrResult(r *promql.Result, err error) *promql.Result { + if r == nil { + r = &promql.Result{} + } + if r.Err == nil && err != nil { + r.Err = err + } + return r +} + +func (q *compatibilityQuery) Statement() parser.Statement { return nil } + +func (q *compatibilityQuery) Stats() *stats.Statistics { return &stats.Statistics{} } + +func (q *compatibilityQuery) Close() { q.Cancel() } + +func (q *compatibilityQuery) String() string { return q.expr.String() } + +func (q *compatibilityQuery) Cancel() { + if q.cancel != nil { + q.cancel() + q.cancel = nil + } +} + +func (e *compatibilityEngine) triggerFallback(err error) bool { + if e.disableFallback { + return false + } + + return errors.Is(err, parse.ErrNotSupportedExpr) || errors.Is(err, parse.ErrNotImplemented) +} + +func recoverEngine(logger log.Logger, expr parser.Expr, errp *error) { + e := recover() + if e == nil { + return + } + + switch err := e.(type) { + case runtime.Error: + // Print the stack trace but do not inhibit the running application. 
+ buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + level.Error(logger).Log("msg", "runtime panic in engine", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + *errp = errors.Wrap(err, "unexpected error") + } +} + +func explain(w io.Writer, o model.VectorOperator, indent, indentNext string) { + me, next := o.Explain() + _, _ = w.Write([]byte(indent)) + _, _ = w.Write([]byte(me)) + if len(next) == 0 { + _, _ = w.Write([]byte("\n")) + return + } + + if me == "[*CancellableOperator]" { + _, _ = w.Write([]byte(": ")) + explain(w, next[0], "", indentNext) + return + } + _, _ = w.Write([]byte(":\n")) + + for i, n := range next { + if i == len(next)-1 { + explain(w, n, indentNext+"└──", indentNext+" ") + } else { + explain(w, n, indentNext+"├──", indentNext+"│ ") + } + } +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/aggregate/hashaggregate.go b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/hashaggregate.go new file mode 100644 index 00000000000..8803ff37bf6 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/hashaggregate.go @@ -0,0 +1,232 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package aggregate + +import ( + "context" + "fmt" + "math" + "sync" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "golang.org/x/exp/slices" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" + "github.com/thanos-community/promql-engine/worker" +) + +type aggregate struct { + next model.VectorOperator + paramOp model.VectorOperator + // params holds the aggregate parameter for each step. + params []float64 + + vectorPool *model.VectorPool + + by bool + labels []string + aggregation parser.ItemType + + once sync.Once + tables []aggregateTable + series []labels.Labels + newAccumulator newAccumulatorFunc + stepsBatch int + workers worker.Group +} + +func NewHashAggregate( + points *model.VectorPool, + next model.VectorOperator, + paramOp model.VectorOperator, + aggregation parser.ItemType, + by bool, + labels []string, + stepsBatch int, +) (model.VectorOperator, error) { + newAccumulator, err := makeAccumulatorFunc(aggregation) + if err != nil { + return nil, err + } + + // Grouping labels need to be sorted in order for metric hashing to work. 
+ // https://github.com/prometheus/prometheus/blob/8ed39fdab1ead382a354e45ded999eb3610f8d5f/model/labels/labels.go#L162-L181 + slices.Sort(labels) + a := &aggregate{ + next: next, + paramOp: paramOp, + params: make([]float64, stepsBatch), + vectorPool: points, + by: by, + aggregation: aggregation, + labels: labels, + stepsBatch: stepsBatch, + newAccumulator: newAccumulator, + } + a.workers = worker.NewGroup(stepsBatch, a.workerTask) + + return a, nil +} + +func (a *aggregate) Explain() (me string, next []model.VectorOperator) { + var ops []model.VectorOperator + + if a.paramOp != nil { + ops = append(ops, a.paramOp) + } + ops = append(ops, a.next) + + if a.by { + return fmt.Sprintf("[*aggregate] %v by (%v)", a.aggregation.String(), a.labels), ops + } + return fmt.Sprintf("[*aggregate] %v without (%v)", a.aggregation.String(), a.labels), ops +} + +func (a *aggregate) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + a.once.Do(func() { err = a.initializeTables(ctx) }) + if err != nil { + return nil, err + } + + return a.series, nil +} + +func (a *aggregate) GetPool() *model.VectorPool { + return a.vectorPool +} + +func (a *aggregate) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + in, err := a.next.Next(ctx) + if err != nil { + return nil, err + } + if in == nil { + return nil, nil + } + defer a.next.GetPool().PutVectors(in) + + a.once.Do(func() { err = a.initializeTables(ctx) }) + if err != nil { + return nil, err + } + + if a.paramOp != nil { + args, err := a.paramOp.Next(ctx) + if err != nil { + return nil, err + } + for i := range a.params { + a.params[i] = math.NaN() + if i < len(args) { + a.params[i] = args[i].Samples[0] + a.paramOp.GetPool().PutStepVector(args[i]) + } + } + a.paramOp.GetPool().PutVectors(args) + } + + result := a.vectorPool.GetVectorBatch() + for i, vector := range in { + if err = a.workers[i].Send(a.params[i], vector); err != nil { + return nil, err + } + } + + for i, vector := range in { + output, err := a.workers[i].GetOutput() + if err != nil { + return nil, err + } + result = append(result, output) + a.next.GetPool().PutStepVector(vector) + } + + return result, nil +} + +func (a *aggregate) initializeTables(ctx context.Context) error { + var ( + tables []aggregateTable + series []labels.Labels + err error + ) + + if a.by && len(a.labels) == 0 { + tables, series, err = a.initializeVectorizedTables(ctx) + } else { + tables, series, err = a.initializeScalarTables(ctx) + } + if err != nil { + return err + } + a.tables = tables + a.series = series + a.workers.Start(ctx) + + return nil +} + +func (a *aggregate) workerTask(workerID int, arg float64, vector model.StepVector) model.StepVector { + table := a.tables[workerID] + table.aggregate(arg, vector) + return table.toVector(a.vectorPool) +} + +func (a *aggregate) initializeVectorizedTables(ctx context.Context) ([]aggregateTable, []labels.Labels, error) { + tables, err := newVectorizedTables(a.stepsBatch, a.aggregation) + if errors.Is(err, parse.ErrNotSupportedExpr) { + return a.initializeScalarTables(ctx) + } + + if err != nil { + return nil, nil, err + } + + return tables, []labels.Labels{{}}, nil +} + +func (a *aggregate) initializeScalarTables(ctx context.Context) ([]aggregateTable, []labels.Labels, error) { + series, err := a.next.Series(ctx) + if err != nil { + return nil, nil, err + } + + inputCache := make([]uint64, len(series)) + outputMap := make(map[uint64]*model.Series) + outputCache := 
make([]*model.Series, 0) + buf := make([]byte, 1024) + for i := 0; i < len(series); i++ { + hash, _, lbls := hashMetric(series[i], !a.by, a.labels, buf) + output, ok := outputMap[hash] + if !ok { + output = &model.Series{ + Metric: lbls, + ID: uint64(len(outputCache)), + } + outputMap[hash] = output + outputCache = append(outputCache, output) + } + + inputCache[i] = output.ID + } + a.vectorPool.SetStepSize(len(outputCache)) + tables := newScalarTables(a.stepsBatch, inputCache, outputCache, a.newAccumulator) + + series = make([]labels.Labels, len(outputCache)) + for i := 0; i < len(outputCache); i++ { + series[i] = outputCache[i].Metric + } + + return tables, series, nil +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/aggregate/khashaggregate.go b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/khashaggregate.go new file mode 100644 index 00000000000..af6ad362abe --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/khashaggregate.go @@ -0,0 +1,233 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package aggregate + +import ( + "container/heap" + "context" + "fmt" + "math" + "sort" + "sync" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "golang.org/x/exp/slices" + + "github.com/thanos-community/promql-engine/execution/model" +) + +type kAggregate struct { + next model.VectorOperator + paramOp model.VectorOperator + // params holds the aggregate parameter for each step. + params []float64 + + vectorPool *model.VectorPool + + by bool + labels []string + aggregation parser.ItemType + + once sync.Once + series []labels.Labels + inputToHeap []*samplesHeap + heaps []*samplesHeap + compare func(float64, float64) bool +} + +func NewKHashAggregate( + points *model.VectorPool, + next model.VectorOperator, + paramOp model.VectorOperator, + aggregation parser.ItemType, + by bool, + labels []string, + stepsBatch int, +) (model.VectorOperator, error) { + var compare func(float64, float64) bool + + if aggregation == parser.TOPK { + compare = func(f float64, s float64) bool { + return f < s + } + } else { + compare = func(f float64, s float64) bool { + return s < f + } + } + // Grouping labels need to be sorted in order for metric hashing to work. 
+ // https://github.com/prometheus/prometheus/blob/8ed39fdab1ead382a354e45ded999eb3610f8d5f/model/labels/labels.go#L162-L181 + slices.Sort(labels) + + a := &kAggregate{ + next: next, + vectorPool: points, + by: by, + aggregation: aggregation, + labels: labels, + paramOp: paramOp, + compare: compare, + params: make([]float64, stepsBatch), + } + + return a, nil +} + +func (a *kAggregate) Next(ctx context.Context) ([]model.StepVector, error) { + in, err := a.next.Next(ctx) + if err != nil { + return nil, err + } + if in == nil { + return nil, nil + } + + defer a.next.GetPool().PutVectors(in) + + args, err := a.paramOp.Next(ctx) + if err != nil { + return nil, err + } + for i := range a.params { + a.params[i] = math.NaN() + if i < len(args) { + a.params[i] = args[i].Samples[0] + a.paramOp.GetPool().PutStepVector(args[i]) + } + } + a.paramOp.GetPool().PutVectors(args) + + if len(args) < len(in) { + return nil, errors.New("scalar argument not found") + } + + a.once.Do(func() { err = a.init(ctx) }) + if err != nil { + return nil, err + } + + result := a.vectorPool.GetVectorBatch() + for i, vector := range in { + a.aggregate(vector.T, &result, int(a.params[i]), vector.SampleIDs, vector.Samples) + a.next.GetPool().PutStepVector(vector) + } + + return result, nil +} + +func (a *kAggregate) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + a.once.Do(func() { err = a.init(ctx) }) + if err != nil { + return nil, err + } + + return a.series, nil +} + +func (a *kAggregate) GetPool() *model.VectorPool { + return a.vectorPool +} + +func (a *kAggregate) Explain() (me string, next []model.VectorOperator) { + if a.by { + return fmt.Sprintf("[*kaggregate] %v by (%v)", a.aggregation.String(), a.labels), []model.VectorOperator{a.paramOp, a.next} + } + return fmt.Sprintf("[*kaggregate] %v without (%v)", a.aggregation.String(), a.labels), []model.VectorOperator{a.paramOp, a.next} +} + +func (a *kAggregate) init(ctx context.Context) error { + series, err := a.next.Series(ctx) + if err != nil { + return err + } + hapsHash := make(map[uint64]*samplesHeap) + buf := make([]byte, 1024) + for i := 0; i < len(series); i++ { + hash, _, _ := hashMetric(series[i], !a.by, a.labels, buf) + h, ok := hapsHash[hash] + if !ok { + h = &samplesHeap{compare: a.compare} + hapsHash[hash] = h + a.heaps = append(a.heaps, h) + } + a.inputToHeap = append(a.inputToHeap, h) + } + a.vectorPool.SetStepSize(len(series)) + a.series = series + return nil +} + +func (a *kAggregate) aggregate(t int64, result *[]model.StepVector, k int, SampleIDs []uint64, samples []float64) { + for i, sId := range SampleIDs { + h := a.inputToHeap[sId] + if h.Len() < k || h.compare(h.entries[0].total, samples[i]) || math.IsNaN(h.entries[0].total) { + if k == 1 && h.Len() == 1 { + h.entries[0].sId = sId + h.entries[0].total = samples[i] + continue + } + + if h.Len() == k { + heap.Pop(h) + } + + heap.Push(h, &entry{sId: sId, total: samples[i]}) + } + } + + for _, h := range a.heaps { + s := a.vectorPool.GetStepVector(t) + // The heap keeps the lowest value on top, so reverse it. 
+ if len(h.entries) > 1 { + sort.Sort(sort.Reverse(h)) + } + + for _, e := range h.entries { + s.SampleIDs = append(s.SampleIDs, e.sId) + s.Samples = append(s.Samples, e.total) + } + *result = append(*result, s) + h.entries = h.entries[:0] + } +} + +type entry struct { + sId uint64 + total float64 +} + +type samplesHeap struct { + entries []entry + compare func(float64, float64) bool +} + +func (s samplesHeap) Len() int { + return len(s.entries) +} + +func (s samplesHeap) Less(i, j int) bool { + if math.IsNaN(s.entries[i].total) { + return true + } + return s.compare(s.entries[i].total, s.entries[j].total) +} + +func (s samplesHeap) Swap(i, j int) { + s.entries[i], s.entries[j] = s.entries[j], s.entries[i] +} + +func (s *samplesHeap) Push(x interface{}) { + s.entries = append(s.entries, *(x.(*entry))) +} + +func (s *samplesHeap) Pop() interface{} { + old := (*s).entries + n := len(old) + el := old[n-1] + (*s).entries = old[0 : n-1] + return el +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/aggregate/scalar_table.go b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/scalar_table.go new file mode 100644 index 00000000000..07750a1fc7a --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/scalar_table.go @@ -0,0 +1,335 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package aggregate + +import ( + "fmt" + "math" + "sort" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/function" + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" +) + +type aggregateTable interface { + aggregate(arg float64, vector model.StepVector) + toVector(pool *model.VectorPool) model.StepVector + size() int +} + +type scalarTable struct { + timestamp int64 + inputs []uint64 + outputs []*model.Series + accumulators []*accumulator +} + +func newScalarTables(stepsBatch int, inputCache []uint64, outputCache []*model.Series, newAccumulator newAccumulatorFunc) []aggregateTable { + tables := make([]aggregateTable, stepsBatch) + for i := 0; i < len(tables); i++ { + tables[i] = newScalarTable(inputCache, outputCache, newAccumulator) + } + return tables +} + +func newScalarTable(inputSampleIDs []uint64, outputs []*model.Series, newAccumulator newAccumulatorFunc) *scalarTable { + accumulators := make([]*accumulator, len(outputs)) + for i := 0; i < len(accumulators); i++ { + accumulators[i] = newAccumulator() + } + return &scalarTable{ + inputs: inputSampleIDs, + outputs: outputs, + accumulators: accumulators, + } +} + +func (t *scalarTable) aggregate(arg float64, vector model.StepVector) { + t.reset(arg) + + for i := range vector.Samples { + t.addSample(vector.T, vector.SampleIDs[i], vector.Samples[i]) + } +} + +func (t *scalarTable) addSample(ts int64, sampleID uint64, sample float64) { + outputSampleID := t.inputs[sampleID] + output := t.outputs[outputSampleID] + + t.timestamp = ts + t.accumulators[output.ID].AddFunc(sample) +} + +func (t *scalarTable) reset(arg float64) { + for i := range t.outputs { + t.accumulators[i].Reset(arg) + } +} + +func (t *scalarTable) toVector(pool *model.VectorPool) model.StepVector { + result := pool.GetStepVector(t.timestamp) + for i, v := range t.outputs { + if t.accumulators[i].HasValue() { + result.SampleIDs = append(result.SampleIDs, v.ID) + 
result.Samples = append(result.Samples, t.accumulators[i].ValueFunc()) + } + } + return result +} + +func (t *scalarTable) size() int { + return len(t.outputs) +} + +func hashMetric(metric labels.Labels, without bool, grouping []string, buf []byte) (uint64, string, labels.Labels) { + buf = buf[:0] + if without { + lb := labels.NewBuilder(metric) + lb.Del(grouping...) + key, bytes := metric.HashWithoutLabels(buf, grouping...) + return key, string(bytes), lb.Labels(nil) + } + + if len(grouping) == 0 { + return 0, "", labels.Labels{} + } + + lb := labels.NewBuilder(metric) + lb.Keep(grouping...) + key, bytes := metric.HashForLabels(buf, grouping...) + return key, string(bytes), lb.Labels(nil) +} + +type newAccumulatorFunc func() *accumulator + +type accumulator struct { + AddFunc func(v float64) + ValueFunc func() float64 + HasValue func() bool + Reset func(arg float64) +} + +func makeAccumulatorFunc(expr parser.ItemType) (newAccumulatorFunc, error) { + t := parser.ItemTypeStr[expr] + switch t { + case "sum": + return func() *accumulator { + var value float64 + var hasValue bool + + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + value += v + }, + ValueFunc: func() float64 { return value }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + value = 0 + }, + } + }, nil + case "max": + return func() *accumulator { + var value float64 + var hasValue bool + + return &accumulator{ + AddFunc: func(v float64) { + if !hasValue { + value = v + } else { + value = math.Max(value, v) + } + hasValue = true + }, + ValueFunc: func() float64 { return value }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + value = 0 + }, + } + }, nil + case "min": + return func() *accumulator { + var value float64 + var hasValue bool + + return &accumulator{ + AddFunc: func(v float64) { + if !hasValue { + value = v + } else { + value = math.Min(value, v) + } + hasValue = true + }, + ValueFunc: func() float64 { return value }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + value = 0 + }, + } + }, nil + case "count": + return func() *accumulator { + var value float64 + var hasValue bool + + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + value += 1 + }, + ValueFunc: func() float64 { return value }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + value = 0 + }, + } + }, nil + case "avg": + return func() *accumulator { + var count, sum float64 + var hasValue bool + + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + count += 1 + sum += v + }, + ValueFunc: func() float64 { return sum / count }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + sum = 0 + count = 0 + }, + } + }, nil + case "group": + return func() *accumulator { + var hasValue bool + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + }, + ValueFunc: func() float64 { return 1 }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + }, + } + }, nil + case "stddev": + return func() *accumulator { + var count float64 + var mean, cMean float64 + var aux, cAux float64 + var hasValue bool + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + count++ + delta := v - (mean + cMean) + mean, cMean = function.KahanSumInc(delta/count, mean, cMean) + aux, cAux = function.KahanSumInc(delta*(v-(mean+cMean)), aux, cAux) + }, + 
ValueFunc: func() float64 { return math.Sqrt((aux + cAux) / count) }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + count = 0 + mean = 0 + cMean = 0 + aux = 0 + cAux = 0 + }, + } + }, nil + case "stdvar": + return func() *accumulator { + var count float64 + var mean, cMean float64 + var aux, cAux float64 + var hasValue bool + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + count++ + delta := v - (mean + cMean) + mean, cMean = function.KahanSumInc(delta/count, mean, cMean) + aux, cAux = function.KahanSumInc(delta*(v-(mean+cMean)), aux, cAux) + }, + ValueFunc: func() float64 { return (aux + cAux) / count }, + HasValue: func() bool { return hasValue }, + Reset: func(_ float64) { + hasValue = false + count = 0 + mean = 0 + cMean = 0 + aux = 0 + cAux = 0 + }, + } + }, nil + case "quantile": + return func() *accumulator { + var hasValue bool + var arg float64 + points := make([]float64, 0) + return &accumulator{ + AddFunc: func(v float64) { + hasValue = true + points = append(points, v) + }, + ValueFunc: func() float64 { + return quantile(arg, points) + }, + HasValue: func() bool { return hasValue }, + Reset: func(a float64) { + hasValue = false + arg = a + points = points[:0] + }, + } + }, nil + } + msg := fmt.Sprintf("unknown aggregation function %s", t) + return nil, errors.Wrap(parse.ErrNotSupportedExpr, msg) +} + +func quantile(q float64, points []float64) float64 { + if len(points) == 0 || math.IsNaN(q) { + return math.NaN() + } + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + sort.Float64s(points) + + n := float64(len(points)) + // When the quantile lies between two samples, + // we use a weighted average of the two samples. + rank := q * (n - 1) + + lowerIndex := math.Max(0, math.Floor(rank)) + upperIndex := math.Min(n-1, lowerIndex+1) + + weight := rank - math.Floor(rank) + return points[int(lowerIndex)]*(1-weight) + points[int(upperIndex)]*weight +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/aggregate/vector_table.go b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/vector_table.go new file mode 100644 index 00000000000..ef4b18fb6fc --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/aggregate/vector_table.go @@ -0,0 +1,96 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package aggregate + +import ( + "fmt" + + "github.com/efficientgo/core/errors" + + "github.com/prometheus/prometheus/promql/parser" + "gonum.org/v1/gonum/floats" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" +) + +type vectorAccumulator func([]float64) float64 + +type vectorTable struct { + timestamp int64 + value float64 + hasValue bool + accumulator vectorAccumulator +} + +func newVectorizedTables(stepsBatch int, a parser.ItemType) ([]aggregateTable, error) { + tables := make([]aggregateTable, stepsBatch) + for i := 0; i < len(tables); i++ { + accumulator, err := newVectorAccumulator(a) + if err != nil { + return nil, err + } + tables[i] = newVectorizedTable(accumulator) + } + + return tables, nil +} + +func newVectorizedTable(a vectorAccumulator) *vectorTable { + return &vectorTable{ + accumulator: a, + } +} + +func (t *vectorTable) aggregate(_ float64, vector model.StepVector) { + if len(vector.SampleIDs) == 0 { + t.hasValue = false + return + } + t.hasValue = true + t.timestamp = vector.T + t.value = t.accumulator(vector.Samples) +} + +func (t *vectorTable) toVector(pool *model.VectorPool) model.StepVector { + result := pool.GetStepVector(t.timestamp) + if !t.hasValue { + return result + } + + result.T = t.timestamp + result.SampleIDs = append(result.SampleIDs, 0) + result.Samples = append(result.Samples, t.value) + return result +} + +func (t *vectorTable) size() int { + return 1 +} + +func newVectorAccumulator(expr parser.ItemType) (vectorAccumulator, error) { + t := parser.ItemTypeStr[expr] + switch t { + case "sum": + return floats.Sum, nil + case "max": + return floats.Max, nil + case "min": + return floats.Min, nil + case "count": + return func(in []float64) float64 { + return float64(len(in)) + }, nil + case "avg": + return func(in []float64) float64 { + return floats.Sum(in) / float64(len(in)) + }, nil + case "group": + return func(in []float64) float64 { + return 1 + }, nil + } + msg := fmt.Sprintf("unknown aggregation function %s", t) + return nil, errors.Wrap(parse.ErrNotSupportedExpr, msg) +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/binary/index.go b/vendor/github.com/thanos-community/promql-engine/execution/binary/index.go new file mode 100644 index 00000000000..4fb4c7a3e0d --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/binary/index.go @@ -0,0 +1,35 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package binary + +type outputIndex interface { + outputSamples(inputSampleID uint64) []uint64 +} + +type highCardinalityIndex struct { + result []uint64 + index []*uint64 +} + +func newHighCardIndex(index []*uint64) *highCardinalityIndex { + return &highCardinalityIndex{ + result: make([]uint64, 1), + index: index, + } +} + +func (h *highCardinalityIndex) outputSamples(inputSampleID uint64) []uint64 { + outputSampleID := h.index[inputSampleID] + if outputSampleID == nil { + return nil + } + h.result[0] = *outputSampleID + return h.result +} + +type lowCardinalityIndex [][]uint64 + +func (l lowCardinalityIndex) outputSamples(inputSampleID uint64) []uint64 { + return l[inputSampleID] +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/binary/scalar.go b/vendor/github.com/thanos-community/promql-engine/execution/binary/scalar.go new file mode 100644 index 00000000000..8b614df0203 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/binary/scalar.go @@ -0,0 +1,188 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package binary + +import ( + "context" + "fmt" + "math" + "sync" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/function" + "github.com/thanos-community/promql-engine/execution/model" +) + +type ScalarSide int + +const ( + ScalarSideBoth ScalarSide = iota + ScalarSideLeft + ScalarSideRight +) + +// scalarOperator evaluates expressions where one operand is a scalarOperator. +type scalarOperator struct { + seriesOnce sync.Once + series []labels.Labels + + pool *model.VectorPool + scalar model.VectorOperator + next model.VectorOperator + getOperands getOperandsFunc + operandValIdx int + operation operation + opType parser.ItemType + + // If true then return the comparison result as 0/1. + returnBool bool + + // Keep the result if both sides are scalars. + bothScalars bool +} + +func NewScalar( + pool *model.VectorPool, + next model.VectorOperator, + scalar model.VectorOperator, + op parser.ItemType, + scalarSide ScalarSide, + returnBool bool, +) (*scalarOperator, error) { + binaryOperation, err := newOperation(op, scalarSide != ScalarSideBoth) + if err != nil { + return nil, err + } + // operandValIdx 0 means to get lhs as the return value + // while 1 means to get rhs as the return value. 
+ operandValIdx := 0 + getOperands := getOperandsScalarRight + if scalarSide == ScalarSideLeft { + getOperands = getOperandsScalarLeft + operandValIdx = 1 + } + + return &scalarOperator{ + pool: pool, + next: next, + scalar: scalar, + operation: binaryOperation, + opType: op, + getOperands: getOperands, + operandValIdx: operandValIdx, + returnBool: returnBool, + bothScalars: scalarSide == ScalarSideBoth, + }, nil +} + +func (o *scalarOperator) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*scalarOperator] %s", parser.ItemTypeStr[o.opType]), []model.VectorOperator{o.next, o.scalar} +} + +func (o *scalarOperator) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + o.seriesOnce.Do(func() { err = o.loadSeries(ctx) }) + if err != nil { + return nil, err + } + return o.series, nil +} + +func (o *scalarOperator) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + in, err := o.next.Next(ctx) + if err != nil { + return nil, err + } + if in == nil { + return nil, nil + } + o.seriesOnce.Do(func() { err = o.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + scalarIn, err := o.scalar.Next(ctx) + if err != nil { + return nil, err + } + + out := o.pool.GetVectorBatch() + for v, vector := range in { + step := o.pool.GetStepVector(vector.T) + for i := range vector.Samples { + scalarVal := math.NaN() + if len(scalarIn) > v && len(scalarIn[v].Samples) > 0 { + scalarVal = scalarIn[v].Samples[0] + } + + operands := o.getOperands(vector, i, scalarVal) + val, keep := o.operation(operands, o.operandValIdx) + if o.returnBool { + if !o.bothScalars { + val = 0.0 + if keep { + val = 1.0 + } + } + } else if !keep { + continue + } + step.Samples = append(step.Samples, val) + step.SampleIDs = append(step.SampleIDs, vector.SampleIDs[i]) + } + out = append(out, step) + o.next.GetPool().PutStepVector(vector) + } + + for i := range scalarIn { + o.scalar.GetPool().PutStepVector(scalarIn[i]) + } + + o.next.GetPool().PutVectors(in) + o.scalar.GetPool().PutVectors(scalarIn) + + return out, nil +} + +func (o *scalarOperator) GetPool() *model.VectorPool { + return o.pool +} + +func (o *scalarOperator) loadSeries(ctx context.Context) error { + vectorSeries, err := o.next.Series(ctx) + if err != nil { + return err + } + series := make([]labels.Labels, len(vectorSeries)) + for i := range vectorSeries { + if vectorSeries[i] != nil { + lbls := vectorSeries[i] + if !o.opType.IsComparisonOperator() { + lbls, _ = function.DropMetricName(lbls.Copy()) + } + series[i] = lbls + } + } + + o.series = series + return nil +} + +type getOperandsFunc func(v model.StepVector, i int, scalar float64) [2]float64 + +func getOperandsScalarLeft(v model.StepVector, i int, scalar float64) [2]float64 { + return [2]float64{scalar, v.Samples[i]} +} + +func getOperandsScalarRight(v model.StepVector, i int, scalar float64) [2]float64 { + return [2]float64{v.Samples[i], scalar} +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/binary/table.go b/vendor/github.com/thanos-community/promql-engine/execution/binary/table.go new file mode 100644 index 00000000000..0ccf70e00fa --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/binary/table.go @@ -0,0 +1,212 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package binary + +import ( + "math" + + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" +) + +type binOpSide string + +const ( + lhBinOpSide binOpSide = "left" + rhBinOpSide binOpSide = "right" +) + +type errManyToManyMatch struct { + sampleID uint64 + duplicateSampleID uint64 + side binOpSide +} + +func newManyToManyMatchError(sampleID, duplicateSampleID uint64, side binOpSide) *errManyToManyMatch { + return &errManyToManyMatch{ + sampleID: sampleID, + duplicateSampleID: duplicateSampleID, + side: side, + } +} + +type outputSample struct { + lhT int64 + rhT int64 + lhSampleID uint64 + rhSampleID uint64 + v float64 +} + +type table struct { + pool *model.VectorPool + + operation operation + card parser.VectorMatchCardinality + + outputValues []outputSample + // highCardOutputIndex is a mapping from series ID of the high cardinality + // operator to an output series ID. + // During joins, each high cardinality series that has a matching + // low cardinality series will map to exactly one output series. + highCardOutputIndex outputIndex + // lowCardOutputIndex is a mapping from series ID of the low cardinality + // operator to an output series ID. + // Each series from the low cardinality operator can join with many + // series of the high cardinality operator. + lowCardOutputIndex outputIndex +} + +func newTable( + pool *model.VectorPool, + card parser.VectorMatchCardinality, + operation operation, + outputValues []outputSample, + highCardOutputCache outputIndex, + lowCardOutputCache outputIndex, +) *table { + for i := range outputValues { + outputValues[i].lhT = -1 + outputValues[i].rhT = -1 + } + return &table{ + pool: pool, + card: card, + + operation: operation, + outputValues: outputValues, + highCardOutputIndex: highCardOutputCache, + lowCardOutputIndex: lowCardOutputCache, + } +} + +func (t *table) execBinaryOperation(lhs model.StepVector, rhs model.StepVector, returnBool bool) (model.StepVector, *errManyToManyMatch) { + ts := lhs.T + step := t.pool.GetStepVector(ts) + + lhsIndex, rhsIndex := t.highCardOutputIndex, t.lowCardOutputIndex + if t.card == parser.CardOneToMany { + lhsIndex, rhsIndex = rhsIndex, lhsIndex + } + + for i, sampleID := range lhs.SampleIDs { + lhsVal := lhs.Samples[i] + outputSampleIDs := lhsIndex.outputSamples(sampleID) + for _, outputSampleID := range outputSampleIDs { + if t.card != parser.CardManyToOne && t.outputValues[outputSampleID].lhT == ts { + prevSampleID := t.outputValues[outputSampleID].lhSampleID + return model.StepVector{}, newManyToManyMatchError(prevSampleID, sampleID, lhBinOpSide) + } + + t.outputValues[outputSampleID].lhSampleID = sampleID + t.outputValues[outputSampleID].lhT = lhs.T + t.outputValues[outputSampleID].v = lhsVal + } + } + + for i, sampleID := range rhs.SampleIDs { + rhVal := rhs.Samples[i] + outputSampleIDs := rhsIndex.outputSamples(sampleID) + for _, outputSampleID := range outputSampleIDs { + outputSample := t.outputValues[outputSampleID] + if rhs.T != outputSample.lhT { + continue + } + if t.card != parser.CardOneToMany && outputSample.rhT == rhs.T { + prevSampleID := t.outputValues[outputSampleID].rhSampleID + return model.StepVector{}, newManyToManyMatchError(prevSampleID, sampleID, rhBinOpSide) + } + t.outputValues[outputSampleID].rhSampleID = sampleID + t.outputValues[outputSampleID].rhT = rhs.T + + outputVal, keep := t.operation([2]float64{outputSample.v, rhVal}, 0) + if returnBool { + 
outputVal = 0 + if keep { + outputVal = 1 + } + } else if !keep { + continue + } + step.SampleIDs = append(step.SampleIDs, outputSampleID) + step.Samples = append(step.Samples, outputVal) + } + } + + return step, nil +} + +// operands is a length 2 array which contains lhs and rhs. +// valueIdx is used in vector comparison operator to decide +// which operand value we should return. +type operation func(operands [2]float64, valueIdx int) (float64, bool) + +var operations = map[string]operation{ + "+": func(operands [2]float64, valueIdx int) (float64, bool) { return operands[0] + operands[1], true }, + "-": func(operands [2]float64, valueIdx int) (float64, bool) { return operands[0] - operands[1], true }, + "*": func(operands [2]float64, valueIdx int) (float64, bool) { return operands[0] * operands[1], true }, + "/": func(operands [2]float64, valueIdx int) (float64, bool) { return operands[0] / operands[1], true }, + "^": func(operands [2]float64, valueIdx int) (float64, bool) { + return math.Pow(operands[0], operands[1]), true + }, + "%": func(operands [2]float64, valueIdx int) (float64, bool) { + return math.Mod(operands[0], operands[1]), true + }, + "==": func(operands [2]float64, valueIdx int) (float64, bool) { return btof(operands[0] == operands[1]), true }, + "!=": func(operands [2]float64, valueIdx int) (float64, bool) { return btof(operands[0] != operands[1]), true }, + ">": func(operands [2]float64, valueIdx int) (float64, bool) { return btof(operands[0] > operands[1]), true }, + "<": func(operands [2]float64, valueIdx int) (float64, bool) { return btof(operands[0] < operands[1]), true }, + ">=": func(operands [2]float64, valueIdx int) (float64, bool) { return btof(operands[0] >= operands[1]), true }, + "<=": func(operands [2]float64, valueIdx int) (float64, bool) { return btof(operands[0] <= operands[1]), true }, + "atan2": func(operands [2]float64, valueIdx int) (float64, bool) { + return math.Atan2(operands[0], operands[1]), true + }, +} + +// For vector, those operations are handled differently to check whether to keep +// the value or not. https://github.com/prometheus/prometheus/blob/main/promql/engine.go#L2229 +var vectorBinaryOperations = map[string]operation{ + "==": func(operands [2]float64, valueIdx int) (float64, bool) { + return operands[valueIdx], operands[0] == operands[1] + }, + "!=": func(operands [2]float64, valueIdx int) (float64, bool) { + return operands[valueIdx], operands[0] != operands[1] + }, + ">": func(operands [2]float64, valueIdx int) (float64, bool) { + return operands[valueIdx], operands[0] > operands[1] + }, + "<": func(operands [2]float64, valueIdx int) (float64, bool) { + return operands[valueIdx], operands[0] < operands[1] + }, + ">=": func(operands [2]float64, valueIdx int) (float64, bool) { + return operands[valueIdx], operands[0] >= operands[1] + }, + "<=": func(operands [2]float64, valueIdx int) (float64, bool) { + return operands[valueIdx], operands[0] <= operands[1] + }, +} + +func newOperation(expr parser.ItemType, vectorBinOp bool) (operation, error) { + t := parser.ItemTypeStr[expr] + if expr.IsComparisonOperator() && vectorBinOp { + if o, ok := vectorBinaryOperations[t]; ok { + return o, nil + } + return nil, parse.UnsupportedOperationErr(expr) + } + if o, ok := operations[t]; ok { + return o, nil + } + return nil, parse.UnsupportedOperationErr(expr) +} + +// btof returns 1 if b is true, 0 otherwise. 
+func btof(b bool) float64 { + if b { + return 1 + } + return 0 +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/binary/vector.go b/vendor/github.com/thanos-community/promql-engine/execution/binary/vector.go new file mode 100644 index 00000000000..58015e36384 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/binary/vector.go @@ -0,0 +1,342 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package binary + +import ( + "context" + "fmt" + "sync" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "golang.org/x/exp/slices" + + "github.com/thanos-community/promql-engine/execution/model" +) + +// vectorOperator evaluates an expression between two step vectors. +type vectorOperator struct { + pool *model.VectorPool + once sync.Once + + lhs model.VectorOperator + rhs model.VectorOperator + matching *parser.VectorMatching + groupingLabels []string + operation operation + opType parser.ItemType + + lhSampleIDs []labels.Labels + rhSampleIDs []labels.Labels + + // series contains the output series of the operator + series []labels.Labels + // The outputCache is an internal cache used to calculate + // the binary operation of the lhs and rhs operator. + outputCache []outputSample + // table is used to calculate the binary operation of two step vectors between + // the lhs and rhs operator. + table *table + + // If true then 1/0 needs to be returned instead of the value. + returnBool bool +} + +func NewVectorOperator( + pool *model.VectorPool, + lhs model.VectorOperator, + rhs model.VectorOperator, + matching *parser.VectorMatching, + operation parser.ItemType, + returnBool bool, +) (model.VectorOperator, error) { + op, err := newOperation(operation, true) + if err != nil { + return nil, err + } + + // Make a copy of MatchingLabels to avoid potential side-effects + // in some downstream operation. 
+ groupings := make([]string, len(matching.MatchingLabels)) + copy(groupings, matching.MatchingLabels) + slices.Sort(groupings) + + return &vectorOperator{ + pool: pool, + lhs: lhs, + rhs: rhs, + matching: matching, + groupingLabels: groupings, + operation: op, + opType: operation, + returnBool: returnBool, + }, nil +} + +func (o *vectorOperator) Explain() (me string, next []model.VectorOperator) { + if o.matching.On { + return fmt.Sprintf("[*vectorOperator] %s %v on %v group %v", parser.ItemTypeStr[o.opType], o.matching.Card.String(), o.matching.MatchingLabels, o.matching.Include), []model.VectorOperator{o.lhs, o.rhs} + } + return fmt.Sprintf("[*vectorOperator] %s %v ignoring %v group %v", parser.ItemTypeStr[o.opType], o.matching.Card.String(), o.matching.On, o.matching.Include), []model.VectorOperator{o.lhs, o.rhs} +} + +func (o *vectorOperator) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + o.once.Do(func() { err = o.initOutputs(ctx) }) + if err != nil { + return nil, err + } + + return o.series, nil +} + +func (o *vectorOperator) initOutputs(ctx context.Context) error { + var highCardSide []labels.Labels + var errChan = make(chan error, 1) + go func() { + var err error + highCardSide, err = o.lhs.Series(ctx) + if err != nil { + errChan <- err + } + close(errChan) + }() + + lowCardSide, err := o.rhs.Series(ctx) + if err != nil { + return err + } + if err := <-errChan; err != nil { + return err + } + + o.lhSampleIDs = highCardSide + o.rhSampleIDs = lowCardSide + + if o.matching.Card == parser.CardOneToMany { + highCardSide, lowCardSide = lowCardSide, highCardSide + } + + buf := make([]byte, 1024) + var includeLabels []string + if len(o.matching.Include) > 0 { + includeLabels = o.matching.Include + } + keepLabels := o.matching.Card != parser.CardOneToOne + keepName := o.opType.IsComparisonOperator() + highCardHashes, highCardInputMap := o.hashSeries(highCardSide, keepLabels, keepName, buf) + lowCardHashes, lowCardInputMap := o.hashSeries(lowCardSide, keepLabels, keepName, buf) + output, highCardOutputIndex, lowCardOutputIndex := o.join(highCardHashes, highCardInputMap, lowCardHashes, lowCardInputMap, includeLabels) + + series := make([]labels.Labels, len(output)) + for _, s := range output { + series[s.ID] = s.Metric + } + o.series = series + + o.outputCache = make([]outputSample, len(series)) + for i := range o.outputCache { + o.outputCache[i].lhT = -1 + } + o.pool.SetStepSize(len(highCardSide)) + + o.table = newTable( + o.pool, + o.matching.Card, + o.operation, + o.outputCache, + newHighCardIndex(highCardOutputIndex), + lowCardinalityIndex(lowCardOutputIndex), + ) + + return nil +} + +func (o *vectorOperator) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + lhs, err := o.lhs.Next(ctx) + if err != nil { + return nil, err + } + rhs, err := o.rhs.Next(ctx) + if err != nil { + return nil, err + } + + // TODO(fpetkovski): When one operator becomes empty, + // we might want to drain or close the other one. + // We don't have a concept of closing an operator yet. 
+ if len(lhs) == 0 || len(rhs) == 0 { + return nil, nil + } + + o.once.Do(func() { err = o.initOutputs(ctx) }) + if err != nil { + return nil, err + } + + batch := o.pool.GetVectorBatch() + for i, vector := range lhs { + if i < len(rhs) { + step, err := o.table.execBinaryOperation(lhs[i], rhs[i], o.returnBool) + if err == nil { + batch = append(batch, step) + o.rhs.GetPool().PutStepVector(rhs[i]) + continue + } + + var sampleID, duplicateSampleID labels.Labels + switch err.side { + case lhBinOpSide: + sampleID = o.lhSampleIDs[err.sampleID] + duplicateSampleID = o.lhSampleIDs[err.duplicateSampleID] + case rhBinOpSide: + sampleID = o.rhSampleIDs[err.sampleID] + duplicateSampleID = o.rhSampleIDs[err.duplicateSampleID] + } + group := sampleID.MatchLabels(o.matching.On, o.matching.MatchingLabels...) + msg := "found duplicate series for the match group %s on the %s hand-side of the operation: [%s, %s]" + + ";many-to-many matching not allowed: matching labels must be unique on one side" + return nil, errors.Newf(msg, group, err.side, sampleID.String(), duplicateSampleID.String()) + } + o.lhs.GetPool().PutStepVector(vector) + } + o.lhs.GetPool().PutVectors(lhs) + o.rhs.GetPool().PutVectors(rhs) + + return batch, nil +} + +func (o *vectorOperator) GetPool() *model.VectorPool { + return o.pool +} + +// hashSeries calculates the hash of each series from an input operator. +// Since series from the high cardinality operator can map to multiple output series, +// hashSeries returns an index from hash to a slice of resulting series, and +// a map from input series ID to output series ID. +// The latter can be used to build an array backed index from input model.Series to output model.Series, +// avoiding expensive hashmap lookups. +func (o *vectorOperator) hashSeries(series []labels.Labels, keepLabels, keepName bool, buf []byte) (map[uint64][]model.Series, map[uint64][]uint64) { + hashes := make(map[uint64][]model.Series) + inputIndex := make(map[uint64][]uint64) + for i, s := range series { + sig, lbls := signature(s, !o.matching.On, o.groupingLabels, keepLabels, keepName, buf) + if _, ok := hashes[sig]; !ok { + hashes[sig] = make([]model.Series, 0, 1) + inputIndex[sig] = make([]uint64, 0, 1) + } + hashes[sig] = append(hashes[sig], model.Series{ + ID: uint64(i), + Metric: lbls, + }) + inputIndex[sig] = append(inputIndex[sig], uint64(i)) + } + + return hashes, inputIndex +} + +// join performs a join between series from the high cardinality and low cardinality operators. +// It does that by using hash maps which point from series hash to the output series. +// It also returns array backed indices for the high cardinality and low cardinality operators, +// pointing from input model.Series ID to output model.Series ID. +// The high cardinality operator can fail to join, which is why its index contains nullable values. +// The low cardinality operator can join to multiple high cardinality series, which is why its index +// points to an array of output series. +func (o *vectorOperator) join( + highCardHashes map[uint64][]model.Series, + highCardInputIndex map[uint64][]uint64, + lowCardHashes map[uint64][]model.Series, + lowCardInputIndex map[uint64][]uint64, + includeLabels []string, +) ([]model.Series, []*uint64, [][]uint64) { + // Output index points from output series ID + // to the actual series. + outputIndex := make([]model.Series, 0) + + // Prune high cardinality series which do not have a + // matching low cardinality series. 
+ outputSize := 0 + for hash, series := range highCardHashes { + outputSize += len(series) + if _, ok := lowCardHashes[hash]; !ok { + delete(highCardHashes, hash) + continue + } + } + lowCardOutputSize := 0 + for _, lowCardOutputs := range lowCardInputIndex { + lowCardOutputSize += len(lowCardOutputs) + } + + highCardOutputIndex := make([]*uint64, outputSize) + lowCardOutputIndex := make([][]uint64, lowCardOutputSize) + for hash, highCardSeries := range highCardHashes { + for _, lowCardSeriesID := range lowCardInputIndex[hash] { + // Each low cardinality series can map to multiple output series. + lowCardOutputIndex[lowCardSeriesID] = make([]uint64, 0, len(highCardSeries)) + } + + lowCardSeries := lowCardHashes[hash][0] + for i, output := range highCardSeries { + outputSeries := buildOutputSeries(uint64(len(outputIndex)), output, lowCardSeries, includeLabels) + outputIndex = append(outputIndex, outputSeries) + + highCardSeriesID := highCardInputIndex[hash][i] + highCardOutputIndex[highCardSeriesID] = &outputSeries.ID + + for _, lowCardSeriesID := range lowCardInputIndex[hash] { + lowCardOutputIndex[lowCardSeriesID] = append(lowCardOutputIndex[lowCardSeriesID], outputSeries.ID) + } + } + } + + return outputIndex, highCardOutputIndex, lowCardOutputIndex +} + +func signature(metric labels.Labels, without bool, grouping []string, keepOriginalLabels, keepName bool, buf []byte) (uint64, labels.Labels) { + buf = buf[:0] + lb := labels.NewBuilder(metric) + if !keepName { + lb = lb.Del(labels.MetricName) + } + if without { + dropLabels := grouping + if !keepName { + dropLabels = append(grouping, labels.MetricName) + } + key, _ := metric.HashWithoutLabels(buf, dropLabels...) + if !keepOriginalLabels { + lb.Del(dropLabels...) + } + return key, lb.Labels(nil) + } + + if !keepOriginalLabels { + lb.Keep(grouping...) + } + if len(grouping) == 0 { + return 0, lb.Labels(nil) + } + + key, _ := metric.HashForLabels(buf, grouping...) + return key, lb.Labels(nil) +} + +func buildOutputSeries(seriesID uint64, highCardSeries, lowCardSeries model.Series, includeLabels []string) model.Series { + metric := highCardSeries.Metric + if len(includeLabels) > 0 { + lowCardLabels := labels.NewBuilder(lowCardSeries.Metric). + Keep(includeLabels...). + Labels(nil) + metric = append(metric, lowCardLabels...) + } + return model.Series{ID: seriesID, Metric: metric} +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go b/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go new file mode 100644 index 00000000000..23b45ae86ef --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go @@ -0,0 +1,186 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package exchange + +import ( + "context" + "sync" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + + "github.com/thanos-community/promql-engine/execution/model" +) + +type errorChan chan error + +func (c errorChan) getError() error { + for err := range c { + if err != nil { + return err + } + } + + return nil +} + +type coalesceOperator struct { + once sync.Once + series []labels.Labels + + pool *model.VectorPool + mu sync.Mutex + wg sync.WaitGroup + operators []model.VectorOperator + sampleOffsets []uint64 +} + +func NewCoalesce(pool *model.VectorPool, operators ...model.VectorOperator) model.VectorOperator { + return &coalesceOperator{ + pool: pool, + operators: operators, + sampleOffsets: make([]uint64, len(operators)), + } +} + +func (c *coalesceOperator) Explain() (me string, next []model.VectorOperator) { + return "[*coalesceOperator]", c.operators +} + +func (c *coalesceOperator) GetPool() *model.VectorPool { + return c.pool +} + +func (c *coalesceOperator) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + c.once.Do(func() { err = c.loadSeries(ctx) }) + if err != nil { + return nil, err + } + return c.series, nil +} + +func (c *coalesceOperator) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + var err error + c.once.Do(func() { err = c.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + var out []model.StepVector = nil + var errChan = make(errorChan, len(c.operators)) + for idx, o := range c.operators { + c.wg.Add(1) + go func(opIdx int, o model.VectorOperator) { + defer c.wg.Done() + + in, err := o.Next(ctx) + if err != nil { + errChan <- err + return + } + if in == nil { + return + } + + for _, vector := range in { + for i := range vector.SampleIDs { + vector.SampleIDs[i] += c.sampleOffsets[opIdx] + } + } + + c.mu.Lock() + defer c.mu.Unlock() + + if len(in) > 0 && out == nil { + out = c.pool.GetVectorBatch() + for i := 0; i < len(in); i++ { + out = append(out, c.pool.GetStepVector(in[i].T)) + } + } + + for i := 0; i < len(in); i++ { + if len(in[i].Samples) > 0 { + out[i].T = in[i].T + } + + out[i].Samples = append(out[i].Samples, in[i].Samples...) + out[i].SampleIDs = append(out[i].SampleIDs, in[i].SampleIDs...) 
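+				// Recycle the input step vector now that its samples have been copied into the shared output.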
+ o.GetPool().PutStepVector(in[i]) + } + o.GetPool().PutVectors(in) + }(idx, o) + } + c.wg.Wait() + close(errChan) + + if err := errChan.getError(); err != nil { + return nil, err + } + + if out == nil { + return nil, nil + } + + return out, nil +} + +func (c *coalesceOperator) loadSeries(ctx context.Context) error { + var wg sync.WaitGroup + var mu sync.Mutex + var numSeries uint64 + allSeries := make([][]labels.Labels, len(c.operators)) + errChan := make(errorChan, len(c.operators)) + for i := 0; i < len(c.operators); i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + defer func() { + e := recover() + if e == nil { + return + } + + switch err := e.(type) { + case error: + errChan <- errors.Wrapf(err, "unexpected error") + } + + }() + series, err := c.operators[i].Series(ctx) + if err != nil { + errChan <- err + return + } + + allSeries[i] = series + mu.Lock() + numSeries += uint64(len(series)) + mu.Unlock() + }(i) + } + wg.Wait() + close(errChan) + if err := errChan.getError(); err != nil { + return err + } + + var offset uint64 + c.sampleOffsets = make([]uint64, len(c.operators)) + c.series = make([]labels.Labels, 0, numSeries) + for i, series := range allSeries { + c.sampleOffsets[i] = offset + c.series = append(c.series, series...) + offset += uint64(len(series)) + } + + c.pool.SetStepSize(len(c.series)) + return nil +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/exchange/concurrent.go b/vendor/github.com/thanos-community/promql-engine/execution/exchange/concurrent.go new file mode 100644 index 00000000000..7a64fda9a1a --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/exchange/concurrent.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package exchange + +import ( + "context" + "fmt" + "sync" + + "github.com/thanos-community/promql-engine/execution/model" + + "github.com/prometheus/prometheus/model/labels" +) + +type maybeStepVector struct { + err error + stepVector []model.StepVector +} + +type concurrencyOperator struct { + once sync.Once + next model.VectorOperator + buffer chan maybeStepVector + bufferSize int +} + +func NewConcurrent(next model.VectorOperator, bufferSize int) model.VectorOperator { + return &concurrencyOperator{ + next: next, + buffer: make(chan maybeStepVector, bufferSize), + bufferSize: bufferSize, + } +} + +func (c *concurrencyOperator) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*concurrencyOperator(buff=%v)]", c.bufferSize), []model.VectorOperator{c.next} +} + +func (c *concurrencyOperator) Series(ctx context.Context) ([]labels.Labels, error) { + return c.next.Series(ctx) +} + +func (c *concurrencyOperator) GetPool() *model.VectorPool { + return c.next.GetPool() +} + +func (c *concurrencyOperator) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + c.once.Do(func() { + go c.pull(ctx) + go c.drainBufferOnCancel(ctx) + }) + + r, ok := <-c.buffer + if !ok { + return nil, nil + } + if r.err != nil { + return nil, r.err + } + + return r.stepVector, nil +} + +func (c *concurrencyOperator) pull(ctx context.Context) { + defer close(c.buffer) + + for { + select { + case <-ctx.Done(): + c.buffer <- maybeStepVector{err: ctx.Err()} + return + default: + r, err := c.next.Next(ctx) + if err != nil { + c.buffer <- maybeStepVector{err: err} + return + } + if r == nil { + return + } + c.buffer <- maybeStepVector{stepVector: r} + } + } +} + +func (c *concurrencyOperator) drainBufferOnCancel(ctx context.Context) { + <-ctx.Done() + for range c.buffer { + } +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/execution.go b/vendor/github.com/thanos-community/promql-engine/execution/execution.go new file mode 100644 index 00000000000..c563d1bca2a --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/execution.go @@ -0,0 +1,338 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package execution + +import ( + "runtime" + "time" + + "github.com/prometheus/prometheus/promql" + + "github.com/thanos-community/promql-engine/execution/remote" + + "github.com/efficientgo/core/errors" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + + "github.com/prometheus/prometheus/model/labels" + + "github.com/thanos-community/promql-engine/execution/aggregate" + "github.com/thanos-community/promql-engine/execution/binary" + "github.com/thanos-community/promql-engine/execution/exchange" + "github.com/thanos-community/promql-engine/execution/function" + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" + "github.com/thanos-community/promql-engine/execution/scan" + "github.com/thanos-community/promql-engine/execution/step_invariant" + engstore "github.com/thanos-community/promql-engine/execution/storage" + "github.com/thanos-community/promql-engine/execution/unary" + "github.com/thanos-community/promql-engine/logicalplan" + "github.com/thanos-community/promql-engine/query" +) + +const stepsBatch = 10 + +// New creates new physical query execution for a given query expression which represents logical plan. +// TODO(bwplotka): Add definition (could be parameters for each execution operator) we can optimize - it would represent physical plan. +func New(expr parser.Expr, queryable storage.Queryable, mint, maxt time.Time, step, lookbackDelta time.Duration) (model.VectorOperator, error) { + opts := &query.Options{ + Start: mint, + End: maxt, + Step: step, + LookbackDelta: lookbackDelta, + StepsBatch: stepsBatch, + } + selectorPool := engstore.NewSelectorPool(queryable) + hints := storage.SelectHints{ + Start: mint.UnixMilli(), + End: maxt.UnixMilli(), + // TODO(fpetkovski): Adjust the step for sub-queries once they are supported. + Step: step.Milliseconds(), + } + return newOperator(expr, selectorPool, opts, hints) +} + +func newOperator(expr parser.Expr, storage *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + switch e := expr.(type) { + case *parser.NumberLiteral: + return scan.NewNumberLiteralSelector(model.NewVectorPool(stepsBatch), opts, e.Val), nil + + case *parser.VectorSelector: + start, end := getTimeRangesForVectorSelector(e, opts, 0) + hints.Start = start + hints.End = end + filter := storage.GetSelector(start, end, opts.Step.Milliseconds(), e.LabelMatchers, hints) + return newShardedVectorSelector(filter, opts, e.Offset) + + case *logicalplan.FilteredSelector: + start, end := getTimeRangesForVectorSelector(e.VectorSelector, opts, 0) + hints.Start = start + hints.End = end + selector := storage.GetFilteredSelector(start, end, opts.Step.Milliseconds(), e.LabelMatchers, e.Filters, hints) + return newShardedVectorSelector(selector, opts, e.Offset) + + case *parser.Call: + hints.Func = e.Func.Name + hints.Grouping = nil + hints.By = false + + if e.Func.Name == "histogram_quantile" { + nextOperators := make([]model.VectorOperator, len(e.Args)) + for i := range e.Args { + next, err := newOperator(e.Args[i], storage, opts, hints) + if err != nil { + return nil, err + } + nextOperators[i] = next + } + + return function.NewHistogramOperator(model.NewVectorPool(stepsBatch), e.Args, nextOperators, stepsBatch) + } + + // TODO(saswatamcode): Tracked in https://github.com/thanos-community/promql-engine/issues/23 + // Based on the category we can create an apt query plan. 
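+		// Resolve the function into a FunctionCall; range-vector (matrix selector) arguments below
+		// are sharded across concurrent scan operators and merged back together with a coalesce.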
+ call, err := function.NewFunctionCall(e.Func) + if err != nil { + return nil, err + } + + if e.Func.Variadic != 0 { + return nil, errors.Wrapf(parse.ErrNotImplemented, "got variadic function: %s", e) + } + + // TODO(saswatamcode): Range vector result might need new operator + // before it can be non-nested. https://github.com/thanos-community/promql-engine/issues/39 + for i := range e.Args { + switch t := e.Args[i].(type) { + case *parser.MatrixSelector: + if call == nil { + return nil, parse.ErrNotImplemented + } + + vs, filters, err := unpackVectorSelector(t) + if err != nil { + return nil, err + } + + start, end := getTimeRangesForVectorSelector(vs, opts, t.Range) + hints.Start = start + hints.End = end + hints.Range = t.Range.Milliseconds() + filter := storage.GetFilteredSelector(start, end, opts.Step.Milliseconds(), vs.LabelMatchers, filters, hints) + + numShards := runtime.GOMAXPROCS(0) / 2 + if numShards < 1 { + numShards = 1 + } + + operators := make([]model.VectorOperator, 0, numShards) + for i := 0; i < numShards; i++ { + operator := exchange.NewConcurrent( + scan.NewMatrixSelector(model.NewVectorPool(stepsBatch), filter, call, e, opts, t.Range, vs.Offset, i, numShards), + 2, + ) + operators = append(operators, operator) + } + + return exchange.NewCoalesce(model.NewVectorPool(stepsBatch), operators...), nil + } + } + + // Does not have matrix arg so create functionOperator normally. + nextOperators := make([]model.VectorOperator, len(e.Args)) + for i := range e.Args { + next, err := newOperator(e.Args[i], storage, opts, hints) + if err != nil { + return nil, err + } + nextOperators[i] = next + } + + return function.NewFunctionOperator(e, call, nextOperators, stepsBatch, opts) + + case *parser.AggregateExpr: + hints.Func = e.Op.String() + hints.Grouping = e.Grouping + hints.By = !e.Without + var paramOp model.VectorOperator + + next, err := newOperator(e.Expr, storage, opts, hints) + if err != nil { + return nil, err + } + + if e.Param != nil { + paramOp, err = newOperator(e.Param, storage, opts, hints) + if err != nil { + return nil, err + } + } + + if e.Op == parser.TOPK || e.Op == parser.BOTTOMK { + next, err = aggregate.NewKHashAggregate(model.NewVectorPool(stepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, stepsBatch) + } else { + next, err = aggregate.NewHashAggregate(model.NewVectorPool(stepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, stepsBatch) + } + + if err != nil { + return nil, err + } + + return exchange.NewConcurrent(next, 2), nil + + case *parser.BinaryExpr: + if e.LHS.Type() == parser.ValueTypeScalar || e.RHS.Type() == parser.ValueTypeScalar { + return newScalarBinaryOperator(e, storage, opts, hints) + } + + return newVectorBinaryOperator(e, storage, opts, hints) + + case *parser.ParenExpr: + return newOperator(e.Expr, storage, opts, hints) + + case *parser.StringLiteral: + // TODO(saswatamcode): This requires separate model with strings. + return nil, errors.Wrapf(parse.ErrNotImplemented, "got: %s", e) + + case *parser.UnaryExpr: + next, err := newOperator(e.Expr, storage, opts, hints) + if err != nil { + return nil, err + } + switch e.Op { + case parser.ADD: + return next, nil + case parser.SUB: + return unary.NewUnaryNegation(next, stepsBatch) + default: + // This shouldn't happen as Op was validated when parsing already + // https://github.com/prometheus/prometheus/blob/v2.38.0/promql/parser/parse.go#L573. 
+ return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s", e) + } + + case *parser.StepInvariantExpr: + switch t := e.Expr.(type) { + case *parser.NumberLiteral: + return scan.NewNumberLiteralSelector(model.NewVectorPool(stepsBatch), opts, t.Val), nil + } + next, err := newOperator(e.Expr, storage, opts.WithEndTime(opts.Start), hints) + if err != nil { + return nil, err + } + return step_invariant.NewStepInvariantOperator(model.NewVectorPool(stepsBatch), next, e.Expr, opts, stepsBatch) + + case logicalplan.Coalesce: + operators := make([]model.VectorOperator, len(e.Expressions)) + for i, expr := range e.Expressions { + operator, err := newOperator(expr, storage, opts, hints) + if err != nil { + return nil, err + } + operators[i] = operator + } + return exchange.NewCoalesce(model.NewVectorPool(stepsBatch), operators...), nil + + case *logicalplan.RemoteExecution: + qry, err := e.Engine.NewRangeQuery(&promql.QueryOpts{}, e.Query, opts.Start, opts.End, opts.Step) + if err != nil { + return nil, err + } + + return exchange.NewConcurrent(remote.NewExecution(qry, model.NewVectorPool(stepsBatch), opts), 2), nil + + default: + return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s", e) + } +} + +func unpackVectorSelector(t *parser.MatrixSelector) (*parser.VectorSelector, []*labels.Matcher, error) { + switch t := t.VectorSelector.(type) { + case *parser.VectorSelector: + return t, nil, nil + case *logicalplan.FilteredSelector: + return t.VectorSelector, t.Filters, nil + default: + return nil, nil, parse.ErrNotSupportedExpr + } +} + +func newShardedVectorSelector(selector engstore.SeriesSelector, opts *query.Options, offset time.Duration) (model.VectorOperator, error) { + numShards := runtime.GOMAXPROCS(0) / 2 + if numShards < 1 { + numShards = 1 + } + operators := make([]model.VectorOperator, 0, numShards) + for i := 0; i < numShards; i++ { + operator := exchange.NewConcurrent( + scan.NewVectorSelector( + model.NewVectorPool(stepsBatch), selector, opts, offset, i, numShards), 2) + operators = append(operators, operator) + } + + return exchange.NewCoalesce(model.NewVectorPool(stepsBatch), operators...), nil +} + +func newVectorBinaryOperator(e *parser.BinaryExpr, selectorPool *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + leftOperator, err := newOperator(e.LHS, selectorPool, opts, hints) + if err != nil { + return nil, err + } + rightOperator, err := newOperator(e.RHS, selectorPool, opts, hints) + if err != nil { + return nil, err + } + return binary.NewVectorOperator(model.NewVectorPool(stepsBatch), leftOperator, rightOperator, e.VectorMatching, e.Op, e.ReturnBool) +} + +func newScalarBinaryOperator(e *parser.BinaryExpr, selectorPool *engstore.SelectorPool, opts *query.Options, hints storage.SelectHints) (model.VectorOperator, error) { + lhs, err := newOperator(e.LHS, selectorPool, opts, hints) + if err != nil { + return nil, err + } + rhs, err := newOperator(e.RHS, selectorPool, opts, hints) + if err != nil { + return nil, err + } + + scalarSide := binary.ScalarSideRight + if e.LHS.Type() == parser.ValueTypeScalar && e.RHS.Type() == parser.ValueTypeScalar { + scalarSide = binary.ScalarSideBoth + } else if e.LHS.Type() == parser.ValueTypeScalar { + rhs, lhs = lhs, rhs + scalarSide = binary.ScalarSideLeft + } + + return binary.NewScalar(model.NewVectorPool(stepsBatch), lhs, rhs, e.Op, scalarSide, e.ReturnBool) +} + +// Copy from https://github.com/prometheus/prometheus/blob/v2.39.1/promql/engine.go#L791. 
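+// getTimeRangesForVectorSelector widens the selector's start by the lookback delta
+// (or by the evaluation range for range selectors) and shifts both bounds by the
+// original offset, mirroring the upstream PromQL engine behaviour.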
+func getTimeRangesForVectorSelector(n *parser.VectorSelector, opts *query.Options, evalRange time.Duration) (int64, int64) { + start := opts.Start.UnixMilli() + end := opts.End.UnixMilli() + if n.Timestamp != nil { + start = *n.Timestamp + end = *n.Timestamp + } + if evalRange == 0 { + start -= opts.LookbackDelta.Milliseconds() + } else { + start -= evalRange.Milliseconds() + } + offset := n.OriginalOffset.Milliseconds() + return start - offset, end - offset +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go b/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go new file mode 100644 index 00000000000..1229f2da2d4 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go @@ -0,0 +1,668 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package function + +import ( + "fmt" + "math" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/parse" +) + +var InvalidSample = promql.Sample{Point: promql.Point{T: -1, V: 0}} + +type FunctionArgs struct { + Labels labels.Labels + Points []promql.Point + StepTime int64 + SelectRange int64 + ScalarPoints []float64 + Offset int64 +} + +// FunctionCall represents functions as defined in https://prometheus.io/docs/prometheus/latest/querying/functions/ +type FunctionCall func(f FunctionArgs) promql.Sample + +func simpleFunc(f func(float64) float64) FunctionCall { + return func(fa FunctionArgs) promql.Sample { + if len(fa.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: fa.Labels, + Point: promql.Point{ + T: fa.StepTime, + V: f(fa.Points[0].V), + }, + } + } + +} + +var Funcs = map[string]FunctionCall{ + "abs": simpleFunc(math.Abs), + "ceil": simpleFunc(math.Ceil), + "exp": simpleFunc(math.Exp), + "floor": simpleFunc(math.Floor), + "sqrt": simpleFunc(math.Sqrt), + "ln": simpleFunc(math.Log), + "log2": simpleFunc(math.Log2), + "log10": simpleFunc(math.Log10), + "sin": simpleFunc(math.Sin), + "cos": simpleFunc(math.Cos), + "tan": simpleFunc(math.Tan), + "asin": simpleFunc(math.Asin), + "acos": simpleFunc(math.Acos), + "atan": simpleFunc(math.Atan), + "sinh": simpleFunc(math.Sinh), + "cosh": simpleFunc(math.Cosh), + "tanh": simpleFunc(math.Tanh), + "asinh": simpleFunc(math.Asinh), + "acosh": simpleFunc(math.Acosh), + "atanh": simpleFunc(math.Atanh), + "rad": simpleFunc(func(v float64) float64 { + return v * math.Pi / 180 + }), + "deg": simpleFunc(func(v float64) float64 { + return v * 180 / math.Pi + }), + "timestamp": func(f FunctionArgs) promql.Sample { + return promql.Sample{ + Point: promql.Point{ + T: f.StepTime, + V: float64(f.Points[0].T) / 1000, + }, + } + }, + "pi": func(f FunctionArgs) promql.Sample { + return promql.Sample{ + Point: promql.Point{ + T: f.StepTime, + V: math.Pi, + }, + } + }, + "sum_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: sumOverTime(f.Points), + }, + } + }, + "max_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: maxOverTime(f.Points), + }, + } + }, + "min_over_time": func(f 
FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: minOverTime(f.Points), + }, + } + }, + "avg_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: avgOverTime(f.Points), + }, + } + }, + "stddev_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: stddevOverTime(f.Points), + }, + } + }, + "stdvar_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: stdvarOverTime(f.Points), + }, + } + }, + "count_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: countOverTime(f.Points), + }, + } + }, + "last_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: f.Points[len(f.Points)-1].V, + }, + } + }, + "present_over_time": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: 1, + }, + } + }, + "time": func(f FunctionArgs) promql.Sample { + return promql.Sample{ + Point: promql.Point{ + T: f.StepTime, + V: float64(f.StepTime) / 1000, + }, + } + }, + "changes": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: changes(f.Points), + }, + } + }, + "resets": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: resets(f.Points), + }, + } + }, + "deriv": func(f FunctionArgs) promql.Sample { + if len(f.Points) < 2 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: deriv(f.Points), + }, + } + }, + "irate": func(f FunctionArgs) promql.Sample { + if len(f.Points) < 2 { + return InvalidSample + } + val, ok := instantValue(f.Points, true) + if !ok { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: val, + }, + } + }, + "idelta": func(f FunctionArgs) promql.Sample { + if len(f.Points) < 2 { + return InvalidSample + } + val, ok := instantValue(f.Points, false) + if !ok { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: val, + }, + } + }, + "vector": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: f.Points[0].V, + }, + } + }, + "scalar": func(f FunctionArgs) promql.Sample { + // This is handled specially by operator. 
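+		// See functionOperator.Next, which handles scalar() per step vector rather than per point.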
+ return promql.Sample{} + }, + "rate": func(f FunctionArgs) promql.Sample { + if len(f.Points) < 2 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: extrapolatedRate(f.Points, true, true, f.StepTime, f.SelectRange, f.Offset), + }, + } + }, + "delta": func(f FunctionArgs) promql.Sample { + if len(f.Points) < 2 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: extrapolatedRate(f.Points, false, false, f.StepTime, f.SelectRange, f.Offset), + }, + } + }, + "increase": func(f FunctionArgs) promql.Sample { + if len(f.Points) < 2 { + return InvalidSample + } + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: extrapolatedRate(f.Points, true, false, f.StepTime, f.SelectRange, f.Offset), + }, + } + }, + "clamp": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 || len(f.ScalarPoints) < 2 { + return InvalidSample + } + + v := f.Points[0].V + min := f.ScalarPoints[0] + max := f.ScalarPoints[1] + + if max < min { + return InvalidSample + } + + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: math.Max(min, math.Min(max, v)), + }, + } + }, + "clamp_min": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 || len(f.ScalarPoints) == 0 { + return InvalidSample + } + + v := f.Points[0].V + min := f.ScalarPoints[0] + + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: math.Max(min, v), + }, + } + }, + "clamp_max": func(f FunctionArgs) promql.Sample { + if len(f.Points) == 0 || len(f.ScalarPoints) == 0 { + return InvalidSample + } + + v := f.Points[0].V + max := f.ScalarPoints[0] + + return promql.Sample{ + Metric: f.Labels, + Point: promql.Point{ + T: f.StepTime, + V: math.Min(max, v), + }, + } + }, +} + +func NewFunctionCall(f *parser.Function) (FunctionCall, error) { + if call, ok := Funcs[f.Name]; ok { + return call, nil + } + + msg := fmt.Sprintf("unknown function: %s", f.Name) + if _, ok := parser.Functions[f.Name]; ok { + return nil, errors.Wrap(parse.ErrNotImplemented, msg) + } + + return nil, errors.Wrap(parse.ErrNotSupportedExpr, msg) +} + +// extrapolatedRate is a utility function for rate/increase/delta. +// It calculates the rate (allowing for counter resets if isCounter is true), +// extrapolates if the first/last sample is close to the boundary, and returns +// the result as either per-second (if isRate is true) or overall. +func extrapolatedRate(samples []promql.Point, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) float64 { + var ( + rangeStart = stepTime - (selectRange + offset) + rangeEnd = stepTime - offset + ) + + resultValue := samples[len(samples)-1].V - samples[0].V + if isCounter { + var lastValue float64 + for _, sample := range samples { + if sample.V < lastValue { + resultValue += lastValue + } + lastValue = sample.V + } + } + + // Duration between first/last samples and boundary of range. + durationToStart := float64(samples[0].T-rangeStart) / 1000 + durationToEnd := float64(rangeEnd-samples[len(samples)-1].T) / 1000 + + sampledInterval := float64(samples[len(samples)-1].T-samples[0].T) / 1000 + averageDurationBetweenSamples := sampledInterval / float64(len(samples)-1) + + if isCounter && resultValue > 0 && samples[0].V >= 0 { + // Counters cannot be negative. If we have any slope at + // all (i.e. resultValue went up), we can extrapolate + // the zero point of the counter. 
If the duration to the + // zero point is shorter than the durationToStart, we + // take the zero point as the start of the series, + // thereby avoiding extrapolation to negative counter + // values. + durationToZero := sampledInterval * (samples[0].V / resultValue) + if durationToZero < durationToStart { + durationToStart = durationToZero + } + } + + // If the first/last samples are close to the boundaries of the range, + // extrapolate the result. This is as we expect that another sample + // will exist given the spacing between samples we've seen thus far, + // with an allowance for noise. + extrapolationThreshold := averageDurationBetweenSamples * 1.1 + extrapolateToInterval := sampledInterval + + if durationToStart < extrapolationThreshold { + extrapolateToInterval += durationToStart + } else { + extrapolateToInterval += averageDurationBetweenSamples / 2 + } + if durationToEnd < extrapolationThreshold { + extrapolateToInterval += durationToEnd + } else { + extrapolateToInterval += averageDurationBetweenSamples / 2 + } + factor := extrapolateToInterval / sampledInterval + if isRate { + factor /= float64(selectRange / 1000) + } + resultValue *= factor + + return resultValue +} + +func instantValue(samples []promql.Point, isRate bool) (float64, bool) { + lastSample := samples[len(samples)-1] + previousSample := samples[len(samples)-2] + + var resultValue float64 + if isRate && lastSample.V < previousSample.V { + // Counter reset. + resultValue = lastSample.V + } else { + resultValue = lastSample.V - previousSample.V + } + + sampledInterval := lastSample.T - previousSample.T + if sampledInterval == 0 { + // Avoid dividing by 0. + return 0, false + } + + if isRate { + // Convert to per-second. + resultValue /= float64(sampledInterval) / 1000 + } + + return resultValue, true +} + +func maxOverTime(points []promql.Point) float64 { + max := points[0].V + for _, v := range points { + if v.V > max || math.IsNaN(max) { + max = v.V + } + } + return max +} + +func minOverTime(points []promql.Point) float64 { + min := points[0].V + for _, v := range points { + if v.V < min || math.IsNaN(min) { + min = v.V + } + } + return min +} + +func countOverTime(points []promql.Point) float64 { + return float64(len(points)) +} + +func avgOverTime(points []promql.Point) float64 { + var mean, count, c float64 + for _, v := range points { + count++ + if math.IsInf(mean, 0) { + if math.IsInf(v.V, 0) && (mean > 0) == (v.V > 0) { + // The `mean` and `v.V` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `mean` is correct + // already. + continue + } + if !math.IsInf(v.V, 0) && !math.IsNaN(v.V) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. 
+ continue + } + } + mean, c = KahanSumInc(v.V/count-mean/count, mean, c) + } + + if math.IsInf(mean, 0) { + return mean + } + return mean + c +} + +func sumOverTime(points []promql.Point) float64 { + var sum, c float64 + for _, v := range points { + sum, c = KahanSumInc(v.V, sum, c) + } + if math.IsInf(sum, 0) { + return sum + } + return sum + c +} + +func stddevOverTime(points []promql.Point) float64 { + var count float64 + var mean, cMean float64 + var aux, cAux float64 + for _, v := range points { + count++ + delta := v.V - (mean + cMean) + mean, cMean = KahanSumInc(delta/count, mean, cMean) + aux, cAux = KahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + } + return math.Sqrt((aux + cAux) / count) +} + +func stdvarOverTime(points []promql.Point) float64 { + var count float64 + var mean, cMean float64 + var aux, cAux float64 + for _, v := range points { + count++ + delta := v.V - (mean + cMean) + mean, cMean = KahanSumInc(delta/count, mean, cMean) + aux, cAux = KahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + } + return (aux + cAux) / count +} + +func changes(points []promql.Point) float64 { + var count float64 + prev := points[0].V + count = 0 + for _, sample := range points[1:] { + current := sample.V + if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { + count++ + } + prev = current + } + return count +} + +func deriv(points []promql.Point) float64 { + // We pass in an arbitrary timestamp that is near the values in use + // to avoid floating point accuracy issues, see + // https://github.com/prometheus/prometheus/issues/2674 + slope, _ := linearRegression(points, points[0].T) + return slope +} + +func resets(points []promql.Point) float64 { + count := 0 + prev := points[0].V + for _, sample := range points[1:] { + current := sample.V + if current < prev { + count++ + } + prev = current + } + + return float64(count) +} + +func linearRegression(samples []promql.Point, interceptTime int64) (slope, intercept float64) { + var ( + n float64 + sumX, cX float64 + sumY, cY float64 + sumXY, cXY float64 + sumX2, cX2 float64 + initY float64 + constY bool + ) + initY = samples[0].V + constY = true + for i, sample := range samples { + // Set constY to false if any new y values are encountered. + if constY && i > 0 && sample.V != initY { + constY = false + } + n += 1.0 + x := float64(sample.T-interceptTime) / 1e3 + sumX, cX = KahanSumInc(x, sumX, cX) + sumY, cY = KahanSumInc(sample.V, sumY, cY) + sumXY, cXY = KahanSumInc(x*sample.V, sumXY, cXY) + sumX2, cX2 = KahanSumInc(x*x, sumX2, cX2) + } + if constY { + if math.IsInf(initY, 0) { + return math.NaN(), math.NaN() + } + return 0, initY + } + sumX = sumX + cX + sumY = sumY + cY + sumXY = sumXY + cXY + sumX2 = sumX2 + cX2 + + covXY := sumXY - sumX*sumY/n + varX := sumX2 - sumX*sumX/n + + slope = covXY / varX + intercept = sumY/n - slope*sumX/n + return slope, intercept +} + +func KahanSumInc(inc, sum, c float64) (newSum, newC float64) { + t := sum + inc + // Using Neumaier improvement, swap if next term larger than sum. + if math.Abs(sum) >= math.Abs(inc) { + c += (sum - t) + inc + } else { + c += (inc - t) + sum + } + return t, c +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/function/histogram.go b/vendor/github.com/thanos-community/promql-engine/execution/function/histogram.go new file mode 100644 index 00000000000..d347fa5a00e --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/function/histogram.go @@ -0,0 +1,212 @@ +// Copyright (c) The Thanos Community Authors. 
+// Licensed under the Apache License 2.0. + +package function + +import ( + "context" + "fmt" + "math" + "strconv" + "sync" + + "github.com/cespare/xxhash/v2" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/model" +) + +type histogramSeries struct { + outputID int + upperBound float64 +} + +// histogramOperator is a function operator that calculates percentiles. +type histogramOperator struct { + pool *model.VectorPool + + funcArgs parser.Expressions + + once sync.Once + series []labels.Labels + scalarOp model.VectorOperator + vectorOp model.VectorOperator + + // scalarPoints is a reusable buffer for points from the first argument of histogram_quantile. + scalarPoints []float64 + + // outputIndex is a mapping from input series ID to the output series ID and its upper boundary value + // parsed from the le label. + // If outputIndex[i] is nil then series[i] has no valid `le` label. + outputIndex []*histogramSeries + + // seriesBuckets are the buckets for each individual series. + seriesBuckets []buckets +} + +func NewHistogramOperator(pool *model.VectorPool, args parser.Expressions, nextOps []model.VectorOperator, stepsBatch int) (model.VectorOperator, error) { + return &histogramOperator{ + pool: pool, + funcArgs: args, + once: sync.Once{}, + scalarOp: nextOps[0], + vectorOp: nextOps[1], + scalarPoints: make([]float64, stepsBatch), + }, nil +} + +func (o *histogramOperator) Explain() (me string, next []model.VectorOperator) { + next = []model.VectorOperator{o.scalarOp, o.vectorOp} + return fmt.Sprintf("[*functionOperator] histogram_quantile(%v)", o.funcArgs), next +} + +func (o *histogramOperator) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + o.once.Do(func() { err = o.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + return o.series, nil +} + +func (o *histogramOperator) GetPool() *model.VectorPool { + return o.pool +} + +func (o *histogramOperator) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + var err error + o.once.Do(func() { err = o.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + scalars, err := o.scalarOp.Next(ctx) + if err != nil { + return nil, err + } + + if len(scalars) == 0 { + return nil, nil + } + + vectors, err := o.vectorOp.Next(ctx) + if err != nil { + return nil, err + } + + o.scalarPoints = o.scalarPoints[:0] + for _, scalar := range scalars { + if len(scalar.Samples) > 0 { + o.scalarPoints = append(o.scalarPoints, scalar.Samples[0]) + } + o.scalarOp.GetPool().PutStepVector(scalar) + } + o.scalarOp.GetPool().PutVectors(scalars) + + return o.processInputSeries(vectors) +} + +func (o *histogramOperator) processInputSeries(vectors []model.StepVector) ([]model.StepVector, error) { + out := o.pool.GetVectorBatch() + for stepIndex, vector := range vectors { + o.resetBuckets() + for i, seriesID := range vector.SampleIDs { + outputSeries := o.outputIndex[seriesID] + // This means that it has an invalid `le` label. 
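+			// (loadSeries leaves the entry nil when the le value cannot be parsed as a float.)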
+ if outputSeries == nil { + continue + } + outputSeriesID := outputSeries.outputID + bucket := le{ + upperBound: outputSeries.upperBound, + count: vector.Samples[i], + } + o.seriesBuckets[outputSeriesID] = append(o.seriesBuckets[outputSeriesID], bucket) + } + + step := o.pool.GetStepVector(vector.T) + for i, stepBuckets := range o.seriesBuckets { + // It could be zero if multiple input series map to the same output series ID. + if len(stepBuckets) == 0 { + continue + } + // If there is only bucket or if we are after how many + // scalar points we have then it needs to be NaN. + if len(stepBuckets) == 1 || stepIndex >= len(o.scalarPoints) { + step.SampleIDs = append(step.SampleIDs, uint64(i)) + step.Samples = append(step.Samples, math.NaN()) + continue + } + + val := bucketQuantile(o.scalarPoints[stepIndex], stepBuckets) + step.SampleIDs = append(step.SampleIDs, uint64(i)) + step.Samples = append(step.Samples, val) + } + out = append(out, step) + o.vectorOp.GetPool().PutStepVector(vector) + } + + o.vectorOp.GetPool().PutVectors(vectors) + return out, nil +} + +func (o *histogramOperator) loadSeries(ctx context.Context) error { + series, err := o.vectorOp.Series(ctx) + if err != nil { + return err + } + + var ( + hashBuf = make([]byte, 0, 256) + hasher = xxhash.New() + seriesHashes = make(map[uint64]int, len(series)) + ) + + o.series = make([]labels.Labels, 0) + o.outputIndex = make([]*histogramSeries, len(series)) + + for i, s := range series { + lbls, bucketLabel := dropLabel(s.Copy(), "le") + value, err := strconv.ParseFloat(bucketLabel.Value, 64) + if err != nil { + continue + } + lbls, _ = DropMetricName(lbls) + + hasher.Reset() + hashBuf = lbls.Bytes(hashBuf) + if _, err := hasher.Write(hashBuf); err != nil { + return err + } + + seriesHash := hasher.Sum64() + seriesID, ok := seriesHashes[seriesHash] + if !ok { + o.series = append(o.series, lbls) + seriesID = len(o.series) - 1 + seriesHashes[seriesHash] = seriesID + } + + o.outputIndex[i] = &histogramSeries{ + outputID: seriesID, + upperBound: value, + } + } + o.seriesBuckets = make([]buckets, len(o.series)) + o.pool.SetStepSize(len(o.series)) + return nil +} + +func (o *histogramOperator) resetBuckets() { + for i := range o.seriesBuckets { + o.seriesBuckets[i] = o.seriesBuckets[i][:0] + } +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/function/operator.go b/vendor/github.com/thanos-community/promql-engine/execution/function/operator.go new file mode 100644 index 00000000000..2beefcf1541 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/function/operator.go @@ -0,0 +1,281 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package function + +import ( + "context" + "fmt" + "math" + "sync" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/parse" + "github.com/thanos-community/promql-engine/query" +) + +// functionOperator returns []model.StepVector after processing input with desired function. 
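+// The configured FunctionCall is applied to every sample of the vector argument,
+// with any scalar arguments gathered per step into scalarPoints.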
+type functionOperator struct { + funcExpr *parser.Call + series []labels.Labels + once sync.Once + + vectorIndex int + nextOps []model.VectorOperator + + call FunctionCall + scalarPoints [][]float64 + pointBuf []promql.Point +} + +type noArgFunctionOperator struct { + mint int64 + maxt int64 + step int64 + currentStep int64 + stepsBatch int + funcExpr *parser.Call + call FunctionCall + vectorPool *model.VectorPool +} + +func (o *noArgFunctionOperator) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*noArgFunctionOperator] %v()", o.funcExpr.Func.Name), []model.VectorOperator{} +} + +func (o *noArgFunctionOperator) Series(ctx context.Context) ([]labels.Labels, error) { + return []labels.Labels{}, nil +} + +func (o *noArgFunctionOperator) GetPool() *model.VectorPool { + return o.vectorPool +} + +func (o *noArgFunctionOperator) Next(ctx context.Context) ([]model.StepVector, error) { + if o.currentStep > o.maxt { + return nil, nil + } + ret := o.vectorPool.GetVectorBatch() + for i := 0; i < o.stepsBatch && o.currentStep <= o.maxt; i++ { + sv := o.vectorPool.GetStepVector(o.currentStep) + result := o.call(FunctionArgs{ + StepTime: o.currentStep, + }) + sv.T = o.currentStep + sv.Samples = []float64{result.V} + sv.SampleIDs = []uint64{} + + ret = append(ret, sv) + o.currentStep += o.step + } + + return ret, nil +} + +func NewFunctionOperator(funcExpr *parser.Call, call FunctionCall, nextOps []model.VectorOperator, stepsBatch int, opts *query.Options) (model.VectorOperator, error) { + // Short-circuit functions that take no args. Their only input is the step's timestamp. + if len(nextOps) == 0 { + interval := opts.Step.Milliseconds() + // We set interval to be at least 1. + if interval == 0 { + interval = 1 + } + + return &noArgFunctionOperator{ + currentStep: opts.Start.UnixMilli(), + mint: opts.Start.UnixMilli(), + maxt: opts.End.UnixMilli(), + step: interval, + stepsBatch: stepsBatch, + funcExpr: funcExpr, + call: call, + vectorPool: model.NewVectorPool(stepsBatch), + }, nil + } + scalarPoints := make([][]float64, stepsBatch) + for i := 0; i < stepsBatch; i++ { + scalarPoints[i] = make([]float64, len(nextOps)-1) + } + f := &functionOperator{ + nextOps: nextOps, + call: call, + funcExpr: funcExpr, + vectorIndex: 0, + scalarPoints: scalarPoints, + pointBuf: make([]promql.Point, 1), + } + + for i := range funcExpr.Args { + if funcExpr.Args[i].Type() == parser.ValueTypeVector { + f.vectorIndex = i + break + } + } + + // Check selector type. + // TODO(saswatamcode): Add support for string and matrix. 
+ switch funcExpr.Args[f.vectorIndex].Type() { + case parser.ValueTypeVector, parser.ValueTypeScalar: + return f, nil + default: + return nil, errors.Wrapf(parse.ErrNotImplemented, "got %s:", funcExpr.String()) + } +} + +func (o *functionOperator) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*functionOperator] %v(%v)", o.funcExpr.Func.Name, o.funcExpr.Args), o.nextOps +} + +func (o *functionOperator) Series(ctx context.Context) ([]labels.Labels, error) { + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + + return o.series, nil +} + +func (o *functionOperator) GetPool() *model.VectorPool { + return o.nextOps[o.vectorIndex].GetPool() +} + +func (o *functionOperator) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + + // Process non-variadic single/multi-arg instant vector and scalar input functions. + // Call next on vector input. + vectors, err := o.nextOps[o.vectorIndex].Next(ctx) + if err != nil { + return nil, err + } + + if len(vectors) == 0 { + return nil, nil + } + + scalarIndex := 0 + for i := range o.nextOps { + if i == o.vectorIndex { + continue + } + + scalarVectors, err := o.nextOps[i].Next(ctx) + if err != nil { + return nil, err + } + + for batchIndex := range vectors { + val := math.NaN() + if len(scalarVectors) > 0 && len(scalarVectors[batchIndex].Samples) > 0 { + val = scalarVectors[batchIndex].Samples[0] + o.nextOps[i].GetPool().PutStepVector(scalarVectors[batchIndex]) + } + o.scalarPoints[batchIndex][scalarIndex] = val + } + o.nextOps[i].GetPool().PutVectors(scalarVectors) + scalarIndex++ + } + + for batchIndex, vector := range vectors { + // scalar() depends on number of samples per vector and returns NaN if len(samples) != 1. + // So need to handle this separately here, instead of going via call which is per point. + if o.funcExpr.Func.Name == "scalar" { + if len(vector.Samples) <= 1 { + continue + } + + vectors[batchIndex].Samples = vector.Samples[:1] + vectors[batchIndex].SampleIDs = vector.SampleIDs[:1] + vector.Samples[0] = math.NaN() + continue + } + + for i := range vector.Samples { + o.pointBuf[0].V = vector.Samples[i] + // Call function by separately passing major input and scalars. + result := o.call(FunctionArgs{ + Labels: o.series[0], + Points: o.pointBuf, + StepTime: vector.T, + ScalarPoints: o.scalarPoints[batchIndex], + }) + + vector.Samples[i] = result.V + } + } + + return vectors, nil +} + +func (o *functionOperator) loadSeries(ctx context.Context) error { + var err error + o.once.Do(func() { + if o.funcExpr.Func.Name == "vector" { + o.series = []labels.Labels{labels.New()} + return + } + + if o.funcExpr.Func.Name == "scalar" { + o.series = []labels.Labels{} + return + } + + series, loadErr := o.nextOps[o.vectorIndex].Series(ctx) + if loadErr != nil { + err = loadErr + return + } + + o.series = make([]labels.Labels, len(series)) + for i, s := range series { + lbls := s + if o.funcExpr.Func.Name != "last_over_time" { + lbls, _ = DropMetricName(s.Copy()) + } + + o.series[i] = lbls + } + }) + + return err +} + +func DropMetricName(l labels.Labels) (labels.Labels, labels.Label) { + return dropLabel(l, labels.MetricName) +} + +// dropLabel removes the label with name from l and returns the dropped label. 
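+// When the label is present, the input slice is modified in place.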
+func dropLabel(l labels.Labels, name string) (labels.Labels, labels.Label) { + if len(l) == 0 { + return l, labels.Label{} + } + + if len(l) == 1 { + if l[0].Name == name { + return l[:0], l[0] + + } + return l, labels.Label{} + } + + for i := range l { + if l[i].Name == name { + lbl := l[i] + return append(l[:i], l[i+1:]...), lbl + } + } + + return l, labels.Label{} +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/function/quantile.go b/vendor/github.com/thanos-community/promql-engine/execution/function/quantile.go new file mode 100644 index 00000000000..7b6d54d211f --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/function/quantile.go @@ -0,0 +1,165 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +package function + +import ( + "math" + "sort" +) + +type le struct { + upperBound float64 + count float64 +} + +// buckets implements sort.Interface. +type buckets []le + +func (b buckets) Len() int { return len(b) } +func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound } + +// bucketQuantile calculates the quantile 'q' based on the given buckets. The +// buckets will be sorted by upperBound by this function (i.e. no sorting +// needed before calling this function). The quantile value is interpolated +// assuming a linear distribution within a bucket. However, if the quantile +// falls into the highest bucket, the upper bound of the 2nd highest bucket is +// returned. A natural lower bound of 0 is assumed if the upper bound of the +// lowest bucket is greater 0. In that case, interpolation in the lowest bucket +// happens linearly between 0 and the upper bound of the lowest bucket. +// However, if the lowest bucket has an upper bound less or equal 0, this upper +// bound is returned if the quantile falls into the lowest bucket. +// +// There are a number of special cases (once we have a way to report errors +// happening during evaluations of AST functions, we should report those +// explicitly): +// +// If 'buckets' has 0 observations, NaN is returned. +// +// If 'buckets' has fewer than 2 elements, NaN is returned. +// +// If the highest bucket is not +Inf, NaN is returned. +// +// If q==NaN, NaN is returned. +// +// If q<0, -Inf is returned. +// +// If q>1, +Inf is returned. 
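+//
+// For example, with buckets {le="1"} => 5, {le="2"} => 10, {le="+Inf"} => 10 and q=0.5,
+// the rank is 5, which lands in the first bucket, so the interpolated result is 1
+// (five fifths of the way from the implicit lower bound 0 to the upper bound 1).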
+func bucketQuantile(q float64, buckets buckets) float64 { + if math.IsNaN(q) { + return math.NaN() + } + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + sort.Sort(buckets) + if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { + return math.NaN() + } + + buckets = coalesceBuckets(buckets) + ensureMonotonic(buckets) + + if len(buckets) < 2 { + return math.NaN() + } + observations := buckets[len(buckets)-1].count + if observations == 0 { + return math.NaN() + } + rank := q * observations + b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) + + if b == len(buckets)-1 { + return buckets[len(buckets)-2].upperBound + } + if b == 0 && buckets[0].upperBound <= 0 { + return buckets[0].upperBound + } + var ( + bucketStart float64 + bucketEnd = buckets[b].upperBound + count = buckets[b].count + ) + if b > 0 { + bucketStart = buckets[b-1].upperBound + count -= buckets[b-1].count + rank -= buckets[b-1].count + } + return bucketStart + (bucketEnd-bucketStart)*(rank/count) +} + +// coalesceBuckets merges buckets with the same upper bound. +// +// The input buckets must be sorted. +func coalesceBuckets(buckets buckets) buckets { + last := buckets[0] + i := 0 + for _, b := range buckets[1:] { + if b.upperBound == last.upperBound { + last.count += b.count + } else { + buckets[i] = last + last = b + i++ + } + } + buckets[i] = last + return buckets[:i+1] +} + +// The assumption that bucket counts increase monotonically with increasing +// upperBound may be violated during: +// +// * Recording rule evaluation of histogram_quantile, especially when rate() +// has been applied to the underlying bucket timeseries. +// * Evaluation of histogram_quantile computed over federated bucket +// timeseries, especially when rate() has been applied. +// +// This is because scraped data is not made available to rule evaluation or +// federation atomically, so some buckets are computed with data from the +// most recent scrapes, but the other buckets are missing data from the most +// recent scrape. +// +// Monotonicity is usually guaranteed because if a bucket with upper bound +// u1 has count c1, then any bucket with a higher upper bound u > u1 must +// have counted all c1 observations and perhaps more, so that c >= c1. +// +// Randomly interspersed partial sampling breaks that guarantee, and rate() +// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from +// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The +// monotonicity is broken. It is exacerbated by rate() because under normal +// operation, cumulative counting of buckets will cause the bucket counts to +// diverge such that small differences from missing samples are not a problem. +// rate() removes this divergence.) +// +// bucketQuantile depends on that monotonicity to do a binary search for the +// bucket with the φ-quantile count, so breaking the monotonicity +// guarantee causes bucketQuantile() to return undefined (nonsense) results. +// +// As a somewhat hacky solution until ingestion is atomic per scrape, we +// calculate the "envelope" of the histogram buckets, essentially removing +// any decreases in the count between successive buckets. 
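+// For example, cumulative counts [10, 7, 12] become [10, 10, 12].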
+ +func ensureMonotonic(buckets buckets) { + max := buckets[0].count + for i := 1; i < len(buckets); i++ { + switch { + case buckets[i].count > max: + max = buckets[i].count + case buckets[i].count < max: + buckets[i].count = max + } + } +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/model/operator.go b/vendor/github.com/thanos-community/promql-engine/execution/model/operator.go new file mode 100644 index 00000000000..40d04eb6717 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/model/operator.go @@ -0,0 +1,27 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package model + +import ( + "context" + + "github.com/prometheus/prometheus/model/labels" +) + +// VectorOperator performs operations on series in step by step fashion. +type VectorOperator interface { + // Next yields vectors of samples from all series for one or more execution steps. + Next(ctx context.Context) ([]StepVector, error) + + // Series returns all series that the operator will process during Next results. + // The result can be used by upstream operators to allocate output tables and buffers + // before starting to process samples. + Series(ctx context.Context) ([]labels.Labels, error) + + // GetPool returns pool of vectors that can be shared across operators. + GetPool() *VectorPool + + // Explain returns human-readable explanation of the current operator and optional nested operators. + Explain() (me string, next []VectorOperator) +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/model/pool.go b/vendor/github.com/thanos-community/promql-engine/execution/model/pool.go new file mode 100644 index 00000000000..08d8bb00d30 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/model/pool.go @@ -0,0 +1,68 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package model + +import ( + "sync" +) + +type VectorPool struct { + vectors sync.Pool + + stepSize int + samples sync.Pool + sampleIDs sync.Pool +} + +func NewVectorPool(stepsBatch int) *VectorPool { + pool := &VectorPool{} + pool.vectors = sync.Pool{ + New: func() any { + sv := make([]StepVector, 0, stepsBatch) + return &sv + }, + } + pool.samples = sync.Pool{ + New: func() any { + samples := make([]float64, 0, pool.stepSize) + return &samples + }, + } + pool.sampleIDs = sync.Pool{ + New: func() any { + sampleIDs := make([]uint64, 0, pool.stepSize) + return &sampleIDs + }, + } + + return pool +} + +func (p *VectorPool) GetVectorBatch() []StepVector { + return *p.vectors.Get().(*[]StepVector) +} + +func (p *VectorPool) PutVectors(vector []StepVector) { + vector = vector[:0] + p.vectors.Put(&vector) +} + +func (p *VectorPool) GetStepVector(t int64) StepVector { + return StepVector{ + T: t, + SampleIDs: *p.sampleIDs.Get().(*[]uint64), + Samples: *p.samples.Get().(*[]float64), + } +} + +func (p *VectorPool) PutStepVector(v StepVector) { + v.SampleIDs = v.SampleIDs[:0] + v.Samples = v.Samples[:0] + p.sampleIDs.Put(&v.SampleIDs) + p.samples.Put(&v.Samples) +} + +func (p *VectorPool) SetStepSize(n int) { + p.stepSize = n +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/model/vector.go b/vendor/github.com/thanos-community/promql-engine/execution/model/vector.go new file mode 100644 index 00000000000..7fdce171b2d --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/model/vector.go @@ -0,0 +1,19 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package model + +import "github.com/prometheus/prometheus/model/labels" + +type Series struct { + // ID is a numerical, zero-based identifier for a series. + // It allows using slices instead of maps for fast lookups. + ID uint64 + Metric labels.Labels +} + +type StepVector struct { + T int64 + SampleIDs []uint64 + Samples []float64 +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/parse/errors.go b/vendor/github.com/thanos-community/promql-engine/execution/parse/errors.go new file mode 100644 index 00000000000..c5e29e1ca14 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/parse/errors.go @@ -0,0 +1,21 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package parse + +import ( + "fmt" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/promql/parser" +) + +var ErrNotSupportedExpr = errors.New("unsupported expression") + +func UnsupportedOperationErr(op parser.ItemType) error { + t := parser.ItemTypeStr[op] + msg := fmt.Sprintf("operation not supported: %s", t) + return errors.Wrap(ErrNotSupportedExpr, msg) +} + +var ErrNotImplemented = errors.New("expression not implemented") diff --git a/vendor/github.com/thanos-community/promql-engine/execution/remote/operator.go b/vendor/github.com/thanos-community/promql-engine/execution/remote/operator.go new file mode 100644 index 00000000000..9f425a5300e --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/remote/operator.go @@ -0,0 +1,102 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package remote + +import ( + "context" + "fmt" + "sync" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/execution/scan" + engstore "github.com/thanos-community/promql-engine/execution/storage" + "github.com/thanos-community/promql-engine/query" +) + +type Execution struct { + query promql.Query + vectorSelector model.VectorOperator +} + +func NewExecution(query promql.Query, pool *model.VectorPool, opts *query.Options) *Execution { + return &Execution{ + query: query, + vectorSelector: scan.NewVectorSelector(pool, newStorageFromQuery(query), opts, 0, 0, 1), + } +} + +func (e *Execution) Series(ctx context.Context) ([]labels.Labels, error) { + return e.vectorSelector.Series(ctx) +} + +func (e *Execution) Next(ctx context.Context) ([]model.StepVector, error) { + return e.vectorSelector.Next(ctx) +} + +func (e *Execution) GetPool() *model.VectorPool { + return e.vectorSelector.GetPool() +} + +func (e *Execution) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*remoteExec] %s", e.query), nil +} + +type storageAdapter struct { + query promql.Query + + once sync.Once + err error + series []engstore.SignedSeries +} + +func newStorageFromQuery(query promql.Query) *storageAdapter { + return &storageAdapter{ + query: query, + } +} + +func (s *storageAdapter) Matchers() []*labels.Matcher { return nil } + +func (s *storageAdapter) GetSeries(ctx context.Context, _, _ int) ([]engstore.SignedSeries, error) { + s.once.Do(func() { s.executeQuery(ctx) }) + if s.err != nil { + return nil, s.err + } + + return s.series, nil +} + +func (s *storageAdapter) executeQuery(ctx context.Context) { + defer s.query.Close() + result := s.query.Exec(ctx) + if result.Err != nil { + s.err = result.Err + return + } + + switch val := result.Value.(type) { + case promql.Matrix: + s.series = make([]engstore.SignedSeries, len(val)) + for i, series := range val { + s.series[i] = engstore.SignedSeries{ + Signature: uint64(i), + Series: promql.NewStorageSeries(series), + } + } + case promql.Vector: + s.series = make([]engstore.SignedSeries, len(val)) + for i, point := range val { + s.series[i] = engstore.SignedSeries{ + Signature: uint64(i), + Series: promql.NewStorageSeries(promql.Series{ + Metric: point.Metric, + Points: []promql.Point{{T: point.T, V: point.V}}, + }), + } + } + } +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/scan/literal_selector.go b/vendor/github.com/thanos-community/promql-engine/execution/scan/literal_selector.go new file mode 100644 index 00000000000..8d011130b32 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/scan/literal_selector.go @@ -0,0 +1,99 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package scan + +import ( + "context" + "fmt" + "sync" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/query" + + "github.com/prometheus/prometheus/model/labels" +) + +// numberLiteralSelector returns []model.StepVector with same sample value across time range. 
+type numberLiteralSelector struct { + vectorPool *model.VectorPool + + numSteps int + mint int64 + maxt int64 + step int64 + currentStep int64 + series []labels.Labels + once sync.Once + + val float64 +} + +func NewNumberLiteralSelector(pool *model.VectorPool, opts *query.Options, val float64) *numberLiteralSelector { + return &numberLiteralSelector{ + vectorPool: pool, + numSteps: opts.NumSteps(), + mint: opts.Start.UnixMilli(), + maxt: opts.End.UnixMilli(), + step: opts.Step.Milliseconds(), + currentStep: opts.Start.UnixMilli(), + val: val, + } +} + +func (o *numberLiteralSelector) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*numberLiteralSelector] %v", o.val), nil +} + +func (o *numberLiteralSelector) Series(context.Context) ([]labels.Labels, error) { + o.loadSeries() + return o.series, nil +} + +func (o *numberLiteralSelector) GetPool() *model.VectorPool { + return o.vectorPool +} + +func (o *numberLiteralSelector) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if o.currentStep > o.maxt { + return nil, nil + } + + o.loadSeries() + + vectors := o.vectorPool.GetVectorBatch() + ts := o.currentStep + for currStep := 0; currStep < o.numSteps && ts <= o.maxt; currStep++ { + if len(vectors) <= currStep { + vectors = append(vectors, o.vectorPool.GetStepVector(ts)) + } + + vectors[currStep].SampleIDs = append(vectors[currStep].SampleIDs, uint64(0)) + vectors[currStep].Samples = append(vectors[currStep].Samples, o.val) + + ts += o.step + } + + // For instant queries, set the step to a positive value + // so that the operator can terminate. + if o.step == 0 { + o.step = 1 + } + o.currentStep += o.step * int64(o.numSteps) + + return vectors, nil +} + +func (o *numberLiteralSelector) loadSeries() { + // If number literal is included within function, []labels.labels must be initialized. + o.once.Do(func() { + o.series = make([]labels.Labels, 1) + o.vectorPool.SetStepSize(len(o.series)) + }) +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/scan/matrix_selector.go b/vendor/github.com/thanos-community/promql-engine/execution/scan/matrix_selector.go new file mode 100644 index 00000000000..b03e1696b1a --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/scan/matrix_selector.go @@ -0,0 +1,281 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package scan + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + + "github.com/thanos-community/promql-engine/execution/function" + "github.com/thanos-community/promql-engine/execution/model" + engstore "github.com/thanos-community/promql-engine/execution/storage" + "github.com/thanos-community/promql-engine/query" +) + +type matrixScanner struct { + labels labels.Labels + signature uint64 + previousPoints []promql.Point + samples *storage.BufferedSeriesIterator +} + +type matrixSelector struct { + funcExpr *parser.Call + storage engstore.SeriesSelector + call function.FunctionCall + scanners []matrixScanner + series []labels.Labels + once sync.Once + + vectorPool *model.VectorPool + + numSteps int + mint int64 + maxt int64 + step int64 + selectRange int64 + offset int64 + currentStep int64 + + shard int + numShards int +} + +// NewMatrixSelector creates operator which selects vector of series over time. +func NewMatrixSelector( + pool *model.VectorPool, + selector engstore.SeriesSelector, + call function.FunctionCall, + funcExpr *parser.Call, + opts *query.Options, + selectRange, offset time.Duration, + shard, numShard int, +) model.VectorOperator { + // TODO(fpetkovski): Add offset parameter. + return &matrixSelector{ + storage: selector, + call: call, + funcExpr: funcExpr, + vectorPool: pool, + + numSteps: opts.NumSteps(), + mint: opts.Start.UnixMilli(), + maxt: opts.End.UnixMilli(), + step: opts.Step.Milliseconds(), + + selectRange: selectRange.Milliseconds(), + offset: offset.Milliseconds(), + currentStep: opts.Start.UnixMilli(), + + shard: shard, + numShards: numShard, + } +} + +func (o *matrixSelector) Explain() (me string, next []model.VectorOperator) { + r := time.Duration(o.selectRange) * time.Millisecond + if o.call != nil { + return fmt.Sprintf("[*matrixSelector] %v({%v}[%s] %v mod %v)", o.funcExpr.Func.Name, o.storage.Matchers(), r, o.shard, o.numShards), nil + } + return fmt.Sprintf("[*matrixSelector] {%v}[%s] %v mod %v", o.storage.Matchers(), r, o.shard, o.numShards), nil +} + +func (o *matrixSelector) Series(ctx context.Context) ([]labels.Labels, error) { + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + return o.series, nil +} + +func (o *matrixSelector) GetPool() *model.VectorPool { + return o.vectorPool +} + +func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if o.currentStep > o.maxt { + return nil, nil + } + + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + + vectors := o.vectorPool.GetVectorBatch() + ts := o.currentStep + for i := 0; i < len(o.scanners); i++ { + var ( + series = o.scanners[i] + seriesTs = ts + ) + + for currStep := 0; currStep < o.numSteps && seriesTs <= o.maxt; currStep++ { + if len(vectors) <= currStep { + vectors = append(vectors, o.vectorPool.GetStepVector(seriesTs)) + } + maxt := seriesTs - o.offset + mint := maxt - o.selectRange + rangePoints, err := selectPoints(series.samples, mint, maxt, o.scanners[i].previousPoints) + if err != nil { + return nil, err + } + + // TODO(saswatamcode): Handle multi-arg functions for matrixSelectors. 
+ // Also, allow operator to exist independently without being nested + // under parser.Call by implementing new data model. + // https://github.com/thanos-community/promql-engine/issues/39 + result := o.call(function.FunctionArgs{ + Labels: series.labels, + Points: rangePoints, + StepTime: seriesTs, + SelectRange: o.selectRange, + Offset: o.offset, + }) + + if result.Point != function.InvalidSample.Point { + vectors[currStep].T = result.T + vectors[currStep].Samples = append(vectors[currStep].Samples, result.V) + vectors[currStep].SampleIDs = append(vectors[currStep].SampleIDs, series.signature) + } + + o.scanners[i].previousPoints = rangePoints + + // Only buffer stepRange milliseconds from the second step on. + stepRange := o.selectRange + if stepRange > o.step { + stepRange = o.step + } + series.samples.ReduceDelta(stepRange) + + seriesTs += o.step + } + } + // For instant queries, set the step to a positive value + // so that the operator can terminate. + if o.step == 0 { + o.step = 1 + } + o.currentStep += o.step * int64(o.numSteps) + + return vectors, nil +} + +func (o *matrixSelector) loadSeries(ctx context.Context) error { + var err error + o.once.Do(func() { + series, loadErr := o.storage.GetSeries(ctx, o.shard, o.numShards) + if loadErr != nil { + err = loadErr + return + } + + o.scanners = make([]matrixScanner, len(series)) + o.series = make([]labels.Labels, len(series)) + for i, s := range series { + lbls := s.Labels() + if o.funcExpr.Func.Name != "last_over_time" { + // This modifies the array in place. Because labels.Labels + // can be re-used between different Select() calls, it means that + // we have to copy it here. + // TODO(GiedriusS): could we identify somehow whether labels.Labels + // is reused between Select() calls? + lbls, _ = function.DropMetricName(lbls.Copy()) + } + + sort.Sort(lbls) + + o.scanners[i] = matrixScanner{ + labels: lbls, + signature: s.Signature, + samples: storage.NewBufferIterator(s.Iterator(), o.selectRange), + } + o.series[i] = lbls + } + o.vectorPool.SetStepSize(len(series)) + }) + return err +} + +// matrixIterSlice populates a matrix vector covering the requested range for a +// single time series, with points retrieved from an iterator. +// +// As an optimization, the matrix vector may already contain points of the same +// time series from the evaluation of an earlier step (with lower mint and maxt +// values). Any such points falling before mint are discarded; points that fall +// into the [mint, maxt] range are retained; only points with later timestamps +// are populated from the iterator. +// TODO(fpetkovski): Add max samples limit. +func selectPoints(it *storage.BufferedSeriesIterator, mint, maxt int64, out []promql.Point) ([]promql.Point, error) { + if len(out) > 0 && out[len(out)-1].T >= mint { + // There is an overlap between previous and current ranges, retain common + // points. In most such cases: + // (a) the overlap is significantly larger than the eval step; and/or + // (b) the number of samples is relatively small. + // so a linear search will be as fast as a binary search. + var drop int + for drop = 0; out[drop].T < mint; drop++ { + } + copy(out, out[drop:]) + out = out[:len(out)-drop] + // Only append points with timestamps after the last timestamp we have. 
+ mint = out[len(out)-1].T + 1 + } else { + out = out[:0] + } + + soughtValueType := it.Seek(maxt) + if soughtValueType == chunkenc.ValNone { + if it.Err() != nil { + return nil, it.Err() + } + } + + buf := it.Buffer() +loop: + for { + switch buf.Next() { + case chunkenc.ValNone: + break loop + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + return nil, ErrNativeHistogramsUnsupported + case chunkenc.ValFloat: + t, v := buf.At() + if value.IsStaleNaN(v) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + out = append(out, promql.Point{T: t, V: v}) + } + } + } + + // The sought sample might also be in the range. + switch soughtValueType { + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + return nil, ErrNativeHistogramsUnsupported + case chunkenc.ValFloat: + t, v := it.At() + if t == maxt && !value.IsStaleNaN(v) { + out = append(out, promql.Point{T: t, V: v}) + } + } + + return out, nil +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/scan/vector_selector.go b/vendor/github.com/thanos-community/promql-engine/execution/scan/vector_selector.go new file mode 100644 index 00000000000..d3075e98fd3 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/scan/vector_selector.go @@ -0,0 +1,195 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package scan + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/tsdb/chunkenc" + + "github.com/thanos-community/promql-engine/execution/model" + engstore "github.com/thanos-community/promql-engine/execution/storage" + "github.com/thanos-community/promql-engine/query" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" + + "github.com/prometheus/prometheus/storage" +) + +var ErrNativeHistogramsUnsupported = errors.Newf("querying native histograms is not supported") + +type vectorScanner struct { + labels labels.Labels + signature uint64 + samples *storage.MemoizedSeriesIterator +} + +type vectorSelector struct { + storage engstore.SeriesSelector + scanners []vectorScanner + series []labels.Labels + + once sync.Once + vectorPool *model.VectorPool + + numSteps int + mint int64 + maxt int64 + lookbackDelta int64 + step int64 + currentStep int64 + offset int64 + + shard int + numShards int +} + +// NewVectorSelector creates operator which selects vector of series. 
+func NewVectorSelector( + pool *model.VectorPool, + selector engstore.SeriesSelector, + queryOpts *query.Options, + offset time.Duration, + shard, numShards int, +) model.VectorOperator { + return &vectorSelector{ + storage: selector, + vectorPool: pool, + + mint: queryOpts.Start.UnixMilli(), + maxt: queryOpts.End.UnixMilli(), + step: queryOpts.Step.Milliseconds(), + currentStep: queryOpts.Start.UnixMilli(), + lookbackDelta: queryOpts.LookbackDelta.Milliseconds(), + offset: offset.Milliseconds(), + numSteps: queryOpts.NumSteps(), + + shard: shard, + numShards: numShards, + } +} + +func (o *vectorSelector) Explain() (me string, next []model.VectorOperator) { + return fmt.Sprintf("[*vectorSelector] {%v} %v mod %v", o.storage.Matchers(), o.shard, o.numShards), nil +} + +func (o *vectorSelector) Series(ctx context.Context) ([]labels.Labels, error) { + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + return o.series, nil +} + +func (o *vectorSelector) GetPool() *model.VectorPool { + return o.vectorPool +} + +func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if o.currentStep > o.maxt { + return nil, nil + } + + if err := o.loadSeries(ctx); err != nil { + return nil, err + } + + vectors := o.vectorPool.GetVectorBatch() + ts := o.currentStep + for i := 0; i < len(o.scanners); i++ { + var ( + series = o.scanners[i] + seriesTs = ts + ) + + for currStep := 0; currStep < o.numSteps && seriesTs <= o.maxt; currStep++ { + if len(vectors) <= currStep { + vectors = append(vectors, o.vectorPool.GetStepVector(seriesTs)) + } + _, v, ok, err := selectPoint(series.samples, seriesTs, o.lookbackDelta, o.offset) + if err != nil { + return nil, err + } + if ok { + vectors[currStep].SampleIDs = append(vectors[currStep].SampleIDs, series.signature) + vectors[currStep].Samples = append(vectors[currStep].Samples, v) + } + seriesTs += o.step + } + } + // For instant queries, set the step to a positive value + // so that the operator can terminate. + if o.step == 0 { + o.step = 1 + } + o.currentStep += o.step * int64(o.numSteps) + + return vectors, nil +} + +func (o *vectorSelector) loadSeries(ctx context.Context) error { + var err error + o.once.Do(func() { + series, loadErr := o.storage.GetSeries(ctx, o.shard, o.numShards) + if loadErr != nil { + err = loadErr + return + } + + o.scanners = make([]vectorScanner, len(series)) + o.series = make([]labels.Labels, len(series)) + for i, s := range series { + o.scanners[i] = vectorScanner{ + labels: s.Labels(), + signature: s.Signature, + samples: storage.NewMemoizedIterator(s.Iterator(), o.lookbackDelta), + } + o.series[i] = s.Labels() + } + o.vectorPool.SetStepSize(len(series)) + }) + return err +} + +// TODO(fpetkovski): Add max samples limit. 
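As a usage sketch (not part of the vendored code), the constructors shown in this patch can be wired together roughly as follows. The function name drainVectorSelector, the queryable and matchers inputs, and the concrete Start/End/Step values are all assumptions for illustration; only the constructors and methods visible above are used.

package example

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"

	"github.com/thanos-community/promql-engine/execution/model"
	"github.com/thanos-community/promql-engine/execution/scan"
	engstore "github.com/thanos-community/promql-engine/execution/storage"
	"github.com/thanos-community/promql-engine/query"
)

// drainVectorSelector builds a vector selector over a single shard and drains
// it batch by batch until the operator reports it is exhausted.
func drainVectorSelector(ctx context.Context, queryable storage.Queryable, matchers []*labels.Matcher) error {
	opts := &query.Options{
		Start:         time.Unix(0, 0),
		End:           time.Unix(600, 0),
		Step:          30 * time.Second,
		LookbackDelta: 5 * time.Minute,
		StepsBatch:    10,
	}
	pool := engstore.NewSelectorPool(queryable)
	sel := pool.GetSelector(opts.Start.UnixMilli(), opts.End.UnixMilli(), opts.Step.Milliseconds(), matchers, storage.SelectHints{})
	op := scan.NewVectorSelector(model.NewVectorPool(10), sel, opts, 0, 0, 1)

	// Series can be called up front to learn the output labels and size buffers.
	if _, err := op.Series(ctx); err != nil {
		return err
	}
	for {
		batch, err := op.Next(ctx)
		if err != nil {
			return err
		}
		if batch == nil { // exhausted once currentStep passes maxt
			return nil
		}
		for _, v := range batch {
			op.GetPool().PutStepVector(v) // recycle sample and ID slices
		}
		op.GetPool().PutVectors(batch) // recycle the batch slice itself
	}
}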
+func selectPoint(it *storage.MemoizedSeriesIterator, ts, lookbackDelta, offset int64) (int64, float64, bool, error) { + refTime := ts - offset + var t int64 + var v float64 + + valueType := it.Seek(refTime) + switch valueType { + case chunkenc.ValNone: + if it.Err() != nil { + return 0, 0, false, it.Err() + } + case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: + return 0, 0, false, ErrNativeHistogramsUnsupported + case chunkenc.ValFloat: + t, v = it.At() + default: + panic(errors.Newf("unknown value type %v", valueType)) + } + if valueType == chunkenc.ValNone || t > refTime { + var ok bool + t, v, _, _, ok = it.PeekPrev() + if !ok || t < refTime-lookbackDelta { + return 0, 0, false, nil + } + } + if value.IsStaleNaN(v) { + return 0, 0, false, nil + } + return t, v, true, nil +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/step_invariant/step_invariant.go b/vendor/github.com/thanos-community/promql-engine/execution/step_invariant/step_invariant.go new file mode 100644 index 00000000000..e2b35f8517e --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/step_invariant/step_invariant.go @@ -0,0 +1,150 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package step_invariant + +import ( + "context" + "sync" + + "github.com/efficientgo/core/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/query" +) + +type stepInvariantOperator struct { + vectorPool *model.VectorPool + next model.VectorOperator + cacheResult bool + + seriesOnce sync.Once + series []labels.Labels + cacheVectorOnce sync.Once + cachedVector model.StepVector + + mint int64 + maxt int64 + step int64 + currentStep int64 + stepsBatch int +} + +func (u *stepInvariantOperator) Explain() (me string, next []model.VectorOperator) { + return "[*stepInvariantOperator]", []model.VectorOperator{u.next} +} + +func NewStepInvariantOperator( + pool *model.VectorPool, + next model.VectorOperator, + expr parser.Expr, + opts *query.Options, + stepsBatch int, +) (model.VectorOperator, error) { + interval := opts.Step.Milliseconds() + // We set interval to be at least 1. + if interval == 0 { + interval = 1 + } + u := &stepInvariantOperator{ + vectorPool: pool, + next: next, + currentStep: opts.Start.UnixMilli(), + mint: opts.Start.UnixMilli(), + maxt: opts.End.UnixMilli(), + step: interval, + stepsBatch: stepsBatch, + cacheResult: true, + } + // We do not duplicate results for range selectors since result is a matrix + // with their unique timestamps which does not depend on the step. 
+ switch expr.(type) { + case *parser.MatrixSelector, *parser.SubqueryExpr: + u.cacheResult = false + } + + return u, nil +} + +func (u *stepInvariantOperator) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + u.seriesOnce.Do(func() { + u.series, err = u.next.Series(ctx) + }) + if err != nil { + return nil, err + } + return u.series, nil +} + +func (u *stepInvariantOperator) GetPool() *model.VectorPool { + return u.vectorPool +} + +func (u *stepInvariantOperator) Next(ctx context.Context) ([]model.StepVector, error) { + if u.currentStep > u.maxt { + return nil, nil + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if !u.cacheResult { + return u.next.Next(ctx) + } + + if err := u.cacheInputVector(ctx); err != nil { + return nil, err + } + + if len(u.cachedVector.Samples) == 0 { + return nil, nil + } + + result := u.vectorPool.GetVectorBatch() + for i := 0; i < u.stepsBatch && u.currentStep <= u.maxt; i++ { + outVector := u.vectorPool.GetStepVector(u.currentStep) + outVector.Samples = append(outVector.Samples, u.cachedVector.Samples...) + outVector.SampleIDs = append(outVector.SampleIDs, u.cachedVector.SampleIDs...) + result = append(result, outVector) + u.currentStep += u.step + } + + return result, nil +} + +func (u *stepInvariantOperator) cacheInputVector(ctx context.Context) error { + var err error + var in []model.StepVector + u.cacheVectorOnce.Do(func() { + in, err = u.next.Next(ctx) + if err != nil { + return + } + defer u.next.GetPool().PutVectors(in) + + if len(in) == 0 || len(in[0].Samples) == 0 { + return + } + + // Make sure we only have exactly one step vector. + if len(in) != 1 { + err = errors.New("unexpected number of samples") + return + } + + // Copy the evaluated step vector. + // The timestamp of the vector is not relevant since we will produce + // new output vectors with the current step's timestamp. + u.cachedVector = u.vectorPool.GetStepVector(0) + u.cachedVector.Samples = append(u.cachedVector.Samples, in[0].Samples...) + u.cachedVector.SampleIDs = append(u.cachedVector.SampleIDs, in[0].SampleIDs...) + u.next.GetPool().PutStepVector(in[0]) + }) + return err +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/storage/filter.go b/vendor/github.com/thanos-community/promql-engine/execution/storage/filter.go new file mode 100644 index 00000000000..b35ca7b584d --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/storage/filter.go @@ -0,0 +1,60 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package storage + +import ( + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" +) + +type Filter interface { + Matches(series storage.Series) bool + Matchers() []*labels.Matcher +} + +type nopFilter struct{} + +func (n nopFilter) Matchers() []*labels.Matcher { return nil } + +func (n nopFilter) Matches(storage.Series) bool { return true } + +type filter struct { + matchers []*labels.Matcher + matcherSet map[string]*labels.Matcher +} + +func NewFilter(matchers []*labels.Matcher) Filter { + if len(matchers) == 0 { + return &nopFilter{} + } + + matcherSet := make(map[string]*labels.Matcher) + for _, m := range matchers { + matcherSet[m.Name] = m + } + return &filter{ + matchers: matchers, + matcherSet: matcherSet, + } +} + +func (f filter) Matchers() []*labels.Matcher { return f.matchers } + +func (f filter) Matches(series storage.Series) bool { + if len(f.matcherSet) == 0 { + return true + } + + for _, l := range series.Labels() { + m, ok := f.matcherSet[l.Name] + if !ok { + continue + } + if !m.Matches(l.Value) { + return false + } + } + + return true +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/storage/filtered_selector.go b/vendor/github.com/thanos-community/promql-engine/execution/storage/filtered_selector.go new file mode 100644 index 00000000000..8bd7241797e --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/storage/filtered_selector.go @@ -0,0 +1,61 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package storage + +import ( + "context" + "sync" + + "github.com/prometheus/prometheus/model/labels" +) + +type filteredSelector struct { + selector *seriesSelector + filter Filter + + once sync.Once + series []SignedSeries +} + +func NewFilteredSelector(selector *seriesSelector, filter Filter) SeriesSelector { + return &filteredSelector{ + selector: selector, + filter: filter, + } +} + +func (f *filteredSelector) Matchers() []*labels.Matcher { + return append(f.selector.matchers, f.filter.Matchers()...) +} + +func (f *filteredSelector) GetSeries(ctx context.Context, shard, numShards int) ([]SignedSeries, error) { + var err error + f.once.Do(func() { err = f.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + return seriesShard(f.series, shard, numShards), nil +} + +func (f *filteredSelector) loadSeries(ctx context.Context) error { + series, err := f.selector.GetSeries(ctx, 0, 1) + if err != nil { + return err + } + + var i uint64 + f.series = make([]SignedSeries, 0, len(series)) + for _, s := range series { + if f.filter.Matches(s) { + f.series = append(f.series, SignedSeries{ + Series: s.Series, + Signature: i, + }) + i++ + } + } + + return nil +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/storage/pool.go b/vendor/github.com/thanos-community/promql-engine/execution/storage/pool.go new file mode 100644 index 00000000000..55302a24cbd --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/storage/pool.go @@ -0,0 +1,83 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package storage + +import ( + "fmt" + "strconv" + "strings" + + "github.com/cespare/xxhash/v2" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" +) + +var sep = []byte{'\xff'} + +type SelectorPool struct { + selectors map[uint64]*seriesSelector + + queryable storage.Queryable +} + +func NewSelectorPool(queryable storage.Queryable) *SelectorPool { + return &SelectorPool{ + selectors: make(map[uint64]*seriesSelector), + queryable: queryable, + } +} + +func (p *SelectorPool) GetSelector(mint, maxt, step int64, matchers []*labels.Matcher, hints storage.SelectHints) SeriesSelector { + key := hashMatchers(matchers, mint, maxt, hints) + if _, ok := p.selectors[key]; !ok { + p.selectors[key] = newSeriesSelector(p.queryable, mint, maxt, step, matchers, hints) + } + return p.selectors[key] +} + +func (p *SelectorPool) GetFilteredSelector(mint, maxt, step int64, matchers, filters []*labels.Matcher, hints storage.SelectHints) SeriesSelector { + key := hashMatchers(matchers, mint, maxt, hints) + if _, ok := p.selectors[key]; !ok { + p.selectors[key] = newSeriesSelector(p.queryable, mint, maxt, step, matchers, hints) + } + + return NewFilteredSelector(p.selectors[key], NewFilter(filters)) +} + +func hashMatchers(matchers []*labels.Matcher, mint, maxt int64, hints storage.SelectHints) uint64 { + sb := xxhash.New() + for _, m := range matchers { + writeMatcher(sb, m) + } + writeInt64(sb, mint) + writeInt64(sb, maxt) + writeInt64(sb, hints.Step) + writeString(sb, hints.Func) + writeString(sb, strings.Join(hints.Grouping, ";")) + writeBool(sb, hints.By) + + key := sb.Sum64() + return key +} + +func writeMatcher(sb *xxhash.Digest, m *labels.Matcher) { + writeString(sb, m.Name) + writeString(sb, strconv.Itoa(int(m.Type))) + writeString(sb, m.Value) +} + +func writeInt64(sb *xxhash.Digest, val int64) { + _, _ = sb.WriteString(fmt.Sprintf("%d", val)) + _, _ = sb.Write(sep) +} + +func writeString(sb *xxhash.Digest, val string) { + _, _ = sb.WriteString(val) + _, _ = sb.Write(sep) +} + +func writeBool(sb *xxhash.Digest, val bool) { + _, _ = sb.WriteString(fmt.Sprintf("%t", val)) + _, _ = sb.Write(sep) +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/storage/series_selector.go b/vendor/github.com/thanos-community/promql-engine/execution/storage/series_selector.go new file mode 100644 index 00000000000..e9f0cf2791b --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/storage/series_selector.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package storage + +import ( + "context" + "sync" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" +) + +type SeriesSelector interface { + GetSeries(ctx context.Context, shard, numShards int) ([]SignedSeries, error) + Matchers() []*labels.Matcher +} + +type SignedSeries struct { + storage.Series + Signature uint64 +} + +type seriesSelector struct { + storage storage.Queryable + mint int64 + maxt int64 + step int64 + matchers []*labels.Matcher + hints storage.SelectHints + + once sync.Once + series []SignedSeries +} + +func newSeriesSelector(storage storage.Queryable, mint, maxt, step int64, matchers []*labels.Matcher, hints storage.SelectHints) *seriesSelector { + return &seriesSelector{ + storage: storage, + maxt: maxt, + mint: mint, + step: step, + matchers: matchers, + hints: hints, + } +} + +func (o *seriesSelector) Matchers() []*labels.Matcher { + return o.matchers +} + +func (o *seriesSelector) GetSeries(ctx context.Context, shard int, numShards int) ([]SignedSeries, error) { + var err error + o.once.Do(func() { err = o.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + return seriesShard(o.series, shard, numShards), nil +} + +func (o *seriesSelector) loadSeries(ctx context.Context) error { + querier, err := o.storage.Querier(ctx, o.mint, o.maxt) + if err != nil { + return err + } + defer querier.Close() + + seriesSet := querier.Select(false, &o.hints, o.matchers...) + i := 0 + for seriesSet.Next() { + s := seriesSet.At() + o.series = append(o.series, SignedSeries{ + Series: s, + Signature: uint64(i), + }) + i++ + } + + return seriesSet.Err() +} + +func seriesShard(series []SignedSeries, index int, numShards int) []SignedSeries { + start := index * len(series) / numShards + end := (index + 1) * len(series) / numShards + if end > len(series) { + end = len(series) + } + + slice := series[start:end] + shard := make([]SignedSeries, len(slice)) + copy(shard, slice) + + for i := range shard { + shard[i].Signature = uint64(i) + } + return shard +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/unary/unary.go b/vendor/github.com/thanos-community/promql-engine/execution/unary/unary.go new file mode 100644 index 00000000000..1ebb1350e3b --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/unary/unary.go @@ -0,0 +1,104 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package unary + +import ( + "context" + "sync" + + "github.com/prometheus/prometheus/model/labels" + "gonum.org/v1/gonum/floats" + + "github.com/thanos-community/promql-engine/execution/model" + "github.com/thanos-community/promql-engine/worker" +) + +type unaryNegation struct { + next model.VectorOperator + once sync.Once + + series []labels.Labels + + workers worker.Group +} + +func (u *unaryNegation) Explain() (me string, next []model.VectorOperator) { + return "[*unaryNegation]", []model.VectorOperator{u.next} +} + +func NewUnaryNegation( + next model.VectorOperator, + stepsBatch int, +) (model.VectorOperator, error) { + u := &unaryNegation{ + next: next, + } + + u.workers = worker.NewGroup(stepsBatch, u.workerTask) + return u, nil +} + +func (u *unaryNegation) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + u.once.Do(func() { err = u.loadSeries(ctx) }) + if err != nil { + return nil, err + } + return u.series, nil +} + +func (u *unaryNegation) loadSeries(ctx context.Context) error { + vectorSeries, err := u.next.Series(ctx) + if err != nil { + return err + } + u.series = make([]labels.Labels, len(vectorSeries)) + for i := range vectorSeries { + lbls := labels.NewBuilder(vectorSeries[i]).Del(labels.MetricName).Labels(nil) + u.series[i] = lbls + } + + u.workers.Start(ctx) + return nil +} + +func (u *unaryNegation) GetPool() *model.VectorPool { + return u.next.GetPool() +} + +func (u *unaryNegation) Next(ctx context.Context) ([]model.StepVector, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + in, err := u.next.Next(ctx) + if err != nil { + return nil, err + } + if in == nil { + return nil, nil + } + for i, vector := range in { + if err := u.workers[i].Send(0, vector); err != nil { + return nil, err + } + } + + for i := range in { + // Make sure worker finishes the job. + // Since it is in-place so no need another buffer. + if _, err := u.workers[i].GetOutput(); err != nil { + return nil, err + } + } + + return in, nil +} + +func (u *unaryNegation) workerTask(_ int, _ float64, vector model.StepVector) model.StepVector { + floats.Scale(-1, vector.Samples) + return vector +} diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go new file mode 100644 index 00000000000..d332e7c5235 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go @@ -0,0 +1,133 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package logicalplan + +import ( + "fmt" + + "github.com/prometheus/prometheus/promql/parser" + + "github.com/thanos-community/promql-engine/api" +) + +type Coalesce struct { + Expressions parser.Expressions +} + +func (r Coalesce) String() string { + return fmt.Sprintf("coalesce(%s)", r.Expressions) +} + +func (r Coalesce) Pretty(level int) string { return r.String() } + +func (r Coalesce) PositionRange() parser.PositionRange { return parser.PositionRange{} } + +func (r Coalesce) Type() parser.ValueType { return parser.ValueTypeMatrix } + +func (r Coalesce) PromQLExpr() {} + +type RemoteExecution struct { + Engine api.RemoteEngine + Query string +} + +func (r RemoteExecution) String() string { + return fmt.Sprintf("remote(%s)", r.Query) +} + +func (r RemoteExecution) Pretty(level int) string { return r.String() } + +func (r RemoteExecution) PositionRange() parser.PositionRange { return parser.PositionRange{} } + +func (r RemoteExecution) Type() parser.ValueType { return parser.ValueTypeMatrix } + +func (r RemoteExecution) PromQLExpr() {} + +var distributiveAggregations = map[parser.ItemType]struct{}{ + parser.SUM: {}, + parser.MIN: {}, + parser.MAX: {}, + parser.GROUP: {}, + parser.COUNT: {}, + parser.BOTTOMK: {}, + parser.TOPK: {}, +} + +// DistributedExecutionOptimizer produces a logical plan suitable for +// distributed Query execution. +type DistributedExecutionOptimizer struct { + Endpoints api.RemoteEndpoints +} + +func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr) parser.Expr { + engines := m.Endpoints.Engines() + traverseBottomUp(nil, &plan, func(parent, current *parser.Expr) (stop bool) { + // If the current operation is not distributive, stop the traversal. + if !isDistributive(current) { + return true + } + + // If the current node is an aggregation, distribute the operation and + // stop the traversal. + if aggr, ok := (*current).(*parser.AggregateExpr); ok { + localAggregation := aggr.Op + if aggr.Op == parser.COUNT { + localAggregation = parser.SUM + } + subQueries := m.makeSubQueries(current, engines) + *current = &parser.AggregateExpr{ + Op: localAggregation, + Expr: subQueries, + Param: aggr.Param, + Grouping: aggr.Grouping, + Without: aggr.Without, + PosRange: aggr.PosRange, + } + return true + } + + // If the parent operation is distributive, continue the traversal. + if isDistributive(parent) { + return false + } + + *current = m.makeSubQueries(current, engines) + return true + }) + + return plan +} + +func (m DistributedExecutionOptimizer) makeSubQueries(current *parser.Expr, engines []api.RemoteEngine) Coalesce { + remoteQueries := Coalesce{ + Expressions: make(parser.Expressions, len(engines)), + } + for i := 0; i < len(engines); i++ { + remoteQueries.Expressions[i] = &RemoteExecution{ + Engine: engines[i], + Query: (*current).String(), + } + } + return remoteQueries +} + +func isDistributive(expr *parser.Expr) bool { + if expr == nil { + return false + } + switch aggr := (*expr).(type) { + case *parser.BinaryExpr: + // Binary expressions are joins and need to be done across the entire + // data set. This is why we cannot push down aggregations where + // the operand is a binary expression. + return false + case *parser.AggregateExpr: + // Certain aggregations are currently not supported. 
+ if _, ok := distributiveAggregations[aggr.Op]; !ok { + return false + } + } + + return true +} diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/filter.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/filter.go new file mode 100644 index 00000000000..fcaef1b728a --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/filter.go @@ -0,0 +1,28 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package logicalplan + +import ( + "fmt" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +type FilteredSelector struct { + *parser.VectorSelector + Filters []*labels.Matcher +} + +func (f FilteredSelector) String() string { + return fmt.Sprintf("filter(%s, %s)", f.Filters, f.VectorSelector.String()) +} + +func (f FilteredSelector) Pretty(level int) string { return f.String() } + +func (f FilteredSelector) PositionRange() parser.PositionRange { return parser.PositionRange{} } + +func (f FilteredSelector) Type() parser.ValueType { return parser.ValueTypeVector } + +func (f FilteredSelector) PromQLExpr() {} diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/merge_selects.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/merge_selects.go new file mode 100644 index 00000000000..85d7d8d6144 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/merge_selects.go @@ -0,0 +1,156 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package logicalplan + +import ( + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +// MergeSelectsOptimizer optimizes a binary expression where +// one select is a superset of the other select. +// For example, the expression: +// +// metric{a="b", c="d"} / scalar(metric{a="b"}) becomes: +// Filter(c="d", metric{a="b"}) / scalar(metric{a="b"}). +// +// The engine can then cache the result of `metric{a="b"}` +// and apply an additional filter for {c="d"}. +type MergeSelectsOptimizer struct{} + +func (m MergeSelectsOptimizer) Optimize(expr parser.Expr) parser.Expr { + heap := make(matcherHeap) + extractSelectors(heap, expr) + replaceMatchers(heap, &expr) + + return expr +} + +func extractSelectors(selectors matcherHeap, expr parser.Expr) { + parser.Inspect(expr, func(node parser.Node, nodes []parser.Node) error { + e, ok := node.(*parser.VectorSelector) + if !ok { + return nil + } + for _, l := range e.LabelMatchers { + if l.Name == labels.MetricName { + selectors.add(l.Value, e.LabelMatchers) + } + } + return nil + }) +} + +func replaceMatchers(selectors matcherHeap, expr *parser.Expr) { + traverse(expr, func(node *parser.Expr) { + e, ok := (*node).(*parser.VectorSelector) + if !ok { + return + } + + for _, l := range e.LabelMatchers { + if l.Name != labels.MetricName { + continue + } + replacement, found := selectors.findReplacement(l.Value, e.LabelMatchers) + if !found { + continue + } + + // Make a copy of the original selectors to avoid modifying them while + // trimming filters. + filters := make([]*labels.Matcher, len(e.LabelMatchers)) + copy(filters, e.LabelMatchers) + + // All replacements are done on metrics name only, + // so we can drop the explicit metric name selector. + filters = dropMatcher(labels.MetricName, filters) + + // Drop filters which are already present as matchers in the replacement selector. 
+ for _, s := range replacement { + for _, f := range filters { + if s.Name == f.Name && s.Value == f.Value && s.Type == f.Type { + filters = dropMatcher(f.Name, filters) + } + } + } + e.LabelMatchers = replacement + *node = &FilteredSelector{ + Filters: filters, + VectorSelector: e, + } + return + } + }) +} + +func dropMatcher(matcherName string, originalMatchers []*labels.Matcher) []*labels.Matcher { + i := 0 + for i < len(originalMatchers) { + l := originalMatchers[i] + if l.Name == matcherName { + originalMatchers = append(originalMatchers[:i], originalMatchers[i+1:]...) + } else { + i++ + } + } + return originalMatchers +} + +func matcherToMap(matchers []*labels.Matcher) map[string]*labels.Matcher { + r := make(map[string]*labels.Matcher, len(matchers)) + for i := 0; i < len(matchers); i++ { + r[matchers[i].Name] = matchers[i] + } + return r +} + +// matcherHeap is a set of the most selective label matchers +// for each metrics discovered in a PromQL expression. +// The selectivity of a matcher is defined by how many series are +// matched by it. Since matchers in PromQL are open, selectors +// with the least amount of matchers are typically the most selective ones. +type matcherHeap map[string][]*labels.Matcher + +func (m matcherHeap) add(metricName string, lessSelective []*labels.Matcher) { + moreSelective, ok := m[metricName] + if !ok { + m[metricName] = lessSelective + return + } + + if len(lessSelective) < len(moreSelective) { + moreSelective = lessSelective + } + + m[metricName] = moreSelective +} + +func (m matcherHeap) findReplacement(metricName string, matcher []*labels.Matcher) ([]*labels.Matcher, bool) { + top, ok := m[metricName] + if !ok { + return nil, false + } + + matcherSet := matcherToMap(matcher) + topSet := matcherToMap(top) + for k, v := range topSet { + m, ok := matcherSet[k] + if !ok { + return nil, false + } + + equals := v.Name == m.Name && v.Type == m.Type && v.Value == m.Value + if !equals { + return nil, false + } + } + + // The top matcher and input matcher are equal. No replacement needed. + if len(topSet) == len(matcherSet) { + return nil, false + } + + return top, true +} diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go new file mode 100644 index 00000000000..8d8720d3013 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go @@ -0,0 +1,150 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. 
+ +package logicalplan + +import ( + "time" + + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" +) + +var ( + NoOptimizers = []Optimizer{} + AllOptimizers = append(DefaultOptimizers, PropagateMatchersOptimizer{}) +) + +var DefaultOptimizers = []Optimizer{ + SortMatchers{}, + MergeSelectsOptimizer{}, +} + +type Plan interface { + Optimize([]Optimizer) Plan + Expr() parser.Expr +} + +type Optimizer interface { + Optimize(parser.Expr) parser.Expr +} + +type plan struct { + expr parser.Expr +} + +func New(expr parser.Expr, mint, maxt time.Time) Plan { + expr = promql.PreprocessExpr(expr, mint, maxt) + setOffsetForAtModifier(mint.UnixMilli(), expr) + + return &plan{ + expr: expr, + } +} + +func (p *plan) Optimize(optimizers []Optimizer) Plan { + for _, o := range optimizers { + p.expr = o.Optimize(p.expr) + } + + return &plan{p.expr} +} + +func (p *plan) Expr() parser.Expr { + return p.expr +} + +func traverse(expr *parser.Expr, transform func(*parser.Expr)) { + switch node := (*expr).(type) { + case *parser.StepInvariantExpr: + transform(&node.Expr) + case *parser.VectorSelector: + transform(expr) + case *parser.MatrixSelector: + transform(&node.VectorSelector) + case *parser.AggregateExpr: + transform(expr) + traverse(&node.Expr, transform) + case *parser.Call: + for _, n := range node.Args { + traverse(&n, transform) + } + case *parser.BinaryExpr: + transform(expr) + traverse(&node.LHS, transform) + traverse(&node.RHS, transform) + case *parser.UnaryExpr: + traverse(&node.Expr, transform) + case *parser.ParenExpr: + traverse(&node.Expr, transform) + case *parser.SubqueryExpr: + traverse(&node.Expr, transform) + } +} + +func traverseBottomUp(parent *parser.Expr, current *parser.Expr, transform func(parent *parser.Expr, node *parser.Expr) bool) bool { + switch node := (*current).(type) { + case *parser.StepInvariantExpr: + return traverseBottomUp(current, &node.Expr, transform) + case *parser.VectorSelector: + return transform(parent, current) + case *parser.MatrixSelector: + return transform(parent, &node.VectorSelector) + case *parser.AggregateExpr: + if stop := traverseBottomUp(current, &node.Expr, transform); stop { + return stop + } + return transform(parent, current) + case *parser.Call: + for _, n := range node.Args { + if stop := traverseBottomUp(current, &n, transform); stop { + return stop + } + } + return transform(parent, current) + case *parser.BinaryExpr: + lstop := traverseBottomUp(current, &node.LHS, transform) + rstop := traverseBottomUp(current, &node.RHS, transform) + if lstop || rstop { + return true + } + return transform(parent, current) + case *parser.UnaryExpr: + return traverseBottomUp(current, &node.Expr, transform) + case *parser.ParenExpr: + return traverseBottomUp(current, &node.Expr, transform) + case *parser.SubqueryExpr: + return traverseBottomUp(current, &node.Expr, transform) + } + + return true +} + +// Copy from https://github.com/prometheus/prometheus/blob/v2.39.1/promql/engine.go#L2658. +func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { + getOffset := func(ts *int64, originalOffset time.Duration, path []parser.Node) time.Duration { + if ts == nil { + return originalOffset + } + // TODO: support subquery. 
+ + offsetForTs := time.Duration(evalTime-*ts) * time.Millisecond + offsetDiff := offsetForTs + return originalOffset + offsetDiff + } + + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + switch n := node.(type) { + case *parser.VectorSelector: + n.Offset = getOffset(n.Timestamp, n.OriginalOffset, path) + + case *parser.MatrixSelector: + vs := n.VectorSelector.(*parser.VectorSelector) + vs.Offset = getOffset(vs.Timestamp, vs.OriginalOffset, path) + + case *parser.SubqueryExpr: + n.Offset = getOffset(n.Timestamp, n.OriginalOffset, path) + } + return nil + }) +} diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/propagate_selectors.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/propagate_selectors.go new file mode 100644 index 00000000000..ab1af39a8f9 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/propagate_selectors.go @@ -0,0 +1,120 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package logicalplan + +import ( + "sort" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +// PropagateMatchersOptimizer implements matcher propagation between +// two vector selectors in a binary expression. +type PropagateMatchersOptimizer struct{} + +func (m PropagateMatchersOptimizer) Optimize(expr parser.Expr) parser.Expr { + traverse(&expr, func(expr *parser.Expr) { + binOp, ok := (*expr).(*parser.BinaryExpr) + if !ok { + return + } + + // The optimizer cannot be applied to comparison operations. + if binOp.Op.IsComparisonOperator() { + return + } + + // TODO(fpetkovski): Investigate support for vector matching on a subset of labels. + if binOp.VectorMatching != nil && len(binOp.VectorMatching.MatchingLabels) > 0 { + return + } + + // TODO(fpetkovski): Investigate support for one-to-many and many-to-one. + if binOp.VectorMatching != nil && binOp.VectorMatching.Card != parser.CardOneToOne { + return + } + + propagateMatchers(binOp) + }) + + return expr +} + +func propagateMatchers(binOp *parser.BinaryExpr) { + lhSelector, ok := binOp.LHS.(*parser.VectorSelector) + if !ok { + return + } + rhSelector, ok := binOp.RHS.(*parser.VectorSelector) + if !ok { + return + } + // This case is handled by MergeSelectsOptimizer. 
+ if lhSelector.Name == rhSelector.Name { + return + } + + lhMatchers := toMatcherMap(lhSelector) + rhMatchers := toMatcherMap(rhSelector) + union, hasDuplicates := makeUnion(lhMatchers, rhMatchers) + if hasDuplicates { + return + } + + finalMatchers := toSlice(union) + lhSelector.LabelMatchers = finalMatchers + rhSelector.LabelMatchers = finalMatchers +} + +func toSlice(union map[string]*labels.Matcher) []*labels.Matcher { + finalMatchers := make([]*labels.Matcher, 0, len(union)) + for _, m := range union { + finalMatchers = append(finalMatchers, m) + } + + sort.Slice(finalMatchers, func(i, j int) bool { return finalMatchers[i].Name < finalMatchers[j].Name }) + return finalMatchers +} + +func makeUnion(lhMatchers map[string]*labels.Matcher, rhMatchers map[string]*labels.Matcher) (map[string]*labels.Matcher, bool) { + union := make(map[string]*labels.Matcher) + for _, m := range lhMatchers { + if m.Name == labels.MetricName { + continue + } + if duplicateExists(rhMatchers, m) { + return nil, true + } + union[m.Name] = m + } + + for _, m := range rhMatchers { + if m.Name == labels.MetricName { + continue + } + if duplicateExists(lhMatchers, m) { + return nil, true + } + union[m.Name] = m + } + return union, false +} + +func toMatcherMap(lhSelector *parser.VectorSelector) map[string]*labels.Matcher { + lhMatchers := make(map[string]*labels.Matcher) + for _, m := range lhSelector.LabelMatchers { + lhMatchers[m.Name] = m + } + return lhMatchers +} + +func duplicateExists(matchers map[string]*labels.Matcher, matcher *labels.Matcher) bool { + existing, ok := matchers[matcher.Name] + if !ok { + return false + } + + return existing.String() == matcher.String() +} diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/sort_matchers.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/sort_matchers.go new file mode 100644 index 00000000000..818ff7a804e --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/sort_matchers.go @@ -0,0 +1,29 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package logicalplan + +import ( + "sort" + + "github.com/prometheus/prometheus/promql/parser" +) + +// SortMatchers sorts all matchers in a selector so that +// all subsequent optimizers, both in the logical and physical plan, +// can rely on this property. +type SortMatchers struct{} + +func (m SortMatchers) Optimize(expr parser.Expr) parser.Expr { + traverse(&expr, func(node *parser.Expr) { + e, ok := (*node).(*parser.VectorSelector) + if !ok { + return + } + + sort.Slice(e.LabelMatchers, func(i, j int) bool { + return e.LabelMatchers[i].Name < e.LabelMatchers[j].Name + }) + }) + return expr +} diff --git a/vendor/github.com/thanos-community/promql-engine/query/options.go b/vendor/github.com/thanos-community/promql-engine/query/options.go new file mode 100644 index 00000000000..70c24099ddf --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/query/options.go @@ -0,0 +1,36 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package query + +import ( + "time" +) + +type Options struct { + Start time.Time + End time.Time + Step time.Duration + LookbackDelta time.Duration + + StepsBatch int64 +} + +func (o *Options) NumSteps() int { + // Instant evaluation is executed as a range evaluation with one step. 
+ if o.Step.Milliseconds() == 0 { + return 1 + } + + totalSteps := (o.End.UnixMilli()-o.Start.UnixMilli())/o.Step.Milliseconds() + 1 + if o.StepsBatch < totalSteps { + return int(o.StepsBatch) + } + return int(totalSteps) +} + +func (o *Options) WithEndTime(end time.Time) *Options { + result := *o + result.End = end + return &result +} diff --git a/vendor/github.com/thanos-community/promql-engine/worker/worker.go b/vendor/github.com/thanos-community/promql-engine/worker/worker.go new file mode 100644 index 00000000000..06b9fd8b45c --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/worker/worker.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package worker + +import ( + "context" + "sync" + + "github.com/thanos-community/promql-engine/execution/model" +) + +type doneFunc func() + +type Group []*Worker + +func NewGroup(numWorkers int, task Task) Group { + group := make([]*Worker, numWorkers) + for i := 0; i < numWorkers; i++ { + group[i] = New(i, task) + } + return group +} + +func (g Group) Start(ctx context.Context) { + var wg sync.WaitGroup + for _, w := range g { + wg.Add(1) + go w.start(wg.Done, ctx) + } + wg.Wait() +} + +type input struct { + arg float64 + in model.StepVector +} + +type Worker struct { + ctx context.Context + + workerID int + input chan *input + output chan model.StepVector + doWork Task +} + +type Task func(workerID int, arg float64, in model.StepVector) model.StepVector + +func New(workerID int, task Task) *Worker { + input := make(chan *input, 1) + output := make(chan model.StepVector, 1) + + return &Worker{ + workerID: workerID, + input: input, + output: output, + doWork: task, + } +} + +func (w *Worker) start(done doneFunc, ctx context.Context) { + w.ctx = ctx + done() + for { + select { + case <-w.ctx.Done(): + close(w.output) + return + case task, ok := <-w.input: + if !ok { + return + } + w.output <- w.doWork(w.workerID, task.arg, task.in) + } + } +} + +func (w *Worker) Send(arg float64, in model.StepVector) error { + select { + case <-w.ctx.Done(): + close(w.input) + return w.ctx.Err() + default: + w.input <- &input{arg: arg, in: in} + return nil + } +} + +func (w *Worker) GetOutput() (model.StepVector, error) { + select { + case <-w.ctx.Done(): + return model.StepVector{}, w.ctx.Err() + default: + return <-w.output, nil + } +} diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS new file mode 100644 index 00000000000..fbff74ec59d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/AUTHORS @@ -0,0 +1,121 @@ +# This is the official list of Gonum authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +Alexander Egurnov +Andrei Blinnikov +Bill Gray +Bill Noon +Brendan Tracey +Brent Pedersen +Chad Kunde +Chih-Wei Chang +Chris Tessum +Christophe Meessen +Christopher Waldon +Clayton Northey +Dan Kortschak +Daniel Fireman +Dario Heinisch +David Kleiven +David Samborski +Davor Kapsa +DeepMind Technologies +Delaney Gillilan +Dezmond Goff +Dong-hee Na +Dustin Spicuzza +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Evgeny Savinov +Fabian Wickborn +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Google Inc +Gustaf Johansson +Hossein Zolfi +Iakov Davydov +Igor Mikushkin +Iskander Sharipov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeremy Atkinson +Jinesi Yelizati +Jonas Kahler +Jonas Schulze +Jonathan J Lawlor +Jonathan Reiter +Jonathan Schroeder +Joost van Amersfoort +Joseph Watson +Josh Wilson +Julien Roland +Kai Trukenmüller +Kent English +Kevin C. Zimmerman +Kirill Motkov +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Marco Leogrande +Mark Canning +Mark Skilbeck +Martin Diz +Matthew Connelly +Matthieu Di Mercurio +Max Halford +Maxim Sergeev +Microsoft Corporation +MinJae Kwon +Nathan Edwards +Nick Potts +Nils Wogatzky +Olivier Wulveryck +Or Rikon +Patricio Whittingslow +Patrick DeVivo +Pontus Melke +Renee French +Rishi Desai +Robin Eklind +Roger Welin +Rondall Jones +Sam Zaydel +Samuel Kelemen +Saran Ahluwalia +Scott Holden +Scott Kiesel +Sebastien Binet +Shawn Smith +Sintela Ltd +source{d} +Spencer Lyon +Steve McCoy +Taesu Pyo +Takeshi Yoneda +Tamir Hyman +The University of Adelaide +The University of Minnesota +The University of Washington +Thomas Berg +Tobin Harding +Valentin Deleplace +Vincent Thiery +Vladimír Chalupecký +Will Tekulve +Yasuhiro Matsumoto +Yevgeniy Vahlis +Yucheng Zhu +Zoe Juozapaitis diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS new file mode 100644 index 00000000000..baacd28c881 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -0,0 +1,124 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Gonum +# project. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees would be listed here +# but not in AUTHORS, because Google would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Alexander Egurnov +Andrei Blinnikov +Andrew Brampton +Bill Gray +Bill Noon +Brendan Tracey +Brent Pedersen +Chad Kunde +Chih-Wei Chang +Chris Tessum +Christophe Meessen +Christopher Waldon +Clayton Northey +Dan Kortschak +Dan Lorenc +Daniel Fireman +Dario Heinisch +David Kleiven +David Samborski +Davor Kapsa +Delaney Gillilan +Dezmond Goff +Dong-hee Na +Dustin Spicuzza +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Evgeny Savinov +Fabian Wickborn +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Gustaf Johansson +Hossein Zolfi +Iakov Davydov +Igor Mikushkin +Iskander Sharipov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeremy Atkinson +Jinesi Yelizati +Jon Richards +Jonas Kahler +Jonas Schulze +Jonathan J Lawlor +Jonathan Reiter +Jonathan Schroeder +Joost van Amersfoort +Joseph Watson +Josh Wilson +Julien Roland +Kai Trukenmüller +Kent English +Kevin C. 
Zimmerman +Kirill Motkov +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Marco Leogrande +Mark Canning +Mark Skilbeck +Martin Diz +Matthew Connelly +Matthieu Di Mercurio +Max Halford +Maxim Sergeev +MinJae Kwon +Nathan Edwards +Nick Potts +Nils Wogatzky +Olivier Wulveryck +Or Rikon +Patricio Whittingslow +Patrick DeVivo +Pontus Melke +Renee French +Rishi Desai +Robin Eklind +Roger Welin +Roman Werpachowski +Rondall Jones +Sam Zaydel +Samuel Kelemen +Saran Ahluwalia +Scott Holden +Scott Kiesel +Sebastien Binet +Shawn Smith +Spencer Lyon +Steve McCoy +Taesu Pyo +Takeshi Yoneda +Tamir Hyman +Thomas Berg +Tobin Harding +Valentin Deleplace +Vincent Thiery +Vladimír Chalupecký +Will Tekulve +Yasuhiro Matsumoto +Yevgeniy Vahlis +Yucheng Zhu +Zoe Juozapaitis diff --git a/vendor/gonum.org/v1/gonum/LICENSE b/vendor/gonum.org/v1/gonum/LICENSE new file mode 100644 index 00000000000..ed477e59b51 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/LICENSE @@ -0,0 +1,23 @@ +Copyright ©2013 The Gonum Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/floats/README.md b/vendor/gonum.org/v1/gonum/floats/README.md new file mode 100644 index 00000000000..ee867bb7bf6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/README.md @@ -0,0 +1,4 @@ +# Gonum floats [![GoDoc](https://godoc.org/gonum.org/v1/gonum/floats?status.svg)](https://godoc.org/gonum.org/v1/gonum/floats) + +Package floats provides a set of helper routines for dealing with slices of float64. +The functions avoid allocations to allow for use within tight loops without garbage collection overhead. diff --git a/vendor/gonum.org/v1/gonum/floats/doc.go b/vendor/gonum.org/v1/gonum/floats/doc.go new file mode 100644 index 00000000000..bfe05c1918d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/doc.go @@ -0,0 +1,11 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package floats provides a set of helper routines for dealing with slices +// of float64. The functions avoid allocations to allow for use within tight +// loops without garbage collection overhead. +// +// The convention used is that when a slice is being modified in place, it has +// the name dst. +package floats // import "gonum.org/v1/gonum/floats" diff --git a/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/gonum.org/v1/gonum/floats/floats.go new file mode 100644 index 00000000000..5db73a0573a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/floats.go @@ -0,0 +1,807 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package floats + +import ( + "errors" + "math" + "sort" + + "gonum.org/v1/gonum/floats/scalar" + "gonum.org/v1/gonum/internal/asm/f64" +) + +const ( + zeroLength = "floats: zero length slice" + shortSpan = "floats: slice length less than 2" + badLength = "floats: slice lengths do not match" + badDstLength = "floats: destination slice length does not match input" +) + +// Add adds, element-wise, the elements of s and dst, and stores the result in dst. +// It panics if the argument lengths do not match. +func Add(dst, s []float64) { + if len(dst) != len(s) { + panic(badDstLength) + } + f64.AxpyUnitaryTo(dst, 1, s, dst) +} + +// AddTo adds, element-wise, the elements of s and t and +// stores the result in dst. +// It panics if the argument lengths do not match. +func AddTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic(badLength) + } + if len(dst) != len(s) { + panic(badDstLength) + } + f64.AxpyUnitaryTo(dst, 1, s, t) + return dst +} + +// AddConst adds the scalar c to all of the values in dst. +func AddConst(c float64, dst []float64) { + f64.AddConst(c, dst) +} + +// AddScaled performs dst = dst + alpha * s. +// It panics if the slice argument lengths do not match. +func AddScaled(dst []float64, alpha float64, s []float64) { + if len(dst) != len(s) { + panic(badLength) + } + f64.AxpyUnitaryTo(dst, alpha, s, dst) +} + +// AddScaledTo performs dst = y + alpha * s, where alpha is a scalar, +// and dst, y and s are all slices. +// It panics if the slice argument lengths do not match. +// +// At the return of the function, dst[i] = y[i] + alpha * s[i] +func AddScaledTo(dst, y []float64, alpha float64, s []float64) []float64 { + if len(s) != len(y) { + panic(badLength) + } + if len(dst) != len(y) { + panic(badDstLength) + } + f64.AxpyUnitaryTo(dst, alpha, s, y) + return dst +} + +// argsort is a helper that implements sort.Interface, as used by +// Argsort and ArgsortStable. +type argsort struct { + s []float64 + inds []int +} + +func (a argsort) Len() int { + return len(a.s) +} + +func (a argsort) Less(i, j int) bool { + return a.s[i] < a.s[j] +} + +func (a argsort) Swap(i, j int) { + a.s[i], a.s[j] = a.s[j], a.s[i] + a.inds[i], a.inds[j] = a.inds[j], a.inds[i] +} + +// Argsort sorts the elements of dst while tracking their original order. +// At the conclusion of Argsort, dst will contain the original elements of dst +// but sorted in increasing order, and inds will contain the original position +// of the elements in the slice such that dst[i] = origDst[inds[i]]. +// It panics if the argument lengths do not match. 
+func Argsort(dst []float64, inds []int) { + if len(dst) != len(inds) { + panic(badDstLength) + } + for i := range dst { + inds[i] = i + } + + a := argsort{s: dst, inds: inds} + sort.Sort(a) +} + +// ArgsortStable sorts the elements of dst while tracking their original order and +// keeping the original order of equal elements. At the conclusion of ArgsortStable, +// dst will contain the original elements of dst but sorted in increasing order, +// and inds will contain the original position of the elements in the slice such +// that dst[i] = origDst[inds[i]]. +// It panics if the argument lengths do not match. +func ArgsortStable(dst []float64, inds []int) { + if len(dst) != len(inds) { + panic(badDstLength) + } + for i := range dst { + inds[i] = i + } + + a := argsort{s: dst, inds: inds} + sort.Stable(a) +} + +// Count applies the function f to every element of s and returns the number +// of times the function returned true. +func Count(f func(float64) bool, s []float64) int { + var n int + for _, val := range s { + if f(val) { + n++ + } + } + return n +} + +// CumProd finds the cumulative product of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. +// It panics if the argument lengths do not match. +// +// At the return of the function, dst[i] = s[i] * s[i-1] * s[i-2] * ... +func CumProd(dst, s []float64) []float64 { + if len(dst) != len(s) { + panic(badDstLength) + } + if len(dst) == 0 { + return dst + } + return f64.CumProd(dst, s) +} + +// CumSum finds the cumulative sum of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. +// It panics if the argument lengths do not match. +// +// At the return of the function, dst[i] = s[i] + s[i-1] + s[i-2] + ... +func CumSum(dst, s []float64) []float64 { + if len(dst) != len(s) { + panic(badDstLength) + } + if len(dst) == 0 { + return dst + } + return f64.CumSum(dst, s) +} + +// Distance computes the L-norm of s - t. See Norm for special cases. +// It panics if the slice argument lengths do not match. +func Distance(s, t []float64, L float64) float64 { + if len(s) != len(t) { + panic(badLength) + } + if len(s) == 0 { + return 0 + } + if L == 2 { + return f64.L2DistanceUnitary(s, t) + } + var norm float64 + if L == 1 { + for i, v := range s { + norm += math.Abs(t[i] - v) + } + return norm + } + if math.IsInf(L, 1) { + for i, v := range s { + absDiff := math.Abs(t[i] - v) + if absDiff > norm { + norm = absDiff + } + } + return norm + } + for i, v := range s { + norm += math.Pow(math.Abs(t[i]-v), L) + } + return math.Pow(norm, 1/L) +} + +// Div performs element-wise division dst / s +// and stores the value in dst. +// It panics if the argument lengths do not match. +func Div(dst, s []float64) { + if len(dst) != len(s) { + panic(badLength) + } + f64.Div(dst, s) +} + +// DivTo performs element-wise division s / t +// and stores the value in dst. +// It panics if the argument lengths do not match. +func DivTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic(badLength) + } + if len(dst) != len(s) { + panic(badDstLength) + } + return f64.DivTo(dst, s, t) +} + +// Dot computes the dot product of s1 and s2, i.e. +// sum_{i = 1}^N s1[i]*s2[i]. +// It panics if the argument lengths do not match. +func Dot(s1, s2 []float64) float64 { + if len(s1) != len(s2) { + panic(badLength) + } + return f64.DotUnitary(s1, s2) +} + +// Equal returns true when the slices have equal lengths and +// all elements are numerically identical. 
+func Equal(s1, s2 []float64) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if s2[i] != val { + return false + } + } + return true +} + +// EqualApprox returns true when the slices have equal lengths and +// all element pairs have an absolute tolerance less than tol or a +// relative tolerance less than tol. +func EqualApprox(s1, s2 []float64, tol float64) bool { + if len(s1) != len(s2) { + return false + } + for i, a := range s1 { + if !scalar.EqualWithinAbsOrRel(a, s2[i], tol, tol) { + return false + } + } + return true +} + +// EqualFunc returns true when the slices have the same lengths +// and the function returns true for all element pairs. +func EqualFunc(s1, s2 []float64, f func(float64, float64) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if !f(val, s2[i]) { + return false + } + } + return true +} + +// EqualLengths returns true when all of the slices have equal length, +// and false otherwise. It also returns true when there are no input slices. +func EqualLengths(slices ...[]float64) bool { + // This length check is needed: http://play.golang.org/p/sdty6YiLhM + if len(slices) == 0 { + return true + } + l := len(slices[0]) + for i := 1; i < len(slices); i++ { + if len(slices[i]) != l { + return false + } + } + return true +} + +// Find applies f to every element of s and returns the indices of the first +// k elements for which the f returns true, or all such elements +// if k < 0. +// Find will reslice inds to have 0 length, and will append +// found indices to inds. +// If k > 0 and there are fewer than k elements in s satisfying f, +// all of the found elements will be returned along with an error. +// At the return of the function, the input inds will be in an undetermined state. +func Find(inds []int, f func(float64) bool, s []float64, k int) ([]int, error) { + // inds is also returned to allow for calling with nil. + + // Reslice inds to have zero length. + inds = inds[:0] + + // If zero elements requested, can just return. + if k == 0 { + return inds, nil + } + + // If k < 0, return all of the found indices. + if k < 0 { + for i, val := range s { + if f(val) { + inds = append(inds, i) + } + } + return inds, nil + } + + // Otherwise, find the first k elements. + nFound := 0 + for i, val := range s { + if f(val) { + inds = append(inds, i) + nFound++ + if nFound == k { + return inds, nil + } + } + } + // Finished iterating over the loop, which means k elements were not found. + return inds, errors.New("floats: insufficient elements found") +} + +// HasNaN returns true when the slice s has any values that are NaN and false +// otherwise. +func HasNaN(s []float64) bool { + for _, v := range s { + if math.IsNaN(v) { + return true + } + } + return false +} + +// LogSpan returns a set of n equally spaced points in log space between, +// l and u where N is equal to len(dst). The first element of the +// resulting dst will be l and the final element of dst will be u. +// It panics if the length of dst is less than 2. +// Note that this call will return NaNs if either l or u are negative, and +// will return all zeros if l or u is zero. +// Also returns the mutated slice dst, so that it can be used in range, like: +// +// for i, x := range LogSpan(dst, l, u) { ... 
} +func LogSpan(dst []float64, l, u float64) []float64 { + Span(dst, math.Log(l), math.Log(u)) + for i := range dst { + dst[i] = math.Exp(dst[i]) + } + return dst +} + +// LogSumExp returns the log of the sum of the exponentials of the values in s. +// Panics if s is an empty slice. +func LogSumExp(s []float64) float64 { + // Want to do this in a numerically stable way which avoids + // overflow and underflow + // First, find the maximum value in the slice. + maxval := Max(s) + if math.IsInf(maxval, 0) { + // If it's infinity either way, the logsumexp will be infinity as well + // returning now avoids NaNs + return maxval + } + var lse float64 + // Compute the sumexp part + for _, val := range s { + lse += math.Exp(val - maxval) + } + // Take the log and add back on the constant taken out + return math.Log(lse) + maxval +} + +// Max returns the maximum value in the input slice. If the slice is empty, Max will panic. +func Max(s []float64) float64 { + return s[MaxIdx(s)] +} + +// MaxIdx returns the index of the maximum value in the input slice. If several +// entries have the maximum value, the first such index is returned. +// It panics if s is zero length. +func MaxIdx(s []float64) int { + if len(s) == 0 { + panic(zeroLength) + } + max := math.NaN() + var ind int + for i, v := range s { + if math.IsNaN(v) { + continue + } + if v > max || math.IsNaN(max) { + max = v + ind = i + } + } + return ind +} + +// Min returns the minimum value in the input slice. +// It panics if s is zero length. +func Min(s []float64) float64 { + return s[MinIdx(s)] +} + +// MinIdx returns the index of the minimum value in the input slice. If several +// entries have the minimum value, the first such index is returned. +// It panics if s is zero length. +func MinIdx(s []float64) int { + if len(s) == 0 { + panic(zeroLength) + } + min := math.NaN() + var ind int + for i, v := range s { + if math.IsNaN(v) { + continue + } + if v < min || math.IsNaN(min) { + min = v + ind = i + } + } + return ind +} + +// Mul performs element-wise multiplication between dst +// and s and stores the value in dst. +// It panics if the argument lengths do not match. +func Mul(dst, s []float64) { + if len(dst) != len(s) { + panic(badLength) + } + for i, val := range s { + dst[i] *= val + } +} + +// MulTo performs element-wise multiplication between s +// and t and stores the value in dst. +// It panics if the argument lengths do not match. +func MulTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic(badLength) + } + if len(dst) != len(s) { + panic(badDstLength) + } + for i, val := range t { + dst[i] = val * s[i] + } + return dst +} + +// NearestIdx returns the index of the element in s +// whose value is nearest to v. If several such +// elements exist, the lowest index is returned. +// It panics if s is zero length. +func NearestIdx(s []float64, v float64) int { + if len(s) == 0 { + panic(zeroLength) + } + switch { + case math.IsNaN(v): + return 0 + case math.IsInf(v, 1): + return MaxIdx(s) + case math.IsInf(v, -1): + return MinIdx(s) + } + var ind int + dist := math.NaN() + for i, val := range s { + newDist := math.Abs(v - val) + // A NaN distance will not be closer. + if math.IsNaN(newDist) { + continue + } + if newDist < dist || math.IsNaN(dist) { + dist = newDist + ind = i + } + } + return ind +} + +// NearestIdxForSpan return the index of a hypothetical vector created +// by Span with length n and bounds l and u whose value is closest +// to v. 
That is, NearestIdxForSpan(n, l, u, v) is equivalent to +// Nearest(Span(make([]float64, n),l,u),v) without an allocation. +// It panics if n is less than two. +func NearestIdxForSpan(n int, l, u float64, v float64) int { + if n < 2 { + panic(shortSpan) + } + if math.IsNaN(v) { + return 0 + } + + // Special cases for Inf and NaN. + switch { + case math.IsNaN(l) && !math.IsNaN(u): + return n - 1 + case math.IsNaN(u): + return 0 + case math.IsInf(l, 0) && math.IsInf(u, 0): + if l == u { + return 0 + } + if n%2 == 1 { + if !math.IsInf(v, 0) { + return n / 2 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n/2 + 1 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n / 2 + case math.IsInf(l, 0): + if v == l { + return 0 + } + return n - 1 + case math.IsInf(u, 0): + if v == u { + return n - 1 + } + return 0 + case math.IsInf(v, -1): + if l <= u { + return 0 + } + return n - 1 + case math.IsInf(v, 1): + if u <= l { + return 0 + } + return n - 1 + } + + // Special cases for v outside (l, u) and (u, l). + switch { + case l < u: + if v <= l { + return 0 + } + if v >= u { + return n - 1 + } + case l > u: + if v >= l { + return 0 + } + if v <= u { + return n - 1 + } + default: + return 0 + } + + // Can't guarantee anything about exactly halfway between + // because of floating point weirdness. + return int((float64(n)-1)/(u-l)*(v-l) + 0.5) +} + +// Norm returns the L norm of the slice S, defined as +// (sum_{i=1}^N s[i]^L)^{1/L} +// Special cases: +// L = math.Inf(1) gives the maximum absolute value. +// Does not correctly compute the zero norm (use Count). +func Norm(s []float64, L float64) float64 { + // Should this complain if L is not positive? + // Should this be done in log space for better numerical stability? + // would be more cost + // maybe only if L is high? + if len(s) == 0 { + return 0 + } + if L == 2 { + return f64.L2NormUnitary(s) + } + var norm float64 + if L == 1 { + for _, val := range s { + norm += math.Abs(val) + } + return norm + } + if math.IsInf(L, 1) { + for _, val := range s { + norm = math.Max(norm, math.Abs(val)) + } + return norm + } + for _, val := range s { + norm += math.Pow(math.Abs(val), L) + } + return math.Pow(norm, 1/L) +} + +// Prod returns the product of the elements of the slice. +// Returns 1 if len(s) = 0. +func Prod(s []float64) float64 { + prod := 1.0 + for _, val := range s { + prod *= val + } + return prod +} + +// Reverse reverses the order of elements in the slice. +func Reverse(s []float64) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// Same returns true when the input slices have the same length and all +// elements have the same value with NaN treated as the same. +func Same(s, t []float64) bool { + if len(s) != len(t) { + return false + } + for i, v := range s { + w := t[i] + if v != w && !(math.IsNaN(v) && math.IsNaN(w)) { + return false + } + } + return true +} + +// Scale multiplies every element in dst by the scalar c. +func Scale(c float64, dst []float64) { + if len(dst) > 0 { + f64.ScalUnitary(c, dst) + } +} + +// ScaleTo multiplies the elements in s by c and stores the result in dst. +// It panics if the slice argument lengths do not match. 
+func ScaleTo(dst []float64, c float64, s []float64) []float64 { + if len(dst) != len(s) { + panic(badDstLength) + } + if len(dst) > 0 { + f64.ScalUnitaryTo(dst, c, s) + } + return dst +} + +// Span returns a set of N equally spaced points between l and u, where N +// is equal to the length of the destination. The first element of the destination +// is l, the final element of the destination is u. +// It panics if the length of dst is less than 2. +// +// Span also returns the mutated slice dst, so that it can be used in range expressions, +// like: +// +// for i, x := range Span(dst, l, u) { ... } +func Span(dst []float64, l, u float64) []float64 { + n := len(dst) + if n < 2 { + panic(shortSpan) + } + + // Special cases for Inf and NaN. + switch { + case math.IsNaN(l): + for i := range dst[:len(dst)-1] { + dst[i] = math.NaN() + } + dst[len(dst)-1] = u + return dst + case math.IsNaN(u): + for i := range dst[1:] { + dst[i+1] = math.NaN() + } + dst[0] = l + return dst + case math.IsInf(l, 0) && math.IsInf(u, 0): + for i := range dst[:len(dst)/2] { + dst[i] = l + dst[len(dst)-i-1] = u + } + if len(dst)%2 == 1 { + if l != u { + dst[len(dst)/2] = 0 + } else { + dst[len(dst)/2] = l + } + } + return dst + case math.IsInf(l, 0): + for i := range dst[:len(dst)-1] { + dst[i] = l + } + dst[len(dst)-1] = u + return dst + case math.IsInf(u, 0): + for i := range dst[1:] { + dst[i+1] = u + } + dst[0] = l + return dst + } + + step := (u - l) / float64(n-1) + for i := range dst { + dst[i] = l + step*float64(i) + } + return dst +} + +// Sub subtracts, element-wise, the elements of s from dst. +// It panics if the argument lengths do not match. +func Sub(dst, s []float64) { + if len(dst) != len(s) { + panic(badLength) + } + f64.AxpyUnitaryTo(dst, -1, s, dst) +} + +// SubTo subtracts, element-wise, the elements of t from s and +// stores the result in dst. +// It panics if the argument lengths do not match. +func SubTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic(badLength) + } + if len(dst) != len(s) { + panic(badDstLength) + } + f64.AxpyUnitaryTo(dst, -1, t, s) + return dst +} + +// Sum returns the sum of the elements of the slice. +func Sum(s []float64) float64 { + return f64.Sum(s) +} + +// Within returns the first index i where s[i] <= v < s[i+1]. Within panics if: +// - len(s) < 2 +// - s is not sorted +func Within(s []float64, v float64) int { + if len(s) < 2 { + panic(shortSpan) + } + if !sort.Float64sAreSorted(s) { + panic("floats: input slice not sorted") + } + if v < s[0] || v >= s[len(s)-1] || math.IsNaN(v) { + return -1 + } + for i, f := range s[1:] { + if v < f { + return i + } + } + return -1 +} + +// SumCompensated returns the sum of the elements of the slice calculated with greater +// accuracy than Sum at the expense of additional computation. +func SumCompensated(s []float64) float64 { + // SumCompensated uses an improved version of Kahan's compensated + // summation algorithm proposed by Neumaier. + // See https://en.wikipedia.org/wiki/Kahan_summation_algorithm for details. + var sum, c float64 + for _, x := range s { + // This type conversion is here to prevent a sufficiently smart compiler + // from optimising away these operations. 
+ t := float64(sum + x) + if math.Abs(sum) >= math.Abs(x) { + c += (sum - t) + x + } else { + c += (x - t) + sum + } + sum = t + } + return sum + c +} diff --git a/vendor/gonum.org/v1/gonum/floats/scalar/doc.go b/vendor/gonum.org/v1/gonum/floats/scalar/doc.go new file mode 100644 index 00000000000..9e69c193e29 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/scalar/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scalar provides a set of helper routines for dealing with float64 values. +package scalar // import "gonum.org/v1/gonum/floats/scalar" diff --git a/vendor/gonum.org/v1/gonum/floats/scalar/scalar.go b/vendor/gonum.org/v1/gonum/floats/scalar/scalar.go new file mode 100644 index 00000000000..46bf06b3537 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/scalar/scalar.go @@ -0,0 +1,171 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scalar + +import ( + "math" + "strconv" +) + +// EqualWithinAbs returns true when a and b have an absolute difference +// not greater than tol. +func EqualWithinAbs(a, b, tol float64) bool { + return a == b || math.Abs(a-b) <= tol +} + +// minNormalFloat64 is the smallest normal number. For 64 bit IEEE-754 +// floats this is 2^{-1022}. +const minNormalFloat64 = 0x1p-1022 + +// EqualWithinRel returns true when the difference between a and b +// is not greater than tol times the greater absolute value of a and b, +// +// abs(a-b) <= tol * max(abs(a), abs(b)). +func EqualWithinRel(a, b, tol float64) bool { + if a == b { + return true + } + delta := math.Abs(a - b) + if delta <= minNormalFloat64 { + return delta <= tol*minNormalFloat64 + } + // We depend on the division in this relationship to identify + // infinities (we rely on the NaN to fail the test) otherwise + // we compare Infs of the same sign and evaluate Infs as equal + // independent of sign. + return delta/math.Max(math.Abs(a), math.Abs(b)) <= tol +} + +// EqualWithinAbsOrRel returns true when a and b are equal to within +// the absolute or relative tolerances. See EqualWithinAbs and +// EqualWithinRel for details. +func EqualWithinAbsOrRel(a, b, absTol, relTol float64) bool { + return EqualWithinAbs(a, b, absTol) || EqualWithinRel(a, b, relTol) +} + +// EqualWithinULP returns true when a and b are equal to within +// the specified number of floating point units in the last place. +func EqualWithinULP(a, b float64, ulp uint) bool { + if a == b { + return true + } + if math.IsNaN(a) || math.IsNaN(b) { + return false + } + if math.Signbit(a) != math.Signbit(b) { + return math.Float64bits(math.Abs(a))+math.Float64bits(math.Abs(b)) <= uint64(ulp) + } + return ulpDiff(math.Float64bits(a), math.Float64bits(b)) <= uint64(ulp) +} + +func ulpDiff(a, b uint64) uint64 { + if a > b { + return a - b + } + return b - a +} + +const ( + nanBits = 0x7ff8000000000000 + nanMask = 0xfff8000000000000 +) + +// NaNWith returns an IEEE 754 "quiet not-a-number" value with the +// payload specified in the low 51 bits of payload. +// The NaN returned by math.NaN has a bit pattern equal to NaNWith(1). +func NaNWith(payload uint64) float64 { + return math.Float64frombits(nanBits | (payload &^ nanMask)) +} + +// NaNPayload returns the lowest 51 bits payload of an IEEE 754 "quiet +// not-a-number". 
For values of f other than quiet-NaN, NaNPayload +// returns zero and false. +func NaNPayload(f float64) (payload uint64, ok bool) { + b := math.Float64bits(f) + if b&nanBits != nanBits { + return 0, false + } + return b &^ nanMask, true +} + +// ParseWithNA converts the string s to a float64 in value. +// If s equals missing, weight is returned as 0, otherwise 1. +func ParseWithNA(s, missing string) (value, weight float64, err error) { + if s == missing { + return 0, 0, nil + } + value, err = strconv.ParseFloat(s, 64) + if err == nil { + weight = 1 + } + return value, weight, err +} + +// Round returns the half away from zero rounded value of x with prec precision. +// +// Special cases are: +// +// Round(±0) = +0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + x = math.Round(intermed) + + if x == 0 { + return 0 + } + + return x / pow +} + +// RoundEven returns the half even rounded value of x with prec precision. +// +// Special cases are: +// +// RoundEven(±0) = +0 +// RoundEven(±Inf) = ±Inf +// RoundEven(NaN) = NaN +func RoundEven(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + x = math.RoundToEven(intermed) + + if x == 0 { + return 0 + } + + return x / pow +} + +// Same returns true when the inputs have the same value, allowing NaN equality. +func Same(a, b float64) bool { + return a == b || (math.IsNaN(a) && math.IsNaN(b)) +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s new file mode 100644 index 00000000000..df63dc0905f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s @@ -0,0 +1,82 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func L1Norm(x []float64) float64 +TEXT ·L1Norm(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ x_len+8(FP), CX // CX = len(x) + XORQ AX, AX // i = 0 + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVUPS (SI)(AX*8), X8 // X_i = x[i:i+1] + MOVUPS 16(SI)(AX*8), X9 + MOVUPS 32(SI)(AX*8), X10 + MOVUPS 48(SI)(AX*8), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + ADDQ $8, AX // i += 8 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI)(AX*8), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + INCQ AX // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+24(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s new file mode 100644 index 00000000000..647517333cf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s @@ -0,0 +1,90 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func L1NormInc(x []float64, n, incX int) (sum float64) +TEXT ·L1NormInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ n+24(FP), CX // CX = n + MOVQ incX+32(FP), AX // AX = increment * sizeof( float64 ) + SHLQ $3, AX + MOVQ AX, DX // DX = AX * 3 + IMULQ $3, DX + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = n % 8 + SHRQ $3, CX // CX = floor( n / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum = max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_i[0] = x[i] + MOVSD (SI)(AX*1), X9 + MOVSD (SI)(AX*2), X10 + MOVSD (SI)(DX*1), X11 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + MOVHPD (SI), X8 // X_i[1] = x[i+4] + MOVHPD (SI)(AX*1), X9 + MOVHPD (SI)(AX*2), X10 + MOVHPD (SI)(DX*1), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + ADDQ AX, SI // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+40(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s new file mode 100644 index 00000000000..e377f512566 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func Add(dst, s []float64) +TEXT ·Add(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE add_end + XORQ AX, AX + MOVQ DI, BX + ANDQ $0x0F, BX // BX = &dst & 15 + JZ add_no_trim // if BX == 0 { goto add_no_trim } + + // Align on 16-bit boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // i++ + DECQ CX // --CX + JE add_end // if CX == 0 { return } + +add_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ add_tail_start // if CX == 0 { goto add_tail_start } + +add_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD (DI)(AX*8), X0 // X_i += dst[i:i+1] + ADDPD 16(DI)(AX*8), X1 + ADDPD 32(DI)(AX*8), X2 + ADDPD 48(DI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X_i + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP add_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE add_end + +add_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +add_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP add_tail // } while --CX > 0 + +add_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s new file mode 100644 index 00000000000..6f52a8f64fb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s @@ -0,0 +1,53 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func Addconst(alpha float64, x []float64) +TEXT ·AddConst(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ x_len+16(FP), CX // CX = len(x) + CMPQ CX, $0 // if len(x) == 0 { return } + JE ac_end + MOVSD alpha+0(FP), X4 // X4 = { a, a } + SHUFPD $0, X4, X4 + MOVUPS X4, X5 // X5 = X4 + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ ac_tail_start // if CX == 0 { goto ac_tail_start } + +ac_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD X4, X0 // X_i += a + ADDPD X5, X1 + ADDPD X4, X2 + ADDPD X5, X3 + MOVUPS X0, (SI)(AX*8) // s[i:i+1] = X_i + MOVUPS X1, 16(SI)(AX*8) + MOVUPS X2, 32(SI)(AX*8) + MOVUPS X3, 48(SI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP ac_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE ac_end + +ac_tail_start: // Reset loop counters + MOVQ BX, CX // Loop counter: CX = BX + +ac_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD X4, X0 // X0 += a + MOVSD X0, (SI)(AX*8) // s[i] = X0 + INCQ AX // ++i + LOOP ac_tail // } while --CX > 0 + +ac_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go new file mode 100644 index 00000000000..2ab8129a54e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go @@ -0,0 +1,62 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || noasm || gccgo || safe +// +build !amd64 noasm gccgo safe + +package f64 + +// AxpyUnitary is +// +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s new file mode 100644 index 00000000000..a4e180fbfa5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s @@ -0,0 +1,142 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R9 +#define INCx3_DST R12 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y + MOVQ n+56(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+80(FP), INC_X + MOVQ iy+88(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) + MOVQ Y_PTR, DST_PTR // DST_PTR = Y_PTR // Write pointer + + MOVQ incX+64(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+72(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVAPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + // y[i] += alpha * x[i] for the last n % 4 iterations. + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s new file mode 100644 index 00000000000..0f54a394004 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s @@ -0,0 +1,148 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DX +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R10 +#define INCx3_DST R13 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+48(FP), X_PTR // X_PTR := &x + MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y + MOVQ n+96(FP), LEN // LEN := n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+120(FP), INC_X + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + MOVQ iy+128(FP), INC_Y + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(dst[idst]) + MOVQ idst+32(FP), INC_DST + LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(y[iy]) + + MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64) + SHLQ $3, INC_DST + MOVSD alpha+40(FP), ALPHA + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVSD ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 2x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4] + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incY*2] + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s new file mode 100644 index 00000000000..f0b78596b66 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitary(alpha float64, x, y []float64) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR := &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+40(FP), LEN + CMOVQLE y_len+40(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + XORQ IDX, IDX + MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha } + SHUFPD $0, ALPHA, ALPHA + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail2_start } + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= a + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if TAIL == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= a + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s new file mode 100644 index 00000000000..dbb0a7eaba6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DX +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+32(FP), X_PTR // X_PTR := &x + MOVQ y_base+56(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+40(FP), LEN // LEN = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), LEN + CMOVQLE y_len+64(FP), LEN + CMPQ dst_len+8(FP), LEN + CMOVQLE dst_len+8(FP), LEN + + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVSD alpha+24(FP), ALPHA + SHUFPD $0, ALPHA, ALPHA // ALPHA := { alpha, alpha } + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. 
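+	// Each pass consumes four 16-byte vectors (8 float64 elements); any
+	// remainder is finished pairwise in tail_two and singly in tail_one below.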
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= alpha + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= alpha + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s new file mode 100644 index 00000000000..58168482d81 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s @@ -0,0 +1,71 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +TEXT ·CumProd(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cp_end + XORQ AX, AX // i = 0 + + MOVSD (SI), X5 // p_prod = { s[0], s[0] } + SHUFPD $0, X5, X5 + MOVSD X5, (DI) // dst[0] = s[0] + INCQ AX // ++i + DECQ CX // -- CX + JZ cp_end // if CX == 0 { return } + + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cp_tail_start // if CX == 0 { goto cp_tail_start } + +cp_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + MULPD X0, X1 // X1 *= X0 + MULPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + MULPD X5, X0 // X0 *= p_prod + MULPD X1, X5 // p_prod *= X1 + MULPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + MULPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cp_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cp_end + +cp_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cp_tail: // do { + MULSD (SI)(AX*8), X5 // p_prod *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_prod + INCQ AX // ++i + LOOP cp_tail // } while --CX > 0 + +cp_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git 
a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s new file mode 100644 index 00000000000..85613202a47 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s @@ -0,0 +1,64 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +TEXT ·CumSum(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cs_end + XORQ AX, AX // i = 0 + PXOR X5, X5 // p_sum = 0 + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cs_tail_start // if CX == 0 { goto cs_tail_start } + +cs_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + ADDPD X0, X1 // X1 += X0 + ADDPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + ADDPD X5, X0 // X0 += p_sum + ADDPD X1, X5 // p_sum += X1 + ADDPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + ADDPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cs_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cs_end + +cs_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cs_tail: // do { + ADDSD (SI)(AX*8), X5 // p_sum *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_sum + INCQ AX // ++i + LOOP cs_tail // } while --CX > 0 + +cs_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s new file mode 100644 index 00000000000..95839767486 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s @@ -0,0 +1,67 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func Div(dst, s []float64) +TEXT ·Div(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ SI, BX + ANDQ $15, BX // BX = &s & 15 + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-bit boundary + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (DI)(AX*8), X0 // X0 = dst[i:i+1] + MOVUPS 16(DI)(AX*8), X1 + MOVUPS 32(DI)(AX*8), X2 + MOVUPS 48(DI)(AX*8), X3 + DIVPD (SI)(AX*8), X0 // X0 /= s[i:i+1] + DIVPD 16(SI)(AX*8), X1 + DIVPD 32(SI)(AX*8), X2 + DIVPD 48(SI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s new file mode 100644 index 00000000000..e7094cb95b6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func DivTo(dst, x, y []float64) +TEXT ·DivTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ x_base+24(FP), SI // SI = &x + MOVQ y_base+48(FP), DX // DX = &y + CMPQ x_len+32(FP), CX // CX = max( len(dst), len(x), len(y) ) + CMOVQLE x_len+32(FP), CX + CMPQ y_len+56(FP), CX + CMOVQLE y_len+56(FP), CX + MOVQ CX, ret_len+80(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ DX, BX + ANDQ $15, BX // BX = &y & OxF + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-bit boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + DIVSD (DX)(AX*8), X0 // X0 /= t[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X0 = x[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + DIVPD (DX)(AX*8), X0 // X0 /= y[i:i+1] + DIVPD 16(DX)(AX*8), X1 + DIVPD 32(DX)(AX*8), X2 + DIVPD 48(DX)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = x[i] + DIVSD (DX)(AX*8), X0 // X0 /= y[i] + MOVSD X0, (DI)(AX*8) + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + MOVQ DI, ret_base+72(FP) // &ret = &dst + MOVQ dst_cap+16(FP), DI // cap(ret) = cap(dst) + MOVQ DI, ret_cap+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go new file mode 100644 index 00000000000..33c76c1e034 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f64 provides float64 vector primitives. +package f64 // import "gonum.org/v1/gonum/internal/asm/f64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go new file mode 100644 index 00000000000..099316440e6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go @@ -0,0 +1,38 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !amd64 || noasm || gccgo || safe +// +build !amd64 noasm gccgo safe + +package f64 + +// DotUnitary is +// +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float64) (sum float64) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s new file mode 100644 index 00000000000..c8cd4129621 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s @@ -0,0 +1,145 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func DdotUnitary(x, y []float64) (sum float64) +// This function assumes len(y) >= len(x). +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ x_len+8(FP), DI // n = len(x) + MOVQ y+24(FP), R9 + + MOVSD $(0.0), X7 // sum = 0 + MOVSD $(0.0), X8 // sum = 0 + + MOVQ $0, SI // i = 0 + SUBQ $4, DI // n -= 4 + JL tail_uni // if n < 0 goto tail_uni + +loop_uni: + // sum += x[i] * y[i] unrolled 4x. 
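+	// Two packed accumulators (X7 and X8) are kept live so the multiply/add
+	// chains can overlap; end_uni folds them into a single scalar sum.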
+ MOVUPD 0(R8)(SI*8), X0 + MOVUPD 0(R9)(SI*8), X1 + MOVUPD 16(R8)(SI*8), X2 + MOVUPD 16(R9)(SI*8), X3 + MULPD X1, X0 + MULPD X3, X2 + ADDPD X0, X7 + ADDPD X2, X8 + + ADDQ $4, SI // i += 4 + SUBQ $4, DI // n -= 4 + JGE loop_uni // if n >= 0 goto loop_uni + +tail_uni: + ADDQ $4, DI // n += 4 + JLE end_uni // if n <= 0 goto end_uni + +onemore_uni: + // sum += x[i] * y[i] for the remaining 1-3 elements. + MOVSD 0(R8)(SI*8), X0 + MOVSD 0(R9)(SI*8), X1 + MULSD X1, X0 + ADDSD X0, X7 + + ADDQ $1, SI // i++ + SUBQ $1, DI // n-- + JNZ onemore_uni // if n != 0 goto onemore_uni + +end_uni: + // Add the four sums together. + ADDPD X8, X7 + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+48(FP) // Return final sum. + RET + +// func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ y+24(FP), R9 + MOVQ n+48(FP), CX + MOVQ incX+56(FP), R11 + MOVQ incY+64(FP), R12 + MOVQ ix+72(FP), R13 + MOVQ iy+80(FP), R14 + + MOVSD $(0.0), X7 // sum = 0 + LEAQ (R8)(R13*8), SI // p = &x[ix] + LEAQ (R9)(R14*8), DI // q = &y[ix] + SHLQ $3, R11 // incX *= sizeof(float64) + SHLQ $3, R12 // indY *= sizeof(float64) + + SUBQ $2, CX // n -= 2 + JL tail_inc // if n < 0 goto tail_inc + +loop_inc: + // sum += *p * *q unrolled 2x. + MOVHPD (SI), X0 + MOVHPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + MOVLPD (SI), X0 + MOVLPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + + MULPD X1, X0 + ADDPD X0, X7 + + SUBQ $2, CX // n -= 2 + JGE loop_inc // if n >= 0 goto loop_inc + +tail_inc: + ADDQ $2, CX // n += 2 + JLE end_inc // if n <= 0 goto end_inc + + // sum += *p * *q for the last iteration if n is odd. + MOVSD (SI), X0 + MULSD (DI), X0 + ADDSD X0, X7 + +end_inc: + // Add the two sums together. + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+88(FP) // Return final sum. + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go new file mode 100644 index 00000000000..5b042338453 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go @@ -0,0 +1,29 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !noasm && !gccgo && !safe +// +build !noasm,!gccgo,!safe + +package f64 + +// Ger performs the rank-one operation +// +// A += alpha * x * yᵀ +// +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) + +// GemvN computes +// +// y = alpha * A * x + beta * y +// +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) + +// GemvT computes +// +// y = alpha * Aᵀ * x + beta * y +// +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
+func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go new file mode 100644 index 00000000000..e8dee0511bf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go @@ -0,0 +1,125 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || noasm || gccgo || safe +// +build !amd64 noasm gccgo safe + +package f64 + +// Ger performs the rank-one operation +// +// A += alpha * x * yᵀ +// +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) { + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], n, incY, 1, ky, 0) + ix += incX + } +} + +// GemvN computes +// +// y = alpha * A * x + beta * y +// +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(n-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(m-1) * int(incY)) + } + + if incX == 1 && incY == 1 { + if beta == 0 { + for i = 0; i < m; i++ { + y[i] = alpha * DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + for i = 0; i < m; i++ { + y[i] = y[i]*beta + alpha*DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + iy := ky + if beta == 0 { + for i = 0; i < m; i++ { + y[iy] = alpha * DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } + return + } + for i = 0; i < m; i++ { + y[iy] = y[iy]*beta + alpha*DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } +} + +// GemvT computes +// +// y = alpha * Aᵀ * x + beta * y +// +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
+func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + switch { + case beta == 0: // beta == 0 is special-cased to memclear + if incY == 1 { + for i := range y { + y[i] = 0 + } + } else { + iy := ky + for i := 0; i < int(n); i++ { + y[iy] = 0 + iy += incY + } + } + case int(incY) < 0: + ScalInc(beta, y, n, uintptr(int(-incY))) + case incY == 1: + ScalUnitary(beta, y[:n]) + default: + ScalInc(beta, y, n, incY) + } + + if incX == 1 && incY == 1 { + for i = 0; i < m; i++ { + AxpyUnitaryTo(y, alpha*x[i], a[lda*i:lda*i+n], y) + } + return + } + ix := kx + for i = 0; i < m; i++ { + AxpyInc(alpha*x[ix], a[lda*i:lda*i+n], y, n, 1, incY, 0, ky) + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s new file mode 100644 index 00000000000..917e0e30e17 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s @@ -0,0 +1,685 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define INC_X R8 +#define INC3_X R9 + +#define Y_PTR DX +#define Y y_base+96(FP) +#define INC_Y R10 +#define INC3_Y R11 + +#define A_ROW AX +#define A_PTR DI +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + XORPS X0, X0 \ + XORPS X1, X1 \ + XORPS X2, X2 \ + XORPS X3, X3 + +#define INIT2 \ + XORPS X0, X0 \ + XORPS X1, X1 + +#define INIT1 \ + XORPS X0, X0 + +#define KERNEL_LOAD4 \ + MOVUPS (X_PTR), X12 \ + MOVUPS 2*SIZE(X_PTR), X13 + +#define KERNEL_LOAD2 \ + MOVUPS (X_PTR), X12 + +#define KERNEL_LOAD4_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 \ + MOVSD (X_PTR)(INC_X*2), X13 \ + MOVHPD (X_PTR)(INC3_X*1), X13 + +#define KERNEL_LOAD2_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MOVUPS (A_PTR)(LDA*2), X8 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X9 \ + MOVUPS (A_PTR)(LDA3*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X11 \ + MULPD X12, X4 \ + MULPD X13, X5 \ + MULPD X12, X6 \ + MULPD X13, X7 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X1 \ + ADDPD X7, X1 \ + ADDPD X8, X2 \ + ADDPD X9, X2 \ + ADDPD X10, X3 \ + ADDPD X11, X3 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + MULPD X12, X6 \ + MULPD X12, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X2 \ + ADDPD X7, X3 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X4 \ + MOVHPD (A_PTR)(LDA*1), X4 \ + MOVSD (A_PTR)(LDA*2), X5 \ + MOVHPD (A_PTR)(LDA3*1), X5 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X2 \ + ADDQ $SIZE, A_PTR + +#define STORE4 \ + MOVUPS (Y_PTR), X4 \ + MOVUPS 2*SIZE(Y_PTR), X5 \ + MULPD 
ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVUPS X4, (Y_PTR) \ + MOVUPS X5, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MOVSD (Y_PTR)(INC_Y*2), X5 \ + MOVHPD (Y_PTR)(INC3_Y*1), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVLPD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) \ + MOVLPD X5, (Y_PTR)(INC_Y*2) \ + MOVHPD X5, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X11 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X8, X0 \ + ADDPD X10, X1 \ + ADDPD X9, X0 \ + ADDPD X11, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X8 \ + MOVUPS (A_PTR)(LDA*1), X9 \ + MULPD X12, X8 \ + MULPD X12, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X1 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MOVHPD (A_PTR)(LDA*1), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE2 \ + MOVUPS (Y_PTR), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVUPS X4, (Y_PTR) + +#define STORE2_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVSD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X0 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVUPS (A_PTR), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MULSD X12, X8 \ + ADDSD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE1 \ + HADDPD X0, X0 \ + MOVSD (Y_PTR), X4 \ + MULSD ALPHA, X0 \ + MULSD BETA, X4 \ + ADDSD X0, X4 \ + MOVSD X4, (Y_PTR) + +// func GemvN(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvN(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + MOVDDUP beta+88(FP), BETA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y // INC_Y = incY * sizeof(float64) + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $0.0, X0 + COMISD BETA, X0 + JNE gemv_start // if beta != 0 { goto gemv_start } + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + CMPQ incY+120(FP), $1 // Check for dense vector X (fast-path) + JNE inc_clear + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + TESTQ $2, M_DIM + JZ clear1 + MOVUPS X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end 
+ MOVSD X0, (Y_PTR) + + JMP prep_end + +inc_clear: + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + CMPQ incX+80(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r4c4 + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + + ADDQ $2*SIZE, X_PTR + +r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ $SIZE, X_PTR + +r4end: + CMPQ INC_Y, $SIZE + JNZ r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP r4inc + +r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + + ADDQ $2*SIZE, X_PTR + +r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ $SIZE, X_PTR + +r2end: + CMPQ INC_Y, $SIZE + JNE r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP r2inc + +r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + + ADDQ $2*SIZE, X_PTR + +r1c1: + + TESTQ $1, N_DIM + JZ r1end + + // 1x1 KERNEL + KERNEL_1x1 + +r1end: + STORE1 + +end: + RET + +inc: // Algorithm for incX != 1 ( split loads in kernel ) + MOVQ incX+80(FP), INC_X // INC_X = incX + + XORQ TMP2, TMP2 // TMP2 = 0 + MOVQ N, TMP1 // TMP1 = N + SUBQ $1, TMP1 // TMP1 -= 1 + NEGQ TMP1 // TMP1 = -TMP1 + IMULQ INC_X, TMP1 // TMP1 *= INC_X + CMPQ INC_X, $0 // if INC_X < 0 { TMP2 = TMP1 } + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR // X_PTR = X_PTR[TMP2] + MOVQ X_PTR, X // X = X_PTR + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + + DECQ N + JNZ inc_r4c4 + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ INC_X, X_PTR + +inc_r4end: + CMPQ INC_Y, $SIZE + JNE inc_r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP inc_r4inc + +inc_r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ 
inc_r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ INC_X, X_PTR + +inc_r2end: + CMPQ INC_Y, $SIZE + JNE inc_r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP inc_r2inc + +inc_r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_r1end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_r1end: + STORE1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s new file mode 100644 index 00000000000..040710009ed --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s @@ -0,0 +1,745 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM n+8(FP) +#define M CX +#define N_DIM m+0(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define Y_PTR DX +#define Y y_base+96(FP) +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MOVDDUP (X_PTR)(INC_X*2), X10 \ + MOVDDUP (X_PTR)(INC3_X*1), X11 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 \ + MULPD ALPHA, X10 \ + MULPD ALPHA, X11 + +#define INIT2 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 + +#define INIT1 \ + MOVDDUP (X_PTR), X8 \ + MULPD ALPHA, X8 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X0 \ + MOVUPS 2*SIZE(Y_PTR), X1 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X0 + +#define KERNEL_LOAD4_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 \ + MOVSD (Y_PTR)(INC_Y*2), X1 \ + MOVHPD (Y_PTR)(INC3_Y*1), X1 + +#define KERNEL_LOAD2_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + MOVUPS (A_PTR)(LDA*2), X4 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X5 \ + MOVUPS (A_PTR)(LDA3*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X7 \ + MULPD X10, X4 \ + MULPD X10, X5 \ + MULPD X11, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD 
X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDQ $4*SIZE, A_PTR + +#define STORE4 \ + MOVUPS X0, (Y_PTR) \ + MOVUPS X1, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) \ + MOVLPD X1, (Y_PTR)(INC_Y*2) \ + MOVHPD X1, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + MULPD X10, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X0 \ + ADDPD X7, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVUPS (A_PTR), X4 \ + MULPD X8, X4 \ + ADDPD X4, X0 \ + ADDQ $2*SIZE, A_PTR + +#define STORE2 \ + MOVUPS X0, (Y_PTR) + +#define STORE2_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MOVSD (A_PTR)(LDA*2), X6 \ + MOVSD (A_PTR)(LDA3*1), X7 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + MULSD X10, X6 \ + MULSD X11, X7 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + ADDSD X6, X0 \ + ADDSD X7, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MULSD X8, X4 \ + ADDSD X4, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define SCALE_8(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MOVUPS 32(PTR), X2 \ + MOVUPS 48(PTR), X3 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MULPD SCAL, X2 \ + MULPD SCAL, X3 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + MOVUPS X2, 32(PTR) \ + MOVUPS X3, 48(PTR) + +#define SCALE_4(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + +#define SCALE_2(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MULPD SCAL, X0 \ + MOVUPS X0, (PTR) \ + +#define SCALE_1(PTR, SCAL) \ + MOVSD (PTR), X0 \ + MULSD SCAL, X0 \ + MOVSD X0, (PTR) \ + +#define SCALEINC_4(PTR, INC, INC3, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MOVSD (PTR)(INC*2), X2 \ + MOVSD (PTR)(INC3*1), X3 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MULSD SCAL, X2 \ + MULSD SCAL, X3 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) \ + MOVSD X2, (PTR)(INC*2) \ + MOVSD X3, (PTR)(INC3*1) + +#define SCALEINC_2(PTR, INC, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) + +// func GemvT(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvT(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y // INC_Y = incY * sizeof(float64) + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + 
MOVQ A_ROW, A_PTR + + MOVQ incX+80(FP), INC_X // INC_X = incX * sizeof(float64) + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + NEGQ TMP1 + IMULQ INC_X, TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + MOVQ X_PTR, X + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path) + JNE inc + + MOVSD $1.0, X0 + COMISD beta+88(FP), X0 + JE gemv_start + + MOVSD $0.0, X0 + COMISD beta+88(FP), X0 + JE gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $3, M + JZ scal4 + +scal8: + SCALE_8(Y_PTR, BETA) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ scal8 + +scal4: + TESTQ $4, M_DIM + JZ scal2 + SCALE_4(Y_PTR, BETA) + ADDQ $4*SIZE, Y_PTR + +scal2: + TESTQ $2, M_DIM + JZ scal1 + SCALE_2(Y_PTR, BETA) + ADDQ $2*SIZE, Y_PTR + +scal1: + TESTQ $1, M_DIM + JZ prep_end + SCALE_1(Y_PTR, BETA) + + JMP prep_end + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + TESTQ $2, M_DIM + JZ clear1 + MOVUPS X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + SHRQ $2, N + JZ c2 + +c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c4r2 + +c4r4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c4r4 + +c4r2: + TESTQ $2, M_DIM + JZ c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x4 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c4r1: + TESTQ $1, M_DIM + JZ c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ $SIZE, Y_PTR + +c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ c4 + +c2: + TESTQ $2, N_DIM + JZ c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c2r2 + +c2r4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x2 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c2r4 + +c2r2: + TESTQ $2, M_DIM + JZ c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c2r1: + TESTQ $1, M_DIM + JZ c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ $SIZE, Y_PTR + +c2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +c1: + TESTQ $1, N_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c1r2 + +c1r4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x1 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c1r4 + +c1r2: + TESTQ $2, M_DIM + JZ c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x1 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c1r1: + TESTQ $1, M_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + +end: + RET + +inc: // Algorithm for incX != 0 ( split loads in kernel ) + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $1.0, X0 + COMISD beta+88(FP), X0 + JE inc_gemv_start + + MOVSD $0.0, X0 + COMISD beta+88(FP), X0 + JE inc_gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $2, M + JZ inc_scal2 + +inc_scal4: + SCALEINC_4(Y_PTR, INC_Y, INC3_Y, BETA) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ 
M + JNZ inc_scal4 + +inc_scal2: + TESTQ $2, M_DIM + JZ inc_scal1 + + SCALEINC_2(Y_PTR, INC_Y, BETA) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_scal1: + TESTQ $1, M_DIM + JZ inc_prep_end + SCALE_1(Y_PTR, BETA) + + JMP inc_prep_end + +inc_gemv_clear: // beta == 0 is special-cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ inc_prep_end + MOVSD X0, (Y_PTR) + +inc_prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +inc_gemv_start: + SHRQ $2, N + JZ inc_c2 + +inc_c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c4r2 + +inc_c4r4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + + DECQ M + JNZ inc_c4r4 + +inc_c4r2: + TESTQ $2, M_DIM + JZ inc_c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x4 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c4r1: + TESTQ $1, M_DIM + JZ inc_c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ INC_Y, Y_PTR + +inc_c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ inc_c4 + +inc_c2: + TESTQ $2, N_DIM + JZ inc_c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c2r2 + +inc_c2r4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x2 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c2r4 + +inc_c2r2: + TESTQ $2, M_DIM + JZ inc_c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c2r1: + TESTQ $1, M_DIM + JZ inc_c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ INC_Y, Y_PTR + +inc_c2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_c1: + TESTQ $1, N_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c1r2 + +inc_c1r4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x1 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c1r4 + +inc_c1r2: + TESTQ $2, M_DIM + JZ inc_c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x1 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c1r1: + TESTQ $1, M_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s new file mode 100644 index 00000000000..8cae569138e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s @@ -0,0 +1,591 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 + +#define LOAD4 \ + PREFETCHNTA (X_PTR )(INC_X*8) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MOVDDUP (X_PTR)(INC_X*2), X3 \ + MOVDDUP (X_PTR)(INC3_X*1), X4 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 \ + MULPD ALPHA, X3 \ + MULPD ALPHA, X4 + +#define LOAD2 \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 + +#define LOAD1 \ + MOVDDUP (X_PTR), X1 \ + MULPD ALPHA, X1 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 2*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD4_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 \ + MOVLPD (Y_PTR)(INC_Y*2), X6 \ + MOVHPD (Y_PTR)(INC3_Y*1), X6 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 + +#define KERNEL_4x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 \ + MULPD X3, X9 \ + MULPD X3, X10 \ + MULPD X4, X11 \ + MULPD X4, X12 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPD X13, X5 \ + MOVUPS 2*SIZE(A_PTR), X14 \ + ADDPD X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPD X15, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X0 \ + ADDPD X0, X8 \ + MOVUPS (A_PTR)(LDA*2), X13 \ + ADDPD X13, X9 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X14 \ + ADDPD X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPD X15, X11 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPD X0, X12 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 2*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 2*SIZE(A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPD X1, X5 \ + MULPD X2, X6 \ + MULPD X3, X7 \ + MULPD X4, X8 + +#define STORE_4x2 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*2), X11 \ + ADDPD X11, X7 \ + MOVUPS (A_PTR)(LDA3*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MOVSD X5, X7 \ + MOVSD X5, X8 \ + MULSD X1, X5 \ + MULSD X2, X6 \ + MULSD X3, X7 \ + MULSD X4, X8 + +#define STORE_4x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + ADDSD (A_PTR)(LDA*2), X7 \ + ADDSD (A_PTR)(LDA3*1), X8 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS 2*SIZE(A_PTR), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPD X11, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 
2*SIZE(A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS X5, X6 \ + MULPD X1, X5 \ + MULPD X2, X6 + +#define STORE_2x2 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS (A_PTR)(LDA*1), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MULSD X1, X5 \ + MULSD X2, X6 + +#define STORE_2x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPD X1, X5 \ + MULPD X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS 2*SIZE(A_PTR), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MULPD X1, X5 + +#define STORE_1x2 \ + MOVUPS (A_PTR), X6 \ + ADDPD X6, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X5 \ + MULSD X1, X5 + +#define STORE_1x1 \ + ADDSD (A_PTR), X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float64, +// x []float64, incX uintptr, +// y []float64, incY uintptr, +// a []float64, lda uintptr) +TEXT ·Ger(SB), NOSPLIT, $0 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JG inc + JL end + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ $SIZE, Y_PTR + +end: + RET + 
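+// "Split loads" below: the *_INC kernel variants assemble each 128-bit vector
+// from two scalar loads at (Y_PTR) and (Y_PTR)(INC_Y*1) instead of one MOVUPS,
+// so y no longer needs to be contiguous.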
+inc: // Algorithm for incY != 1 ( split loads in kernel ) + + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ INC_Y, Y_PTR + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s new file mode 100644 index 00000000000..b4b1fd02fb5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s @@ -0,0 +1,58 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func L1Dist(s, t []float64) float64 +TEXT ·L1Dist(SB), NOSPLIT, $0 + MOVQ s_base+0(FP), DI // DI = &s + MOVQ t_base+24(FP), SI // SI = &t + MOVQ s_len+8(FP), CX // CX = len(s) + CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) + CMOVQLE t_len+32(FP), CX + PXOR X3, X3 // norm = 0 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE l1_end + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $1, BX // BX = CX % 2 + SHRQ $1, CX // CX = floor( CX / 2 ) + JZ l1_tail_start // if CX == 0 { return 0 } + +l1_loop: // Loop unrolled 2x do { + MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] + MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] + MOVAPS X0, X2 + SUBPD X1, X0 + SUBPD X2, X1 + MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + ADDPD X0, X3 // norm += X0 + ADDQ $2, AX // i += 2 + LOOP l1_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE l1_end + +l1_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + PXOR X0, X0 // reset X0, X1 to break dependencies + PXOR X1, X1 + +l1_tail: + MOVSD (SI)(AX*8), X0 // X0 = t[i] + MOVSD (DI)(AX*8), X1 // x1 = s[i] + MOVAPD X0, X2 + SUBSD X1, X0 + SUBSD X2, X1 + MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + ADDSD X0, X3 // norm += X0 + +l1_end: + MOVAPS X3, X2 + SHUFPD $1, X2, X2 + ADDSD X3, X2 // X2 = X3[1] + X3[0] + MOVSD X2, ret+48(FP) // return X2 + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_amd64.s new file mode 100644 index 00000000000..86e01c87014 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_amd64.s @@ -0,0 +1,109 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define SUMSQ X0 +#define ABSX X1 +#define SCALE X2 +#define ZERO X3 +#define TMP X4 +#define ABSMASK X5 +#define INF X7 +#define INFMASK X11 +#define NANMASK X12 +#define IDX AX +#define LEN SI +#define X_ DI + +#define ABSMASK_DATA l2nrodata<>+0(SB) +#define INF_DATA l2nrodata<>+8(SB) +#define NAN_DATA l2nrodata<>+16(SB) +// AbsMask +DATA l2nrodata<>+0(SB)/8, $0x7FFFFFFFFFFFFFFF +// Inf +DATA l2nrodata<>+8(SB)/8, $0x7FF0000000000000 +// NaN +DATA l2nrodata<>+16(SB)/8, $0xFFF8000000000000 +GLOBL l2nrodata<>+0(SB), RODATA, $24 + +// L2NormUnitary returns the L2-norm of x. 
+// func L2NormUnitary(x []float64) (norm float64) +TEXT ·L2NormUnitary(SB), NOSPLIT, $0 + MOVQ x_len+8(FP), LEN // LEN = len(x) + MOVQ x_base+0(FP), X_ + PXOR ZERO, ZERO + CMPQ LEN, $0 // if LEN == 0 { return 0 } + JZ retZero + + PXOR INFMASK, INFMASK + PXOR NANMASK, NANMASK + MOVSD $1.0, SUMSQ // ssq = 1 + XORPS SCALE, SCALE + MOVSD ABSMASK_DATA, ABSMASK + MOVSD INF_DATA, INF + XORQ IDX, IDX // idx == 0 + +initZero: // for ;x[i]==0; i++ {} + // Skip all leading zeros, to avoid divide by zero NaN + MOVSD (X_)(IDX*8), ABSX // absxi = x[i] + UCOMISD ABSX, ZERO + JP retNaN // if isNaN(x[i]) { return NaN } + JNE loop // if x[i] != 0 { goto loop } + INCQ IDX // i++ + CMPQ IDX, LEN + JE retZero // if i == LEN { return 0 } + JMP initZero + +loop: + MOVSD (X_)(IDX*8), ABSX // absxi = x[i] + MOVUPS ABSX, TMP + CMPSD ABSX, TMP, $3 + ORPD TMP, NANMASK // NANMASK = NANMASK | IsNaN(absxi) + MOVSD INF, TMP + ANDPD ABSMASK, ABSX // absxi == Abs(absxi) + CMPSD ABSX, TMP, $0 + ORPD TMP, INFMASK // INFMASK = INFMASK | IsInf(absxi) + UCOMISD SCALE, ABSX + JA adjScale // IF SCALE > ABSXI { goto adjScale } + + DIVSD SCALE, ABSX // absxi = scale / absxi + MULSD ABSX, ABSX // absxi *= absxi + ADDSD ABSX, SUMSQ // sumsq += absxi + INCQ IDX // i++ + CMPQ IDX, LEN + JNE loop // if i < LEN { continue } + JMP retSum // if i == LEN { goto retSum } + +adjScale: // Scale > Absxi + DIVSD ABSX, SCALE // tmp = absxi / scale + MULSD SCALE, SUMSQ // sumsq *= tmp + MULSD SCALE, SUMSQ // sumsq *= tmp + ADDSD $1.0, SUMSQ // sumsq += 1 + MOVUPS ABSX, SCALE // scale = absxi + INCQ IDX // i++ + CMPQ IDX, LEN + JNE loop // if i < LEN { continue } + +retSum: // Calculate return value + SQRTSD SUMSQ, SUMSQ // sumsq = sqrt(sumsq) + MULSD SCALE, SUMSQ // sumsq += scale + MOVQ SUMSQ, R10 // tmp = sumsq + UCOMISD ZERO, INFMASK + CMOVQPS INF_DATA, R10 // if INFMASK { tmp = INF } + UCOMISD ZERO, NANMASK + CMOVQPS NAN_DATA, R10 // if NANMASK { tmp = NaN } + MOVQ R10, norm+24(FP) // return tmp + RET + +retZero: + MOVSD ZERO, norm+24(FP) // return 0 + RET + +retNaN: + MOVSD NAN_DATA, TMP // return NaN + MOVSD TMP, norm+24(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_noasm.go new file mode 100644 index 00000000000..bfb8fba9811 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norm_noasm.go @@ -0,0 +1,93 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || noasm || gccgo || safe +// +build !amd64 noasm gccgo safe + +package f64 + +import "math" + +// L2NormUnitary returns the L2-norm of x. +func L2NormUnitary(x []float64) (norm float64) { + var scale float64 + sumSquares := 1.0 + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + s := scale / absxi + sumSquares = 1 + sumSquares*s*s + scale = absxi + } else { + s := absxi / scale + sumSquares += s * s + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// L2NormInc returns the L2-norm of x. 
+func L2NormInc(x []float64, n, incX uintptr) (norm float64) { + var scale float64 + sumSquares := 1.0 + for ix := uintptr(0); ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + s := scale / absxi + sumSquares = 1 + sumSquares*s*s + scale = absxi + } else { + s := absxi / scale + sumSquares += s * s + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// L2DistanceUnitary returns the L2-norm of x-y. +func L2DistanceUnitary(x, y []float64) (norm float64) { + var scale float64 + sumSquares := 1.0 + for i, v := range x { + v -= y[i] + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + s := scale / absxi + sumSquares = 1 + sumSquares*s*s + scale = absxi + } else { + s := absxi / scale + sumSquares += s * s + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l2normdist_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2normdist_amd64.s new file mode 100644 index 00000000000..10dcae400ef --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2normdist_amd64.s @@ -0,0 +1,115 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define SUMSQ X0 +#define ABSX X1 +#define SCALE X2 +#define ZERO X3 +#define TMP X4 +#define ABSMASK X5 +#define INF X7 +#define INFMASK X11 +#define NANMASK X12 +#define IDX AX +#define X_ DI +#define Y_ BX +#define LEN SI + +#define ABSMASK_DATA l2nrodata<>+0(SB) +#define INF_DATA l2nrodata<>+8(SB) +#define NAN_DATA l2nrodata<>+16(SB) +// AbsMask +DATA l2nrodata<>+0(SB)/8, $0x7FFFFFFFFFFFFFFF +// Inf +DATA l2nrodata<>+8(SB)/8, $0x7FF0000000000000 +// NaN +DATA l2nrodata<>+16(SB)/8, $0xFFF8000000000000 +GLOBL l2nrodata<>+0(SB), RODATA, $24 + +// L2DistanceUnitary returns the L2-norm of x-y. 
+// func L2DistanceUnitary(x,y []float64) (norm float64) +TEXT ·L2DistanceUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_ + MOVQ y_base+24(FP), Y_ + PXOR ZERO, ZERO + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return 0 } + JZ retZero + + PXOR INFMASK, INFMASK + PXOR NANMASK, NANMASK + MOVSD $1.0, SUMSQ // ssq = 1 + XORPS SCALE, SCALE + MOVSD ABSMASK_DATA, ABSMASK + MOVSD INF_DATA, INF + XORQ IDX, IDX // idx == 0 + +initZero: // for ;x[i]==0; i++ {} + // Skip all leading zeros, to avoid divide by zero NaN + MOVSD (X_)(IDX*8), ABSX // absxi = x[i] + SUBSD (Y_)(IDX*8), ABSX // absxi = x[i]-y[i] + UCOMISD ABSX, ZERO + JP retNaN // if isNaN(absxi) { return NaN } + JNE loop // if absxi != 0 { goto loop } + INCQ IDX // i++ + CMPQ IDX, LEN + JE retZero // if i == LEN { return 0 } + JMP initZero + +loop: + MOVSD (X_)(IDX*8), ABSX // absxi = x[i] + SUBSD (Y_)(IDX*8), ABSX // absxi = x[i]-y[i] + MOVUPS ABSX, TMP + CMPSD ABSX, TMP, $3 + ORPD TMP, NANMASK // NANMASK = NANMASK | IsNaN(absxi) + MOVSD INF, TMP + ANDPD ABSMASK, ABSX // absxi == Abs(absxi) + CMPSD ABSX, TMP, $0 + ORPD TMP, INFMASK // INFMASK = INFMASK | IsInf(absxi) + UCOMISD SCALE, ABSX + JA adjScale // IF SCALE > ABSXI { goto adjScale } + + DIVSD SCALE, ABSX // absxi = scale / absxi + MULSD ABSX, ABSX // absxi *= absxi + ADDSD ABSX, SUMSQ // sumsq += absxi + INCQ IDX // i++ + CMPQ IDX, LEN + JNE loop // if i < LEN { continue } + JMP retSum // if i == LEN { goto retSum } + +adjScale: // Scale > Absxi + DIVSD ABSX, SCALE // tmp = absxi / scale + MULSD SCALE, SUMSQ // sumsq *= tmp + MULSD SCALE, SUMSQ // sumsq *= tmp + ADDSD $1.0, SUMSQ // sumsq += 1 + MOVUPS ABSX, SCALE // scale = absxi + INCQ IDX // i++ + CMPQ IDX, LEN + JNE loop // if i < LEN { continue } + +retSum: // Calculate return value + SQRTSD SUMSQ, SUMSQ // sumsq = sqrt(sumsq) + MULSD SCALE, SUMSQ // sumsq += scale + MOVQ SUMSQ, R10 // tmp = sumsq + UCOMISD ZERO, INFMASK + CMOVQPS INF_DATA, R10 // if INFMASK { tmp = INF } + UCOMISD ZERO, NANMASK + CMOVQPS NAN_DATA, R10 // if NANMASK { tmp = NaN } + MOVQ R10, norm+48(FP) // return tmp + RET + +retZero: + MOVSD ZERO, norm+48(FP) // return 0 + RET + +retNaN: + MOVSD NAN_DATA, TMP // return NaN + MOVSD TMP, norm+48(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norminc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norminc_amd64.s new file mode 100644 index 00000000000..8341db93ac5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l2norminc_amd64.s @@ -0,0 +1,110 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define SUMSQ X0 +#define ABSX X1 +#define SCALE X2 +#define ZERO X3 +#define TMP X4 +#define ABSMASK X5 +#define INF X7 +#define INFMASK X11 +#define NANMASK X12 +#define IDX AX +#define LEN SI +#define INC BX +#define X_ DI + +#define ABSMASK_DATA l2nrodata<>+0(SB) +#define INF_DATA l2nrodata<>+8(SB) +#define NAN_DATA l2nrodata<>+16(SB) +// AbsMask +DATA l2nrodata<>+0(SB)/8, $0x7FFFFFFFFFFFFFFF +// Inf +DATA l2nrodata<>+8(SB)/8, $0x7FF0000000000000 +// NaN +DATA l2nrodata<>+16(SB)/8, $0xFFF8000000000000 +GLOBL l2nrodata<>+0(SB), RODATA, $24 + +// func L2NormInc(x []float64, n, incX uintptr) (norm float64) +TEXT ·L2NormInc(SB), NOSPLIT, $0 + MOVQ n+24(FP), LEN // LEN = len(x) + MOVQ incX+32(FP), INC + MOVQ x_base+0(FP), X_ + XORPS ZERO, ZERO + CMPQ LEN, $0 // if LEN == 0 { return 0 } + JZ retZero + + XORPS INFMASK, INFMASK + XORPS NANMASK, NANMASK + MOVSD $1.0, SUMSQ // ssq = 1 + XORPS SCALE, SCALE + MOVSD ABSMASK_DATA, ABSMASK + MOVSD INF_DATA, INF + SHLQ $3, INC // INC *= sizeof(float64) + +initZero: // for ;x[i]==0; i++ {} + // Skip all leading zeros, to avoid divide by zero NaN + MOVSD (X_), ABSX // absxi = x[i] + UCOMISD ABSX, ZERO + JP retNaN // if isNaN(x[i]) { return NaN } + JNZ loop // if x[i] != 0 { goto loop } + ADDQ INC, X_ // i += INC + DECQ LEN // LEN-- + JZ retZero // if LEN == 0 { return 0 } + JMP initZero + +loop: + MOVSD (X_), ABSX // absxi = x[i] + MOVUPS ABSX, TMP + CMPSD ABSX, TMP, $3 + ORPD TMP, NANMASK // NANMASK = NANMASK | IsNaN(absxi) + MOVSD INF, TMP + ANDPD ABSMASK, ABSX // absxi == Abs(absxi) + CMPSD ABSX, TMP, $0 + ORPD TMP, INFMASK // INFMASK = INFMASK | IsInf(absxi) + UCOMISD SCALE, ABSX + JA adjScale // IF SCALE > ABSXI { goto adjScale } + + DIVSD SCALE, ABSX // absxi = scale / absxi + MULSD ABSX, ABSX // absxi *= absxi + ADDSD ABSX, SUMSQ // sumsq += absxi + ADDQ INC, X_ // i += INC + DECQ LEN // LEN-- + JNZ loop // if LEN > 0 { continue } + JMP retSum // if LEN == 0 { goto retSum } + +adjScale: // Scale > Absxi + DIVSD ABSX, SCALE // tmp = absxi / scale + MULSD SCALE, SUMSQ // sumsq *= tmp + MULSD SCALE, SUMSQ // sumsq *= tmp + ADDSD $1.0, SUMSQ // sumsq += 1 + MOVUPS ABSX, SCALE // scale = absxi + ADDQ INC, X_ // i += INC + DECQ LEN // LEN-- + JNZ loop // if LEN > 0 { continue } + +retSum: // Calculate return value + SQRTSD SUMSQ, SUMSQ // sumsq = sqrt(sumsq) + MULSD SCALE, SUMSQ // sumsq += scale + MOVQ SUMSQ, R10 // tmp = sumsq + UCOMISD ZERO, INFMASK + CMOVQPS INF_DATA, R10 // if INFMASK { tmp = INF } + UCOMISD ZERO, NANMASK + CMOVQPS NAN_DATA, R10 // if NANMASK { tmp = NaN } + MOVQ R10, norm+40(FP) // return tmp + RET + +retZero: + MOVSD ZERO, norm+40(FP) // return 0 + RET + +retNaN: + MOVSD NAN_DATA, TMP // return NaN + MOVSD TMP, norm+40(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s new file mode 100644 index 00000000000..ac18b481de4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s @@ -0,0 +1,57 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +// func LinfDist(s, t []float64) float64 +TEXT ·LinfDist(SB), NOSPLIT, $0 + MOVQ s_base+0(FP), DI // DI = &s + MOVQ t_base+24(FP), SI // SI = &t + MOVQ s_len+8(FP), CX // CX = len(s) + CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) + CMOVQLE t_len+32(FP), CX + PXOR X3, X3 // norm = 0 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE l1_end + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $1, BX // BX = CX % 2 + SHRQ $1, CX // CX = floor( CX / 2 ) + JZ l1_tail_start // if CX == 0 { return 0 } + +l1_loop: // Loop unrolled 2x do { + MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] + MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] + MOVAPS X0, X2 + SUBPD X1, X0 + SUBPD X2, X1 + MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + MAXPD X0, X3 // norm = max( norm, X0 ) + ADDQ $2, AX // i += 2 + LOOP l1_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE l1_end + +l1_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + PXOR X0, X0 // reset X0, X1 to break dependencies + PXOR X1, X1 + +l1_tail: + MOVSD (SI)(AX*8), X0 // X0 = t[i] + MOVSD (DI)(AX*8), X1 // X1 = s[i] + MOVAPD X0, X2 + SUBSD X1, X0 + SUBSD X2, X1 + MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + MAXSD X0, X3 // norm = max( norm, X0 ) + +l1_end: + MOVAPS X3, X2 + SHUFPD $1, X2, X2 + MAXSD X3, X2 // X2 = max( X3[1], X3[0] ) + MOVSD X2, ret+48(FP) // return X2 + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go new file mode 100644 index 00000000000..c95219e18a8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go @@ -0,0 +1,62 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || noasm || gccgo || safe +// +build !amd64 noasm gccgo safe + +package f64 + +// ScalUnitary is +// +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s new file mode 100644 index 00000000000..d623a284f9c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalInc(alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ incX+40(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + MOVSD X4, (X_PTR)(INC_X*2) + MOVSD X5, (X_PTR)(INCx3_X*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: // do { + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (X_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s new file mode 100644 index 00000000000..1c2722098de --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define DST_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define INC_DST R10 +#define INCx3_DST R11 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst + SHLQ $3, INC_DST // INC_DST *= sizeof(float64) + MOVSD alpha+32(FP), ALPHA // ALPHA = alpha + MOVQ x_base+40(FP), X_PTR // X_PTR = &x + MOVQ n+64(FP), LEN // LEN = n + MOVQ incX+72(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s new file mode 100644 index 00000000000..6e8f5ca6e1d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s @@ -0,0 +1,112 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP] + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitary(alpha float64, x []float64) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + XORQ IDX, IDX // IDX = 0 + + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 + +loop: // do { // x[i] *= alpha unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(X_PTR)(IDX*8) + MOVUPS X4, 32(X_PTR)(IDX*8) + MOVUPS X5, 48(X_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if n == 0 goto end + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + // x[i] *= alpha for the remaining element. + MOVSD (X_PTR)(IDX*8), X2 + MULSD ALPHA, X2 + MOVSD X2, (X_PTR)(IDX*8) + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s new file mode 100644 index 00000000000..986480a5bed --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/ + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitaryTo(dst []float64, alpha float64, x []float64) +// This function assumes len(dst) >= len(x). +TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0 + MOVQ x_base+32(FP), X_PTR // X_PTR = &x + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_len+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + +loop: // do { // dst[i] = alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop counters + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail_one } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go new file mode 100644 index 00000000000..7139bedd74c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go @@ -0,0 +1,277 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !noasm && !gccgo && !safe +// +build !noasm,!gccgo,!safe + +package f64 + +// L1Norm is +// +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) + +// L1NormInc is +// +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) + +// AddConst is +// +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) + +// Add is +// +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) + +// AxpyUnitary is +// +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) + +// AxpyUnitaryTo is +// +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) + +// AxpyInc is +// +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// CumSum is +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 + +// CumProd is +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 + +// Div is +// +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) + +// DivTo is +// +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, x, y []float64) []float64 + +// DotUnitary is +// +// for i, v := range x { +// sum += y[i] * v +// } +// return 
sum +func DotUnitary(x, y []float64) (sum float64) + +// DotInc is +// +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) + +// L1Dist is +// +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 + +// LinfDist is +// +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 + +// ScalUnitary is +// +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) + +// ScalUnitaryTo is +// +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) + +// ScalInc is +// +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) + +// ScalIncTo is +// +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) + +// Sum is +// +// var sum float64 +// for i := range x { +// sum += x[i] +// } +func Sum(x []float64) float64 + +// L2NormUnitary returns the L2-norm of x. +// +// var scale float64 +// sumSquares := 1.0 +// for _, v := range x { +// if v == 0 { +// continue +// } +// absxi := math.Abs(v) +// if math.IsNaN(absxi) { +// return math.NaN() +// } +// if scale < absxi { +// s := scale / absxi +// sumSquares = 1 + sumSquares*s*s +// scale = absxi +// } else { +// s := absxi / scale +// sumSquares += s * s +// } +// if math.IsInf(scale, 1) { +// return math.Inf(1) +// } +// } +// return scale * math.Sqrt(sumSquares) +func L2NormUnitary(x []float64) (norm float64) + +// L2NormInc returns the L2-norm of x. +// +// var scale float64 +// sumSquares := 1.0 +// for ix := uintptr(0); ix < n*incX; ix += incX { +// val := x[ix] +// if val == 0 { +// continue +// } +// absxi := math.Abs(val) +// if math.IsNaN(absxi) { +// return math.NaN() +// } +// if scale < absxi { +// s := scale / absxi +// sumSquares = 1 + sumSquares*s*s +// scale = absxi +// } else { +// s := absxi / scale +// sumSquares += s * s +// } +// } +// if math.IsInf(scale, 1) { +// return math.Inf(1) +// } +// return scale * math.Sqrt(sumSquares) +func L2NormInc(x []float64, n, incX uintptr) (norm float64) + +// L2DistanceUnitary returns the L2-norm of x-y. 
+// +// var scale float64 +// sumSquares := 1.0 +// for i, v := range x { +// v -= y[i] +// if v == 0 { +// continue +// } +// absxi := math.Abs(v) +// if math.IsNaN(absxi) { +// return math.NaN() +// } +// if scale < absxi { +// s := scale / absxi +// sumSquares = 1 + sumSquares*s*s +// scale = absxi +// } else { +// s := absxi / scale +// sumSquares += s * s +// } +// } +// if math.IsInf(scale, 1) { +// return math.Inf(1) +// } +// return scale * math.Sqrt(sumSquares) +func L2DistanceUnitary(x, y []float64) (norm float64) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go new file mode 100644 index 00000000000..f066379191b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go @@ -0,0 +1,182 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || noasm || gccgo || safe +// +build !amd64 noasm gccgo safe + +package f64 + +import "math" + +// L1Norm is +// +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) { + for _, v := range x { + sum += math.Abs(v) + } + return sum +} + +// L1NormInc is +// +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) { + for i := 0; i < n*incX; i += incX { + sum += math.Abs(x[i]) + } + return sum +} + +// Add is +// +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) { + for i, v := range s { + dst[i] += v + } +} + +// AddConst is +// +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) { + for i := range x { + x[i] += alpha + } +} + +// CumSum is +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] + v + } + return dst +} + +// CumProd is +// +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] * v + } + return dst +} + +// Div is +// +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) { + for i, v := range s { + dst[i] /= v + } +} + +// DivTo is +// +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, s, t []float64) []float64 { + for i, v := range s { + dst[i] = v / t[i] + } + return dst +} + +// L1Dist is +// +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 { + var norm float64 + for i, v := range s { + norm += math.Abs(t[i] - v) + } + return norm +} + +// LinfDist is +// +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 { + var norm float64 + if len(s) == 0 { + return 0 + } + norm = math.Abs(t[0] - s[0]) + for i, v := range s[1:] { + absDiff := math.Abs(t[i+1] - v) + if absDiff > norm 
|| math.IsNaN(norm) { + norm = absDiff + } + } + return norm +} + +// Sum is +// +// var sum float64 +// for i := range x { +// sum += x[i] +// } +func Sum(x []float64) float64 { + var sum float64 + for _, v := range x { + sum += v + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s new file mode 100644 index 00000000000..dd77cbd0536 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s @@ -0,0 +1,99 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!gccgo,!safe + +#include "textflag.h" + +#define X_PTR SI +#define IDX AX +#define LEN CX +#define TAIL BX +#define SUM X0 +#define SUM_1 X1 +#define SUM_2 X2 +#define SUM_3 X3 + +// func Sum(x []float64) float64 +TEXT ·Sum(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ x_len+8(FP), LEN // LEN = len(x) + XORQ IDX, IDX // i = 0 + PXOR SUM, SUM // p_sum_i = 0 + CMPQ LEN, $0 // if LEN == 0 { return 0 } + JE sum_end + + PXOR SUM_1, SUM_1 + PXOR SUM_2, SUM_2 + PXOR SUM_3, SUM_3 + + MOVQ X_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + ADDSD (X_PTR), X0 // X0 += x[0] + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ sum_end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + SHRQ $4, LEN // LEN = floor( n / 16 ) + JZ sum_tail8 // if LEN == 0 { goto sum_tail8 } + +sum_loop: // sum 16x wide do { + ADDPD (X_PTR)(IDX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(X_PTR)(IDX*8), SUM_1 + ADDPD 32(X_PTR)(IDX*8), SUM_2 + ADDPD 48(X_PTR)(IDX*8), SUM_3 + ADDPD 64(X_PTR)(IDX*8), SUM + ADDPD 80(X_PTR)(IDX*8), SUM_1 + ADDPD 96(X_PTR)(IDX*8), SUM_2 + ADDPD 112(X_PTR)(IDX*8), SUM_3 + ADDQ $16, IDX // i += 16 + DECQ LEN + JNZ sum_loop // } while --LEN > 0 + +sum_tail8: + TESTQ $8, TAIL + JZ sum_tail4 + + ADDPD (X_PTR)(IDX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(X_PTR)(IDX*8), SUM_1 + ADDPD 32(X_PTR)(IDX*8), SUM_2 + ADDPD 48(X_PTR)(IDX*8), SUM_3 + ADDQ $8, IDX + +sum_tail4: + ADDPD SUM_3, SUM + ADDPD SUM_2, SUM_1 + + TESTQ $4, TAIL + JZ sum_tail2 + + ADDPD (X_PTR)(IDX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(X_PTR)(IDX*8), SUM_1 + ADDQ $4, IDX + +sum_tail2: + ADDPD SUM_1, SUM + + TESTQ $2, TAIL + JZ sum_tail1 + + ADDPD (X_PTR)(IDX*8), SUM // sum_i += x[i:i+2] + ADDQ $2, IDX + +sum_tail1: + HADDPD SUM, SUM // sum_i[0] += sum_i[1] + + TESTQ $1, TAIL + JZ sum_end + + ADDSD (X_PTR)(IDX*8), SUM + +sum_end: // return sum + MOVSD SUM, ret+24(FP) + RET diff --git a/vendor/modules.txt b/vendor/modules.txt index e9f7f7f79ed..c17d9e2ebc8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -792,6 +792,25 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require +# github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389 +## explicit; go 1.19 +github.com/thanos-community/promql-engine/api +github.com/thanos-community/promql-engine/engine +github.com/thanos-community/promql-engine/execution +github.com/thanos-community/promql-engine/execution/aggregate +github.com/thanos-community/promql-engine/execution/binary +github.com/thanos-community/promql-engine/execution/exchange +github.com/thanos-community/promql-engine/execution/function +github.com/thanos-community/promql-engine/execution/model 
+github.com/thanos-community/promql-engine/execution/parse +github.com/thanos-community/promql-engine/execution/remote +github.com/thanos-community/promql-engine/execution/scan +github.com/thanos-community/promql-engine/execution/step_invariant +github.com/thanos-community/promql-engine/execution/storage +github.com/thanos-community/promql-engine/execution/unary +github.com/thanos-community/promql-engine/logicalplan +github.com/thanos-community/promql-engine/query +github.com/thanos-community/promql-engine/worker # github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 ## explicit; go 1.18 github.com/thanos-io/objstore @@ -1132,6 +1151,11 @@ golang.org/x/tools/internal/typesinternal ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal +# gonum.org/v1/gonum v0.12.0 +## explicit; go 1.18 +gonum.org/v1/gonum/floats +gonum.org/v1/gonum/floats/scalar +gonum.org/v1/gonum/internal/asm/f64 # google.golang.org/api v0.102.0 ## explicit; go 1.19 google.golang.org/api/googleapi From be52dd53a0fb0e61740b5fcbe84fe8ce8f532ab2 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 17 Jan 2023 14:03:57 -0800 Subject: [PATCH 046/133] use labels Bytes rather than String for map key (#5095) Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- pkg/distributor/query.go | 3 ++- pkg/querier/tripperware/instantquery/instant_query.go | 3 ++- pkg/querier/tripperware/merge.go | 3 ++- pkg/querier/tripperware/merge_test.go | 9 +++++---- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index d41556e283c..2a58c4955c1 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -257,10 +257,11 @@ func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSe func mergeExemplarQueryResponses(results []interface{}) *ingester_client.ExemplarQueryResponse { var keys []string exemplarResults := make(map[string]cortexpb.TimeSeries) + buf := make([]byte, 0, 1024) for _, result := range results { r := result.(*ingester_client.ExemplarQueryResponse) for _, ts := range r.Timeseries { - lbls := cortexpb.FromLabelAdaptersToLabels(ts.Labels).String() + lbls := string(cortexpb.FromLabelAdaptersToLabels(ts.Labels).Bytes(buf)) e, ok := exemplarResults[lbls] if !ok { exemplarResults[lbls] = ts diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index 645f9f70e42..fd1f3559db3 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -299,6 +299,7 @@ func (instantQueryCodec) MergeResponse(ctx context.Context, responses ...tripper func vectorMerge(resps []*PrometheusInstantQueryResponse) *Vector { output := map[string]*Sample{} + buf := make([]byte, 0, 1024) for _, resp := range resps { if resp == nil { continue @@ -313,7 +314,7 @@ func vectorMerge(resps []*PrometheusInstantQueryResponse) *Vector { if s == nil { continue } - metric := cortexpb.FromLabelAdaptersToLabels(sample.Labels).String() + metric := string(cortexpb.FromLabelAdaptersToLabels(sample.Labels).Bytes(buf)) if existingSample, ok := output[metric]; !ok { output[metric] = s } else if existingSample.GetSample().TimestampMs < s.GetSample().TimestampMs { diff --git a/pkg/querier/tripperware/merge.go b/pkg/querier/tripperware/merge.go index 30d811aa8ed..84dbb81afbc 100644 --- a/pkg/querier/tripperware/merge.go +++ b/pkg/querier/tripperware/merge.go @@ -8,8 +8,9 @@ import ( // MergeSampleStreams deduplicates 
sample streams using a map. func MergeSampleStreams(output map[string]SampleStream, sampleStreams []SampleStream) { + buf := make([]byte, 0, 1024) for _, stream := range sampleStreams { - metric := cortexpb.FromLabelAdaptersToLabels(stream.Labels).String() + metric := string(cortexpb.FromLabelAdaptersToLabels(stream.Labels).Bytes(buf)) existing, ok := output[metric] if !ok { existing = SampleStream{ diff --git a/pkg/querier/tripperware/merge_test.go b/pkg/querier/tripperware/merge_test.go index 561d47fdf94..2fa100ac244 100644 --- a/pkg/querier/tripperware/merge_test.go +++ b/pkg/querier/tripperware/merge_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/cortexproject/cortex/pkg/cortexpb" + ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" ) func TestMergeSampleStreams(t *testing.T) { @@ -34,7 +35,7 @@ func TestMergeSampleStreams(t *testing.T) { }, }, expectedOutput: map[string]SampleStream{ - lbls.String(): { + ingester_client.LabelsToKeyString(lbls): { Labels: cortexpb.FromLabelsToLabelAdapters(lbls), Samples: []cortexpb.Sample{ {Value: 0, TimestampMs: 0}, @@ -63,7 +64,7 @@ func TestMergeSampleStreams(t *testing.T) { }, }, expectedOutput: map[string]SampleStream{ - lbls.String(): { + ingester_client.LabelsToKeyString(lbls): { Labels: cortexpb.FromLabelsToLabelAdapters(lbls), Samples: []cortexpb.Sample{ {Value: 0, TimestampMs: 0}, @@ -109,7 +110,7 @@ func TestMergeSampleStreams(t *testing.T) { }, }, expectedOutput: map[string]SampleStream{ - lbls.String(): { + ingester_client.LabelsToKeyString(lbls): { Labels: cortexpb.FromLabelsToLabelAdapters(lbls), Samples: []cortexpb.Sample{ {Value: 0, TimestampMs: 0}, @@ -118,7 +119,7 @@ func TestMergeSampleStreams(t *testing.T) { {Value: 4, TimestampMs: 4}, }, }, - lbls1.String(): { + ingester_client.LabelsToKeyString(lbls1): { Labels: cortexpb.FromLabelsToLabelAdapters(lbls1), Samples: []cortexpb.Sample{ {Value: 0, TimestampMs: 0}, From dcc489170a55ab0155a76ee4386b6e3cfe357c0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 20 Jan 2023 22:34:36 +0200 Subject: [PATCH 047/133] frontend/transport: print Grafana specific headers in slow query log (#5042) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * frontend/transport: print Grafana specific headers in slow query log Print some Grafana specific headers in the slow query log if they exist. See this [pull request](https://github.com/grafana/grafana/pull/60301) for more info. 
Signed-off-by: Giedrius Statkevičius * frontend/transport: print Grafana data in query stats Signed-off-by: Giedrius Statkevičius * transport/handler: fix bug Signed-off-by: Giedrius Statkevičius Signed-off-by: Giedrius Statkevičius Signed-off-by: Alex Le --- pkg/frontend/transport/handler.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 794f61cf383..8add8ae82bc 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -172,8 +172,26 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } +func formatGrafanaStatsFields(r *http.Request) []interface{} { + grafanaDashboardUID := "-" + if dashboardUID := r.Header.Get("X-Dashboard-Uid"); dashboardUID != "" { + grafanaDashboardUID = dashboardUID + } + grafanaPanelID := "-" + if panelID := r.Header.Get("X-Panel-Id"); panelID != "" { + grafanaPanelID = panelID + } + + return []interface{}{ + "grafana_dashboard_uid", grafanaDashboardUID, + "grafana_panel_id", grafanaPanelID, + } +} + // reportSlowQuery reports slow queries. func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, queryResponseTime time.Duration) { + // NOTE(GiedriusS): see https://github.com/grafana/grafana/pull/60301 for more info. + logMessage := append([]interface{}{ "msg", "slow query detected", "method", r.Method, @@ -181,6 +199,7 @@ func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, query "path", r.URL.Path, "time_taken", queryResponseTime.String(), }, formatQueryString(queryString)...) + logMessage = append(logMessage, formatGrafanaStatsFields(r)...) level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) } @@ -217,6 +236,7 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer }, stats.LoadExtraFields()...) logMessage = append(logMessage, formatQueryString(queryString)...) + logMessage = append(logMessage, formatGrafanaStatsFields(r)...) 
if error != nil { s, ok := status.FromError(error) From 30c6e4c78d46c60a54aaa14dd787134e32a03dae Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 20 Jan 2023 19:07:59 -0800 Subject: [PATCH 048/133] Fix Flaky Test (#5100) * add queryIngestersWithin so we know which test is failing on the ci * giving a little more wigle room Signed-off-by: Alex Le --- pkg/querier/querier_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 8ed80d03a6c..8a4f8a9312f 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -418,7 +418,7 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { { name: "hit-test2", mint: time.Now().Add(-5 * time.Hour), - maxt: time.Now().Add(-59 * time.Minute), + maxt: time.Now().Add(-55 * time.Minute), hitIngester: true, queryIngestersWithin: 1 * time.Hour, }, @@ -467,7 +467,7 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { } for _, c := range testCases { cfg.QueryIngestersWithin = c.queryIngestersWithin - t.Run(fmt.Sprintf("IngesterStreaming=%t,thanosEngine=%t,test=%s", cfg.IngesterStreaming, thanosEngine, c.name), func(t *testing.T) { + t.Run(fmt.Sprintf("IngesterStreaming=%t,thanosEngine=%t,queryIngestersWithin=%v, test=%s", cfg.IngesterStreaming, thanosEngine, c.queryIngestersWithin, c.name), func(t *testing.T) { chunkStore, _ := makeMockChunkStore(t, 24, encodings[0].e) distributor := &errDistributor{} From 41becac6c8afde6c432caf95ce738793ea22cdef Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 23 Jan 2023 21:10:16 -0800 Subject: [PATCH 049/133] Update promql engine to fix stats panic (#5099) * update promql engine Signed-off-by: Ben Ye * update engine to latest main Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- go.mod | 4 +- go.sum | 8 +- .../efficientgo/core/testutil/testutil.go | 59 +++++-- .../promql-engine/api/remote.go | 3 + .../promql-engine/engine/engine.go | 146 ++++++++++++++---- .../execution/exchange/coalesce.go | 89 ++++++----- .../promql-engine/execution/exchange/dedup.go | 141 +++++++++++++++++ .../promql-engine/execution/execution.go | 17 +- .../promql-engine/logicalplan/distribute.go | 88 ++++++++--- .../promql-engine/logicalplan/plan.go | 6 +- vendor/modules.txt | 4 +- 11 files changed, 448 insertions(+), 117 deletions(-) create mode 100644 vendor/github.com/thanos-community/promql-engine/execution/exchange/dedup.go diff --git a/go.mod b/go.mod index 4567fd3f33d..2f6e40f981d 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.0 - github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 + github.com/efficientgo/core v1.0.0-rc.2 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/felixge/fgprof v0.9.3 github.com/go-kit/log v0.2.1 @@ -49,7 +49,7 @@ require ( github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 - github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389 + github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a github.com/uber/jaeger-client-go v2.30.0+incompatible diff --git a/go.sum b/go.sum index d73a45131d2..856173871a5 100644 --- a/go.sum +++ b/go.sum @@ -510,8 +510,8 @@ github.com/edsrzf/mmap-go 
v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 h1:rydBwnBoywKQMjWF0z8SriYtQ+uUcaFsxuijMjJr5PI= -github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI= +github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= +github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -1507,8 +1507,8 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389 h1:ZER0Tf+2PnNEd4bcOJFDQ48dOraRyvu5tfYWySMS2S4= -github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389/go.mod h1:GJkKOtKfXos1xbTmHBnX3YTCov8enxdAS1woR6/h4CI= +github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 h1:54iqf5p40TFGTc2Dp791P1/ncO2sTqvXdMLXqNpC/Gc= +github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4/go.mod h1:d52Wfzxs6L3xhc2snodyWvM2bZMMVn0XT2q4vsfBPVo= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a h1:oN3VupYNkavPRvdXwq71p54SAFSbOGvL0qL7CeKFrJ0= diff --git a/vendor/github.com/efficientgo/core/testutil/testutil.go b/vendor/github.com/efficientgo/core/testutil/testutil.go index c74f368507b..c88191bc10d 100644 --- a/vendor/github.com/efficientgo/core/testutil/testutil.go +++ b/vendor/github.com/efficientgo/core/testutil/testutil.go @@ -14,8 +14,19 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/efficientgo/core/errors" "github.com/efficientgo/core/testutil/internal" + "github.com/google/go-cmp/cmp" ) +const limitOfElemChars = 1e3 + +func withLimitf(f string, v ...interface{}) string { + s := fmt.Sprintf(f, v...) + if len(s) > limitOfElemChars { + return s[:limitOfElemChars] + "...(output trimmed)" + } + return s +} + // Assert fails the test if the condition is false. func Assert(tb testing.TB, condition bool, v ...interface{}) { tb.Helper() @@ -28,7 +39,7 @@ func Assert(tb testing.TB, condition bool, v ...interface{}) { if len(v) > 0 { msg = fmt.Sprintf(v[0].(string), v[1:]...) 
} - tb.Fatalf("\033[31m%s:%d: "+msg+"\033[39m\n\n", filepath.Base(file), line) + tb.Fatalf("\033[31m%s:%d: \"%s\"\033[39m\n\n", filepath.Base(file), line, withLimitf(msg)) } // Ok fails the test if an err is not nil. @@ -43,7 +54,7 @@ func Ok(tb testing.TB, err error, v ...interface{}) { if len(v) > 0 { msg = fmt.Sprintf(v[0].(string), v[1:]...) } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.Fatalf("\033[31m%s:%d: \"%s\"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, withLimitf(msg), withLimitf(err.Error())) } // NotOk fails the test if an err is nil. @@ -58,7 +69,7 @@ func NotOk(tb testing.TB, err error, v ...interface{}) { if len(v) > 0 { msg = fmt.Sprintf(v[0].(string), v[1:]...) } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line) + tb.Fatalf("\033[31m%s:%d: \"%s\"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line, withLimitf(msg)) } // Equals fails the test if exp is not equal to act. @@ -67,13 +78,41 @@ func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { if reflect.DeepEqual(exp, act) { return } - _, file, line, _ := runtime.Caller(1) + fatalNotEqual(tb, exp, act, v...) +} + +func fatalNotEqual(tb testing.TB, exp, act interface{}, v ...interface{}) { + _, file, line, _ := runtime.Caller(2) var msg string if len(v) > 0 { msg = fmt.Sprintf(v[0].(string), v[1:]...) } - tb.Fatal(sprintfWithLimit("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))) + tb.Fatalf( + "\033[31m%s:%d: \"%s\"\n\n\texp: %s\n\n\tgot: %s%s\033[39m\n\n", + filepath.Base(file), line, withLimitf(msg), withLimitf("%#v", exp), withLimitf("%#v", act), withLimitf(diff(exp, act)), + ) +} + +type goCmp struct { + opts cmp.Options +} + +// WithGoCmp allows specifying options and using https://github.com/google/go-cmp +// for equality comparisons. The compatibility guarantee of this function's arguments +// are the same as go-cmp (no guarantee due to v0.x). +func WithGoCmp(opts ...cmp.Option) goCmp { + return goCmp{opts: opts} +} + +// Equals uses go-cmp for comparing equality between two structs, and can be used with +// various options defined in go-cmp/cmp and go-cmp/cmp/cmpopts. +func (o goCmp) Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { + tb.Helper() + if cmp.Equal(exp, act, o.opts) { + return + } + fatalNotEqual(tb, exp, act, v...) } // FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. @@ -98,7 +137,7 @@ func ContainsStringSlice(tb testing.TB, haystack, needle []string) { _, file, line, _ := runtime.Caller(1) if !contains(haystack, needle) { - tb.Fatalf(sprintfWithLimit("\033[31m%s:%d: %#v does not contain %#v\033[39m\n\n", filepath.Base(file), line, haystack, needle)) + tb.Fatalf("\033[31m%s:%d: %s does not contain %s\033[39m\n\n", filepath.Base(file), line, withLimitf("%#v", haystack), withLimitf("%#v", needle)) } } @@ -138,14 +177,6 @@ func contains(haystack, needle []string) bool { return false } -func sprintfWithLimit(act string, v ...interface{}) string { - s := fmt.Sprintf(act, v...) 
- if len(s) > 10000 { - return s[:10000] + "...(output trimmed)" - } - return s -} - func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { t := reflect.TypeOf(v) k := t.Kind() diff --git a/vendor/github.com/thanos-community/promql-engine/api/remote.go b/vendor/github.com/thanos-community/promql-engine/api/remote.go index 2f8c9f2c146..d93ade96046 100644 --- a/vendor/github.com/thanos-community/promql-engine/api/remote.go +++ b/vendor/github.com/thanos-community/promql-engine/api/remote.go @@ -6,6 +6,7 @@ package api import ( "time" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" ) @@ -14,6 +15,8 @@ type RemoteEndpoints interface { } type RemoteEngine interface { + MaxT() int64 + LabelSets() []labels.Labels NewInstantQuery(opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) NewRangeQuery(opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) } diff --git a/vendor/github.com/thanos-community/promql-engine/engine/engine.go b/vendor/github.com/thanos-community/promql-engine/engine/engine.go index 399c0c55c3b..55f25b4c28f 100644 --- a/vendor/github.com/thanos-community/promql-engine/engine/engine.go +++ b/vendor/github.com/thanos-community/promql-engine/engine/engine.go @@ -6,6 +6,7 @@ package engine import ( "context" + "github.com/prometheus/prometheus/model/labels" v1 "github.com/prometheus/prometheus/web/api/v1" "io" @@ -63,29 +64,41 @@ func (o Opts) getLogicalOptimizers() []logicalplan.Optimizer { return o.LogicalOptimizers } -type localEngine struct { - q storage.Queryable - engine *compatibilityEngine +type remoteEngine struct { + q storage.Queryable + engine *compatibilityEngine + labelSets []labels.Labels + maxt int64 } -func NewLocalEngine(opts Opts, q storage.Queryable) *localEngine { - return &localEngine{ - q: q, - engine: New(opts), +func NewRemoteEngine(opts Opts, q storage.Queryable, maxt int64, labelSets []labels.Labels) *remoteEngine { + return &remoteEngine{ + q: q, + labelSets: labelSets, + maxt: maxt, + engine: New(opts), } } -func (l localEngine) NewInstantQuery(opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { +func (l remoteEngine) MaxT() int64 { + return l.maxt +} + +func (l remoteEngine) LabelSets() []labels.Labels { + return l.labelSets +} + +func (l remoteEngine) NewInstantQuery(opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { return l.engine.NewInstantQuery(l.q, opts, qs, ts) } -func (l localEngine) NewRangeQuery(opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { +func (l remoteEngine) NewRangeQuery(opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { return l.engine.NewRangeQuery(l.q, opts, qs, start, end, interval) } type distributedEngine struct { - endpoints api.RemoteEndpoints - localEngine *compatibilityEngine + endpoints api.RemoteEndpoints + remoteEngine *compatibilityEngine } func NewDistributedEngine(opts Opts, endpoints api.RemoteEndpoints) v1.QueryEngine { @@ -94,19 +107,19 @@ func NewDistributedEngine(opts Opts, endpoints api.RemoteEndpoints) v1.QueryEngi logicalplan.DistributedExecutionOptimizer{Endpoints: endpoints}, ) return &distributedEngine{ - endpoints: endpoints, - localEngine: New(opts), + endpoints: endpoints, + remoteEngine: New(opts), } } func (l distributedEngine) SetQueryLogger(log promql.QueryLogger) {} func (l distributedEngine) NewInstantQuery(q storage.Queryable, opts 
*promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { - return l.localEngine.NewInstantQuery(q, opts, qs, ts) + return l.remoteEngine.NewInstantQuery(q, opts, qs, ts) } func (l distributedEngine) NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { - return l.localEngine.NewRangeQuery(q, opts, qs, start, end, interval) + return l.remoteEngine.NewRangeQuery(q, opts, qs, start, end, interval) } func New(opts Opts) *compatibilityEngine { @@ -174,11 +187,12 @@ func (e *compatibilityEngine) NewInstantQuery(q storage.Queryable, opts *promql. } return &compatibilityQuery{ - Query: &Query{exec: exec}, - engine: e, - expr: expr, - ts: ts, - t: InstantQuery, + Query: &Query{exec: exec, opts: opts}, + engine: e, + expr: expr, + ts: ts, + t: InstantQuery, + resultSort: newResultSort(expr), }, nil } @@ -211,7 +225,7 @@ func (e *compatibilityEngine) NewRangeQuery(q storage.Queryable, opts *promql.Qu } return &compatibilityQuery{ - Query: &Query{exec: exec}, + Query: &Query{exec: exec, opts: opts}, engine: e, expr: expr, t: RangeQuery, @@ -220,6 +234,7 @@ func (e *compatibilityEngine) NewRangeQuery(q storage.Queryable, opts *promql.Qu type Query struct { exec model.VectorOperator + opts *promql.QueryOpts } // Explain returns human-readable explanation of the created executor. @@ -232,12 +247,83 @@ func (q *Query) Profile() { // TODO(bwplotka): Return profile. } +type sortOrder bool + +const ( + sortOrderAsc sortOrder = false + sortOrderDesc sortOrder = true +) + +type resultSort struct { + sortByValues bool + sortOrder sortOrder + sortingLabels []string + groupBy bool +} + +func newResultSort(expr parser.Expr) resultSort { + aggr, ok := expr.(*parser.AggregateExpr) + if !ok { + return resultSort{} + } + + switch aggr.Op { + case parser.TOPK: + return resultSort{ + sortByValues: true, + sortingLabels: aggr.Grouping, + sortOrder: sortOrderDesc, + groupBy: !aggr.Without, + } + case parser.BOTTOMK: + return resultSort{ + sortByValues: true, + sortingLabels: aggr.Grouping, + sortOrder: sortOrderAsc, + groupBy: !aggr.Without, + } + default: + return resultSort{} + } +} + +func (s resultSort) comparer(samples *promql.Vector) func(i int, j int) bool { + return func(i int, j int) bool { + if !s.sortByValues { + return i < j + } + + var iLbls labels.Labels + var jLbls labels.Labels + iLb := labels.NewBuilder((*samples)[i].Metric) + jLb := labels.NewBuilder((*samples)[j].Metric) + if s.groupBy { + iLbls = iLb.Keep(s.sortingLabels...).Labels(nil) + jLbls = jLb.Keep(s.sortingLabels...).Labels(nil) + } else { + iLbls = iLb.Del(s.sortingLabels...).Labels(nil) + jLbls = jLb.Del(s.sortingLabels...).Labels(nil) + } + + lblsCmp := labels.Compare(iLbls, jLbls) + if lblsCmp != 0 { + return lblsCmp < 0 + } + + if s.sortOrder == sortOrderAsc { + return (*samples)[i].V < (*samples)[j].V + } + return (*samples)[i].V > (*samples)[j].V + } +} + type compatibilityQuery struct { *Query - engine *compatibilityEngine - expr parser.Expr - ts time.Time // Empty for range queries. - t QueryType + engine *compatibilityEngine + expr parser.Expr + ts time.Time // Empty for range queries. 
+ t QueryType + resultSort resultSort cancel context.CancelFunc } @@ -351,6 +437,7 @@ loop: }, }) } + sort.Slice(vector, q.resultSort.comparer(&vector)) result = vector case parser.ValueTypeScalar: v := math.NaN() @@ -378,7 +465,14 @@ func newErrResult(r *promql.Result, err error) *promql.Result { func (q *compatibilityQuery) Statement() parser.Statement { return nil } -func (q *compatibilityQuery) Stats() *stats.Statistics { return &stats.Statistics{} } +// Stats always returns empty query stats for now to avoid panic. +func (q *compatibilityQuery) Stats() *stats.Statistics { + var enablePerStepStats bool + if q.opts != nil { + enablePerStepStats = q.opts.EnablePerStepStats + } + return &stats.Statistics{Timers: stats.NewQueryTimers(), Samples: stats.NewQuerySamples(enablePerStepStats)} +} func (q *compatibilityQuery) Close() { q.Cancel() } diff --git a/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go b/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go index 23b45ae86ef..8e5cc15cfbf 100644 --- a/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go +++ b/vendor/github.com/thanos-community/promql-engine/execution/exchange/coalesce.go @@ -6,6 +6,7 @@ package exchange import ( "context" "sync" + "sync/atomic" "github.com/efficientgo/core/errors" "github.com/prometheus/prometheus/model/labels" @@ -25,34 +26,42 @@ func (c errorChan) getError() error { return nil } -type coalesceOperator struct { +// coalesce is a model.VectorOperator that merges input vectors from multiple downstream operators +// into a single output vector. +// coalesce guarantees that samples from different input vectors will be added to the output in the same order +// as the input vectors themselves are provided in NewCoalesce. +type coalesce struct { once sync.Once series []labels.Labels - pool *model.VectorPool - mu sync.Mutex - wg sync.WaitGroup - operators []model.VectorOperator + pool *model.VectorPool + wg sync.WaitGroup + operators []model.VectorOperator + + // inVectors is an internal per-step cache for references to input vectors. + inVectors [][]model.StepVector + // sampleOffsets holds per-operator offsets needed to map an input sample ID to an output sample ID. 
sampleOffsets []uint64 } func NewCoalesce(pool *model.VectorPool, operators ...model.VectorOperator) model.VectorOperator { - return &coalesceOperator{ + return &coalesce{ pool: pool, - operators: operators, sampleOffsets: make([]uint64, len(operators)), + operators: operators, + inVectors: make([][]model.StepVector, len(operators)), } } -func (c *coalesceOperator) Explain() (me string, next []model.VectorOperator) { - return "[*coalesceOperator]", c.operators +func (c *coalesce) Explain() (me string, next []model.VectorOperator) { + return "[*coalesce]", c.operators } -func (c *coalesceOperator) GetPool() *model.VectorPool { +func (c *coalesce) GetPool() *model.VectorPool { return c.pool } -func (c *coalesceOperator) Series(ctx context.Context) ([]labels.Labels, error) { +func (c *coalesce) Series(ctx context.Context) ([]labels.Labels, error) { var err error c.once.Do(func() { err = c.loadSeries(ctx) }) if err != nil { @@ -61,7 +70,7 @@ func (c *coalesceOperator) Series(ctx context.Context) ([]labels.Labels, error) return c.series, nil } -func (c *coalesceOperator) Next(ctx context.Context) ([]model.StepVector, error) { +func (c *coalesce) Next(ctx context.Context) ([]model.StepVector, error) { select { case <-ctx.Done(): return nil, ctx.Err() @@ -74,7 +83,6 @@ func (c *coalesceOperator) Next(ctx context.Context) ([]model.StepVector, error) return nil, err } - var out []model.StepVector = nil var errChan = make(errorChan, len(c.operators)) for idx, o := range c.operators { c.wg.Add(1) @@ -86,36 +94,14 @@ func (c *coalesceOperator) Next(ctx context.Context) ([]model.StepVector, error) errChan <- err return } - if in == nil { - return - } + // Map input IDs to output IDs. for _, vector := range in { for i := range vector.SampleIDs { vector.SampleIDs[i] += c.sampleOffsets[opIdx] } } - - c.mu.Lock() - defer c.mu.Unlock() - - if len(in) > 0 && out == nil { - out = c.pool.GetVectorBatch() - for i := 0; i < len(in); i++ { - out = append(out, c.pool.GetStepVector(in[i].T)) - } - } - - for i := 0; i < len(in); i++ { - if len(in[i].Samples) > 0 { - out[i].T = in[i].T - } - - out[i].Samples = append(out[i].Samples, in[i].Samples...) - out[i].SampleIDs = append(out[i].SampleIDs, in[i].SampleIDs...) - o.GetPool().PutStepVector(in[i]) - } - o.GetPool().PutVectors(in) + c.inVectors[opIdx] = in }(idx, o) } c.wg.Wait() @@ -125,6 +111,24 @@ func (c *coalesceOperator) Next(ctx context.Context) ([]model.StepVector, error) return nil, err } + var out []model.StepVector = nil + for opIdx, vector := range c.inVectors { + if len(vector) > 0 && out == nil { + out = c.pool.GetVectorBatch() + for i := 0; i < len(vector); i++ { + out = append(out, c.pool.GetStepVector(vector[i].T)) + } + } + + for i := 0; i < len(vector); i++ { + out[i].Samples = append(out[i].Samples, vector[i].Samples...) + out[i].SampleIDs = append(out[i].SampleIDs, vector[i].SampleIDs...) 
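
As an aside on the coalesce operator shown above: each downstream operator keeps its own 0-based sample IDs, and coalesce shifts them by the number of series owned by the operators that come before it, so IDs remain unique after the merge. A minimal sketch of that offset arithmetic, outside the patch:

package main

import "fmt"

func main() {
	// Series counts per downstream operator, in the order passed to NewCoalesce.
	seriesPerOperator := []int{3, 2, 4}

	// sampleOffsets[i] is the first global sample ID assigned to operator i.
	sampleOffsets := make([]uint64, len(seriesPerOperator))
	var offset uint64
	for i, n := range seriesPerOperator {
		sampleOffsets[i] = offset
		offset += uint64(n)
	}
	fmt.Println("offsets:", sampleOffsets) // offsets: [0 3 5]

	// Operator 2 reports a sample for its local series 1; globally that is 5+1=6.
	localID, op := uint64(1), 2
	fmt.Println("global sample ID:", sampleOffsets[op]+localID) // 6
}
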
+ c.operators[opIdx].GetPool().PutStepVector(vector[i]) + } + c.inVectors[opIdx] = nil + c.operators[opIdx].GetPool().PutVectors(vector) + } + if out == nil { return nil, nil } @@ -132,9 +136,8 @@ func (c *coalesceOperator) Next(ctx context.Context) ([]model.StepVector, error) return out, nil } -func (c *coalesceOperator) loadSeries(ctx context.Context) error { +func (c *coalesce) loadSeries(ctx context.Context) error { var wg sync.WaitGroup - var mu sync.Mutex var numSeries uint64 allSeries := make([][]labels.Labels, len(c.operators)) errChan := make(errorChan, len(c.operators)) @@ -161,9 +164,7 @@ func (c *coalesceOperator) loadSeries(ctx context.Context) error { } allSeries[i] = series - mu.Lock() - numSeries += uint64(len(series)) - mu.Unlock() + atomic.AddUint64(&numSeries, uint64(len(series))) }(i) } wg.Wait() @@ -172,13 +173,11 @@ func (c *coalesceOperator) loadSeries(ctx context.Context) error { return err } - var offset uint64 c.sampleOffsets = make([]uint64, len(c.operators)) c.series = make([]labels.Labels, 0, numSeries) for i, series := range allSeries { - c.sampleOffsets[i] = offset + c.sampleOffsets[i] = uint64(len(c.series)) c.series = append(c.series, series...) - offset += uint64(len(series)) } c.pool.SetStepSize(len(c.series)) diff --git a/vendor/github.com/thanos-community/promql-engine/execution/exchange/dedup.go b/vendor/github.com/thanos-community/promql-engine/execution/exchange/dedup.go new file mode 100644 index 00000000000..517aed72ab3 --- /dev/null +++ b/vendor/github.com/thanos-community/promql-engine/execution/exchange/dedup.go @@ -0,0 +1,141 @@ +// Copyright (c) The Thanos Community Authors. +// Licensed under the Apache License 2.0. + +package exchange + +import ( + "context" + "sync" + + "github.com/prometheus/prometheus/model/labels" + + "github.com/thanos-community/promql-engine/execution/model" +) + +type dedupSample struct { + t int64 + v float64 +} + +// The dedupCache is an internal cache used to deduplicate samples inside a single step vector. +type dedupCache []dedupSample + +// dedupOperator is a model.VectorOperator that deduplicates samples with +// same IDs inside a single model.StepVector. +// Deduplication is done using a last-sample-wins strategy, which means that +// if multiple samples with the same ID are present in a StepVector, dedupOperator +// will keep the last sample in that vector. +type dedupOperator struct { + once sync.Once + series []labels.Labels + + pool *model.VectorPool + next model.VectorOperator + // outputIndex is a slice that is used as an index from input sample ID to output sample ID. 
+ outputIndex []uint64 + dedupCache dedupCache +} + +func NewDedupOperator(pool *model.VectorPool, next model.VectorOperator) model.VectorOperator { + return &dedupOperator{ + next: next, + pool: pool, + } +} + +func (d *dedupOperator) Next(ctx context.Context) ([]model.StepVector, error) { + var err error + d.once.Do(func() { err = d.loadSeries(ctx) }) + if err != nil { + return nil, err + } + + in, err := d.next.Next(ctx) + if err != nil { + return nil, err + } + if in == nil { + return nil, nil + } + + result := d.pool.GetVectorBatch() + for _, vector := range in { + for i, inputSampleID := range vector.SampleIDs { + outputSampleID := d.outputIndex[inputSampleID] + d.dedupCache[outputSampleID].t = vector.T + d.dedupCache[outputSampleID].v = vector.Samples[i] + } + + out := d.pool.GetStepVector(vector.T) + for outputSampleID, sample := range d.dedupCache { + // To avoid clearing the dedup cache for each step vector, we use the `t` field + // to detect whether a sample for the current step should be mapped to the output. + // If the timestamp of the sample does not match the input vector timestamp, it means that + // the sample was added in a previous iteration and should be skipped. + if sample.t == vector.T { + out.SampleIDs = append(out.SampleIDs, uint64(outputSampleID)) + out.Samples = append(out.Samples, sample.v) + } + } + result = append(result, out) + } + + return result, nil +} + +func (d *dedupOperator) Series(ctx context.Context) ([]labels.Labels, error) { + var err error + d.once.Do(func() { err = d.loadSeries(ctx) }) + if err != nil { + return nil, err + } + return d.series, nil +} + +func (d *dedupOperator) GetPool() *model.VectorPool { + return d.pool +} + +func (d *dedupOperator) Explain() (me string, next []model.VectorOperator) { + return "[*dedup]", []model.VectorOperator{d.next} +} + +func (d *dedupOperator) loadSeries(ctx context.Context) error { + series, err := d.next.Series(ctx) + if err != nil { + return err + } + + outputIndex := make(map[uint64]uint64) + inputIndex := make([]uint64, len(series)) + hashBuf := make([]byte, 0, 128) + for inputSeriesID, inputSeries := range series { + hash := hashSeries(hashBuf, inputSeries) + + inputIndex[inputSeriesID] = hash + outputSeriesID, ok := outputIndex[hash] + if !ok { + outputSeriesID = uint64(len(d.series)) + d.series = append(d.series, inputSeries) + } + outputIndex[hash] = outputSeriesID + } + + d.outputIndex = make([]uint64, len(inputIndex)) + for inputSeriesID, hash := range inputIndex { + outputSeriesID := outputIndex[hash] + d.outputIndex[inputSeriesID] = outputSeriesID + } + d.dedupCache = make(dedupCache, len(outputIndex)) + for i := range d.dedupCache { + d.dedupCache[i].t = -1 + } + + return nil +} + +func hashSeries(hashBuf []byte, inputSeries labels.Labels) uint64 { + hashBuf = hashBuf[:0] + hash, _ := inputSeries.HashWithoutLabels(hashBuf) + return hash +} diff --git a/vendor/github.com/thanos-community/promql-engine/execution/execution.go b/vendor/github.com/thanos-community/promql-engine/execution/execution.go index c563d1bca2a..28ff04ca4d7 100644 --- a/vendor/github.com/thanos-community/promql-engine/execution/execution.go +++ b/vendor/github.com/thanos-community/promql-engine/execution/execution.go @@ -18,6 +18,7 @@ package execution import ( "runtime" + "sort" "time" "github.com/prometheus/prometheus/promql" @@ -236,7 +237,15 @@ func newOperator(expr parser.Expr, storage *engstore.SelectorPool, opts *query.O } return step_invariant.NewStepInvariantOperator(model.NewVectorPool(stepsBatch), next, e.Expr, 
opts, stepsBatch) - case logicalplan.Coalesce: + case logicalplan.Deduplicate: + // The Deduplicate operator will deduplicate samples using a last-sample-wins strategy. + // Sorting engines by MaxT ensures that samples produced due to + // staleness will be overwritten and corrected by samples coming from + // engines with a higher max time. + sort.Slice(e.Expressions, func(i, j int) bool { + return e.Expressions[i].Engine.MaxT() < e.Expressions[j].Engine.MaxT() + }) + operators := make([]model.VectorOperator, len(e.Expressions)) for i, expr := range e.Expressions { operator, err := newOperator(expr, storage, opts, hints) @@ -245,9 +254,11 @@ func newOperator(expr parser.Expr, storage *engstore.SelectorPool, opts *query.O } operators[i] = operator } - return exchange.NewCoalesce(model.NewVectorPool(stepsBatch), operators...), nil + coalesce := exchange.NewCoalesce(model.NewVectorPool(stepsBatch), operators...) + dedup := exchange.NewDedupOperator(model.NewVectorPool(stepsBatch), coalesce) + return exchange.NewConcurrent(dedup, 2), nil - case *logicalplan.RemoteExecution: + case logicalplan.RemoteExecution: qry, err := e.Engine.NewRangeQuery(&promql.QueryOpts{}, e.Query, opts.Start, opts.End, opts.Step) if err != nil { return nil, err diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go index d332e7c5235..36e9af15120 100644 --- a/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/distribute.go @@ -5,28 +5,26 @@ package logicalplan import ( "fmt" + "sort" + "strings" "github.com/prometheus/prometheus/promql/parser" "github.com/thanos-community/promql-engine/api" ) -type Coalesce struct { - Expressions parser.Expressions -} +type RemoteExecutions []RemoteExecution -func (r Coalesce) String() string { - return fmt.Sprintf("coalesce(%s)", r.Expressions) +func (rs RemoteExecutions) String() string { + parts := make([]string, len(rs)) + for i, r := range rs { + parts[i] = r.String() + } + return strings.Join(parts, ", ") } -func (r Coalesce) Pretty(level int) string { return r.String() } - -func (r Coalesce) PositionRange() parser.PositionRange { return parser.PositionRange{} } - -func (r Coalesce) Type() parser.ValueType { return parser.ValueTypeMatrix } - -func (r Coalesce) PromQLExpr() {} - +// RemoteExecution is a logical plan that describes a +// remote execution of a Query against the given PromQL Engine. type RemoteExecution struct { Engine api.RemoteEngine Query string @@ -44,6 +42,25 @@ func (r RemoteExecution) Type() parser.ValueType { return parser.ValueTypeMatrix func (r RemoteExecution) PromQLExpr() {} +// Deduplicate is a logical plan which deduplicates samples from multiple RemoteExecutions. +type Deduplicate struct { + Expressions RemoteExecutions +} + +func (r Deduplicate) String() string { + return fmt.Sprintf("dedup(%s)", r.Expressions.String()) +} + +func (r Deduplicate) Pretty(level int) string { return r.String() } + +func (r Deduplicate) PositionRange() parser.PositionRange { return parser.PositionRange{} } + +func (r Deduplicate) Type() parser.ValueType { return parser.ValueTypeMatrix } + +func (r Deduplicate) PromQLExpr() {} + +// distributiveAggregations are all PromQL aggregations which support +// distributed execution. 
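
The Deduplicate case above sorts the remote engines by MaxT() before wiring them into coalesce and the dedup operator. A rough standalone sketch of that ordering follows; the engine names and timestamps are made up, and the point is only that the engine listed last contributes last and therefore wins under last-sample-wins deduplication.

package main

import (
	"fmt"
	"sort"
)

type fakeEngine struct {
	name string
	maxT int64 // highest timestamp (ms) this engine can serve
}

func main() {
	engines := []fakeEngine{
		{name: "engine-a", maxT: 1_700_000_000_000},
		{name: "engine-b", maxT: 1_700_000_600_000},
	}

	// Ascending by max time, mirroring the sort.Slice call in the hunk above.
	sort.Slice(engines, func(i, j int) bool {
		return engines[i].maxT < engines[j].maxT
	})

	// The last engine's samples overwrite stale samples from earlier engines.
	for i, e := range engines {
		fmt.Printf("position %d: %s (maxT=%d)\n", i, e.name, e.maxT)
	}
}
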
var distributiveAggregations = map[parser.ItemType]struct{}{ parser.SUM: {}, parser.MIN: {}, @@ -62,6 +79,7 @@ type DistributedExecutionOptimizer struct { func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr) parser.Expr { engines := m.Endpoints.Engines() + traverseBottomUp(nil, &plan, func(parent, current *parser.Expr) (stop bool) { // If the current operation is not distributive, stop the traversal. if !isDistributive(current) { @@ -75,7 +93,9 @@ func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr) parser.Expr { if aggr.Op == parser.COUNT { localAggregation = parser.SUM } - subQueries := m.makeSubQueries(current, engines) + + remoteAggregation := newRemoteAggregation(aggr, engines) + subQueries := m.makeSubQueries(&remoteAggregation, engines) *current = &parser.AggregateExpr{ Op: localAggregation, Expr: subQueries, @@ -99,17 +119,47 @@ func (m DistributedExecutionOptimizer) Optimize(plan parser.Expr) parser.Expr { return plan } -func (m DistributedExecutionOptimizer) makeSubQueries(current *parser.Expr, engines []api.RemoteEngine) Coalesce { - remoteQueries := Coalesce{ - Expressions: make(parser.Expressions, len(engines)), +func newRemoteAggregation(rootAggregation *parser.AggregateExpr, engines []api.RemoteEngine) parser.Expr { + groupingSet := make(map[string]struct{}) + for _, lbl := range rootAggregation.Grouping { + groupingSet[lbl] = struct{}{} } + + for _, engine := range engines { + for _, lbls := range engine.LabelSets() { + for _, lbl := range lbls { + if rootAggregation.Without { + delete(groupingSet, lbl.Name) + } else { + groupingSet[lbl.Name] = struct{}{} + } + } + } + } + + groupingLabels := make([]string, 0, len(groupingSet)) + for lbl := range groupingSet { + groupingLabels = append(groupingLabels, lbl) + } + sort.Strings(groupingLabels) + + remoteAggregation := *rootAggregation + remoteAggregation.Grouping = groupingLabels + return &remoteAggregation +} + +func (m DistributedExecutionOptimizer) makeSubQueries(current *parser.Expr, engines []api.RemoteEngine) Deduplicate { + remoteQueries := make(RemoteExecutions, len(engines)) for i := 0; i < len(engines); i++ { - remoteQueries.Expressions[i] = &RemoteExecution{ + remoteQueries[i] = RemoteExecution{ Engine: engines[i], Query: (*current).String(), } } - return remoteQueries + + return Deduplicate{ + Expressions: remoteQueries, + } } func isDistributive(expr *parser.Expr) bool { diff --git a/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go b/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go index 8d8720d3013..682a86d299f 100644 --- a/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go +++ b/vendor/github.com/thanos-community/promql-engine/logicalplan/plan.go @@ -84,6 +84,8 @@ func traverse(expr *parser.Expr, transform func(*parser.Expr)) { func traverseBottomUp(parent *parser.Expr, current *parser.Expr, transform func(parent *parser.Expr, node *parser.Expr) bool) bool { switch node := (*current).(type) { + case *parser.NumberLiteral: + return false case *parser.StepInvariantExpr: return traverseBottomUp(current, &node.Expr, transform) case *parser.VectorSelector: @@ -96,8 +98,8 @@ func traverseBottomUp(parent *parser.Expr, current *parser.Expr, transform func( } return transform(parent, current) case *parser.Call: - for _, n := range node.Args { - if stop := traverseBottomUp(current, &n, transform); stop { + for i := range node.Args { + if stop := traverseBottomUp(current, &node.Args[i], transform); stop { return stop } } diff --git 
a/vendor/modules.txt b/vendor/modules.txt index c17d9e2ebc8..4c82195c0b9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -265,7 +265,7 @@ github.com/dustin/go-humanize # github.com/edsrzf/mmap-go v1.1.0 ## explicit; go 1.17 github.com/edsrzf/mmap-go -# github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 +# github.com/efficientgo/core v1.0.0-rc.2 ## explicit; go 1.17 github.com/efficientgo/core/errcapture github.com/efficientgo/core/errors @@ -792,7 +792,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-community/promql-engine v0.0.0-20230111121531-c293f65f5389 +# github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 ## explicit; go 1.19 github.com/thanos-community/promql-engine/api github.com/thanos-community/promql-engine/engine From 43a417d32afebf3d3e20a39059c876f91f89cb00 Mon Sep 17 00:00:00 2001 From: Daniel Blando Date: Tue, 24 Jan 2023 10:51:44 -0800 Subject: [PATCH 050/133] Add dynamodb multikey kv (#5026) * Add dynamodb multikey kv Signed-off-by: Daniel Deluiggi * Reorganize import Signed-off-by: Daniel Deluiggi * Rework test Signed-off-by: Daniel Deluiggi * Change ddb CAS to use batch Signed-off-by: Daniel Deluiggi * Use slice to batch writeRequests Signed-off-by: Daniel Deluiggi * Change slice usage Signed-off-by: Daniel Deluiggi Signed-off-by: Daniel Deluiggi Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/blocks-storage/compactor.md | 13 + docs/blocks-storage/store-gateway.md | 13 + docs/configuration/config-file-reference.md | 91 + go.mod | 2 +- integration/kv_test.go | 8 + pkg/ring/http.go | 19 +- pkg/ring/kv/client.go | 16 +- pkg/ring/kv/codec/clonable.go | 6 + pkg/ring/kv/codec/codec.go | 63 +- pkg/ring/kv/codec/codec_test.go | 132 + pkg/ring/kv/codec/multikey.go | 25 + pkg/ring/kv/consul/client.go | 4 + pkg/ring/kv/consul/mock.go | 4 + pkg/ring/kv/dynamodb/client.go | 324 + pkg/ring/kv/dynamodb/client_test.go | 389 + pkg/ring/kv/dynamodb/dynamodb.go | 250 + pkg/ring/kv/dynamodb/dynamodb_test.go | 172 + pkg/ring/kv/dynamodb/metrics.go | 99 + pkg/ring/kv/etcd/etcd.go | 4 + pkg/ring/kv/memberlist/memberlist_client.go | 8 +- .../kv/memberlist/memberlist_client_test.go | 20 +- pkg/ring/kv/memberlist/mergeable.go | 15 +- pkg/ring/kv/metrics.go | 5 + pkg/ring/kv/mock.go | 5 + pkg/ring/kv/multi.go | 5 + pkg/ring/kv/prefix.go | 5 + pkg/ring/lifecycler.go | 8 +- pkg/ring/lifecycler_test.go | 4 + pkg/ring/model.go | 106 +- pkg/ring/model_test.go | 184 + pkg/ring/replication_strategy.go | 17 +- pkg/ring/replication_strategy_test.go | 4 +- pkg/ring/ring.go | 13 +- pkg/ring/ring_test.go | 22 +- pkg/ring/util_test.go | 1 + .../aws/aws-sdk-go/aws/crr/cache.go | 122 + .../aws/aws-sdk-go/aws/crr/endpoint.go | 132 + .../aws/aws-sdk-go/aws/crr/sync_map.go | 30 + .../aws/aws-sdk-go/aws/crr/sync_map_1_8.go | 49 + .../aws/aws-sdk-go/service/dynamodb/api.go | 26975 ++++++++++++++++ .../service/dynamodb/customizations.go | 98 + .../aws/aws-sdk-go/service/dynamodb/doc.go | 45 + .../aws-sdk-go/service/dynamodb/doc_custom.go | 27 + .../dynamodb/dynamodbiface/interface.go | 303 + .../aws/aws-sdk-go/service/dynamodb/errors.go | 347 + .../aws-sdk-go/service/dynamodb/service.go | 112 + .../aws-sdk-go/service/dynamodb/waiters.go | 107 + vendor/modules.txt | 3 + 49 files changed, 30350 insertions(+), 57 deletions(-) create mode 100644 pkg/ring/kv/codec/clonable.go create mode 100644 pkg/ring/kv/codec/codec_test.go create mode 100644 
pkg/ring/kv/codec/multikey.go create mode 100644 pkg/ring/kv/dynamodb/client.go create mode 100644 pkg/ring/kv/dynamodb/client_test.go create mode 100644 pkg/ring/kv/dynamodb/dynamodb.go create mode 100644 pkg/ring/kv/dynamodb/dynamodb_test.go create mode 100644 pkg/ring/kv/dynamodb/metrics.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cd9b10e917..656d800b3fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ * [FEATURE] Query-frontend/Querier: Create spans to measure time to merge promql responses. #5041 * [FEATURE] Querier/Ruler: Support the new thanos promql engine. This is an experimental feature and might change in the future. #5093 * [FEATURE] Added zstd as an option for grpc compression #5092 +* [FEATURE] Ring: Add new kv store option `dynamodb`. #5026 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. #5044 diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index 2e7b40b8a2b..9892e959fdd 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -204,6 +204,19 @@ compactor: # CLI flag: -compactor.ring.prefix [prefix: | default = "collectors/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -compactor.ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -compactor.ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -compactor.ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: compactor.ring [consul: ] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index a789a9a5607..0c54882bc0b 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -205,6 +205,19 @@ store_gateway: # CLI flag: -store-gateway.sharding-ring.prefix [prefix: | default = "collectors/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -store-gateway.sharding-ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -store-gateway.sharding-ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -store-gateway.sharding-ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. 
# The CLI flags prefix for this block config is: # store-gateway.sharding-ring diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index feecef027fe..ab75686cc23 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -493,6 +493,19 @@ ha_tracker: # CLI flag: -distributor.ha-tracker.prefix [prefix: | default = "ha-tracker/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -distributor.ha-tracker.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -distributor.ha-tracker.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -distributor.ha-tracker.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: distributor.ha-tracker [consul: ] @@ -557,6 +570,19 @@ ring: # CLI flag: -distributor.ring.prefix [prefix: | default = "collectors/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -distributor.ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -distributor.ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -distributor.ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: distributor.ring [consul: ] @@ -627,6 +653,19 @@ lifecycler: # CLI flag: -ring.prefix [prefix: | default = "collectors/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. [consul: ] @@ -1299,6 +1338,19 @@ ring: # CLI flag: -ruler.ring.prefix [prefix: | default = "rulers/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -ruler.ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -ruler.ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -ruler.ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: ruler.ring [consul: ] @@ -1681,6 +1733,19 @@ sharding_ring: # CLI flag: -alertmanager.sharding-ring.prefix [prefix: | default = "alertmanagers/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -alertmanager.sharding-ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -alertmanager.sharding-ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -alertmanager.sharding-ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: alertmanager.sharding-ring [consul: ] @@ -3868,6 +3933,19 @@ sharding_ring: # CLI flag: -compactor.ring.prefix [prefix: | default = "collectors/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -compactor.ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -compactor.ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. 
+ # CLI flag: -compactor.ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: compactor.ring [consul: ] @@ -3955,6 +4033,19 @@ sharding_ring: # CLI flag: -store-gateway.sharding-ring.prefix [prefix: | default = "collectors/"] + dynamodb: + # Region to access dynamodb. + # CLI flag: -store-gateway.sharding-ring.dynamodb.region + [region: | default = ""] + + # Table name to use on dynamodb. + # CLI flag: -store-gateway.sharding-ring.dynamodb.table-name + [table_name: | default = ""] + + # Time to expire items on dynamodb. + # CLI flag: -store-gateway.sharding-ring.dynamodb.ttl-time + [ttl: | default = 0s] + # The consul_config configures the consul client. # The CLI flags prefix for this block config is: store-gateway.sharding-ring [consul: ] diff --git a/go.mod b/go.mod index 2f6e40f981d..85abbfc07a1 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alicebob/miniredis/v2 v2.30.0 github.com/armon/go-metrics v0.4.1 + github.com/aws/aws-sdk-go v1.44.156 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.0 @@ -89,7 +90,6 @@ require ( github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/aws/aws-sdk-go v1.44.156 // indirect github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect diff --git a/integration/kv_test.go b/integration/kv_test.go index e55cb5a0cbd..0252f6024f7 100644 --- a/integration/kv_test.go +++ b/integration/kv_test.go @@ -229,6 +229,14 @@ func (c stringCodec) Decode(bb []byte) (interface{}, error) { func (c stringCodec) Encode(v interface{}) ([]byte, error) { return []byte(v.(string)), nil } func (c stringCodec) CodecID() string { return "stringCodec" } +func (stringCodec) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { + return nil, errors.New("String codec does not support EncodeMultiKey") +} + +func (stringCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) { + return nil, errors.New("String codec does not support DecodeMultiKey") +} + type watcher struct { values map[string][]interface{} } diff --git a/pkg/ring/http.go b/pkg/ring/http.go index f23f08b8124..81615afb439 100644 --- a/pkg/ring/http.go +++ b/pkg/ring/http.go @@ -24,6 +24,7 @@ const pageContent = `
 Ring Status
 Current time: {{ .Now }}
+Storage updated: {{ .StorageLastUpdated }}
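
The new template line above is fed from the page data passed to the template, which now carries the time the ring was last synced from the KV store alongside the current wall-clock time. A minimal sketch of rendering such a value, separate from the patch; the pageData struct here is illustrative only:

package main

import (
	"os"
	"text/template"
	"time"
)

type pageData struct {
	Now                time.Time
	StorageLastUpdated time.Time
}

func main() {
	tmpl := template.Must(template.New("ring").Parse(
		"Current time: {{ .Now }}\nStorage updated: {{ .StorageLastUpdated }}\n"))

	data := pageData{
		Now:                time.Now(),
		StorageLastUpdated: time.Now().Add(-30 * time.Second),
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
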
@@ -116,9 +117,10 @@ type ingesterDesc struct { } type httpResponse struct { - Ingesters []ingesterDesc `json:"shards"` - Now time.Time `json:"now"` - ShowTokens bool `json:"-"` + Ingesters []ingesterDesc `json:"shards"` + Now time.Time `json:"now"` + StorageLastUpdated time.Time `json:"storageLastUpdated"` + ShowTokens bool `json:"-"` } func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { @@ -149,14 +151,14 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(ingesterIDs) - now := time.Now() + storageLastUpdate := r.KVClient.LastUpdateTime(r.key) var ingesters []ingesterDesc _, owned := r.countTokens() for _, id := range ingesterIDs { ing := r.ringDesc.Ingesters[id] heartbeatTimestamp := time.Unix(ing.Timestamp, 0) state := ing.State.String() - if !r.IsHealthy(&ing, Reporting, now) { + if !r.IsHealthy(&ing, Reporting, storageLastUpdate) { state = unhealthy } @@ -182,9 +184,10 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { tokensParam := req.URL.Query().Get("tokens") renderHTTPResponse(w, httpResponse{ - Ingesters: ingesters, - Now: now, - ShowTokens: tokensParam == "true", + Ingesters: ingesters, + Now: time.Now(), + StorageLastUpdated: storageLastUpdate, + ShowTokens: tokensParam == "true", }, pageTemplate, req) } diff --git a/pkg/ring/kv/client.go b/pkg/ring/kv/client.go index b251a64b8bd..e0ce28d0b3e 100644 --- a/pkg/ring/kv/client.go +++ b/pkg/ring/kv/client.go @@ -5,12 +5,14 @@ import ( "flag" "fmt" "sync" + "time" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/consul" + "github.com/cortexproject/cortex/pkg/ring/kv/dynamodb" "github.com/cortexproject/cortex/pkg/ring/kv/etcd" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" ) @@ -40,9 +42,10 @@ var inmemoryStore Client // Consul, Etcd, Memberlist or MultiClient. It was extracted from Config to keep // single-client config separate from final client-config (with all the wrappers) type StoreConfig struct { - Consul consul.Config `yaml:"consul"` - Etcd etcd.Config `yaml:"etcd"` - Multi MultiConfig `yaml:"multi"` + DynamoDB dynamodb.Config `yaml:"dynamodb"` + Consul consul.Config `yaml:"consul"` + Etcd etcd.Config `yaml:"etcd"` + Multi MultiConfig `yaml:"multi"` // Function that returns memberlist.KV store to use. By using a function, we can delay // initialization of memberlist.KV until it is actually required. @@ -69,6 +72,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(flagsPrefix, defaultPrefix string, f // This needs to be fixed in the future (1.0 release maybe?) when we normalize flags. // At the moment we have consul., and ring.store, going forward it would // be easier to have everything under ring, so ring.consul. + cfg.DynamoDB.RegisterFlags(f, flagsPrefix) cfg.Consul.RegisterFlags(f, flagsPrefix) cfg.Etcd.RegisterFlagsWithPrefix(f, flagsPrefix) cfg.Multi.RegisterFlagsWithPrefix(f, flagsPrefix) @@ -111,6 +115,9 @@ type Client interface { // WatchPrefix calls f whenever any value stored under prefix changes. 
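
One way to read the ServeHTTP change above, where health is now evaluated against KVClient.LastUpdateTime(key) instead of time.Now(): if the locally cached ring data is stale, every heartbeat looks old relative to "now", but relative to the moment the data was last fetched it may still be healthy. A rough sketch of that comparison (not the actual ring code):

package main

import (
	"fmt"
	"time"
)

func healthy(heartbeat, reference time.Time, timeout time.Duration) bool {
	return reference.Sub(heartbeat) <= timeout
}

func main() {
	timeout := 1 * time.Minute
	lastSync := time.Now().Add(-10 * time.Minute) // cached ring data is 10m old
	heartbeat := lastSync.Add(-30 * time.Second)  // heartbeat was fresh when synced

	fmt.Println("vs now:      ", healthy(heartbeat, time.Now(), timeout)) // false
	fmt.Println("vs last sync:", healthy(heartbeat, lastSync, timeout))   // true
}
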
WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) + + // LastUpdateTime returns the time a key was last sync by the kv store + LastUpdateTime(key string) time.Time } // NewClient creates a new Client (consul, etcd or inmemory) based on the config, @@ -128,6 +135,9 @@ func createClient(backend string, prefix string, cfg StoreConfig, codec codec.Co var err error switch backend { + case "dynamodb": + client, err = dynamodb.NewClient(cfg.DynamoDB, codec, logger, reg) + case "consul": client, err = consul.NewClient(cfg.Consul, codec, logger, reg) diff --git a/pkg/ring/kv/codec/clonable.go b/pkg/ring/kv/codec/clonable.go new file mode 100644 index 00000000000..c3df74c6219 --- /dev/null +++ b/pkg/ring/kv/codec/clonable.go @@ -0,0 +1,6 @@ +package codec + +type Clonable interface { + // Clone should return a deep copy of the state. + Clone() interface{} +} diff --git a/pkg/ring/kv/codec/codec.go b/pkg/ring/kv/codec/codec.go index 49540fe3af1..d701bbe2082 100644 --- a/pkg/ring/kv/codec/codec.go +++ b/pkg/ring/kv/codec/codec.go @@ -1,8 +1,11 @@ package codec import ( + "fmt" + "github.com/gogo/protobuf/proto" "github.com/golang/snappy" + "github.com/pkg/errors" ) // Codec allows KV clients to serialise and deserialise values. @@ -10,6 +13,9 @@ type Codec interface { Decode([]byte) (interface{}, error) Encode(interface{}) ([]byte, error) + DecodeMultiKey(map[string][]byte) (interface{}, error) + EncodeMultiKey(interface{}) (map[string][]byte, error) + // CodecID is a short identifier to communicate what codec should be used to decode the value. // Once in use, this should be stable to avoid confusing other clients. CodecID() string @@ -31,7 +37,34 @@ func (p Proto) CodecID() string { // Decode implements Codec func (p Proto) Decode(bytes []byte) (interface{}, error) { - out := p.factory() + return p.decode(bytes, p.factory()) +} + +// DecodeMultiKey implements Codec +func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { + msg := p.factory() + // Don't even try + out, ok := msg.(MultiKey) + if !ok || out == nil { + return nil, fmt.Errorf("invalid type: %T, expected MultiKey", out) + } + + if len(data) > 0 { + res := make(map[string]interface{}, len(data)) + for key, bytes := range data { + decoded, err := p.decode(bytes, out.GetItemFactory()) + if err != nil { + return nil, err + } + res[key] = decoded + } + out.JoinIds(res) + } + + return out, nil +} + +func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) { bytes, err := snappy.Decode(nil, bytes) if err != nil { return nil, err @@ -51,6 +84,26 @@ func (p Proto) Encode(msg interface{}) ([]byte, error) { return snappy.Encode(nil, bytes), nil } +// EncodeMultiKey implements Codec +func (p Proto) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { + // Don't even try + r, ok := msg.(MultiKey) + if !ok || r == nil { + return nil, fmt.Errorf("invalid type: %T, expected MultiKey", msg) + } + + objs := r.SplitByID() + res := make(map[string][]byte, len(objs)) + for key, value := range objs { + bytes, err := p.Encode(value) + if err != nil { + return nil, err + } + res[key] = bytes + } + return res, nil +} + // String is a code for strings. 
type String struct{} @@ -67,3 +120,11 @@ func (String) Decode(bytes []byte) (interface{}, error) { func (String) Encode(msg interface{}) ([]byte, error) { return []byte(msg.(string)), nil } + +func (String) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { + return nil, errors.New("String codec does not support EncodeMultiKey") +} + +func (String) DecodeMultiKey(map[string][]byte) (interface{}, error) { + return nil, errors.New("String codec does not support DecodeMultiKey") +} diff --git a/pkg/ring/kv/codec/codec_test.go b/pkg/ring/kv/codec/codec_test.go new file mode 100644 index 00000000000..ff729626e7b --- /dev/null +++ b/pkg/ring/kv/codec/codec_test.go @@ -0,0 +1,132 @@ +package codec + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func Test_EncodeMultikey_CheckInterface(t *testing.T) { + m := NewProtoCodec("test", newMockMessage) + + _, err := m.EncodeMultiKey(nil) + + require.Error(t, err, "invalid type") +} + +func Test_DecodeMultikey_CheckInterface(t *testing.T) { + m := NewProtoCodec("test", newMockMessage) + + _, err := m.DecodeMultiKey(nil) + + require.Error(t, err, "invalid type") +} + +func Test_EncodeMultikey(t *testing.T) { + codec := NewProtoCodec("test", newProtoDescMock) + descMock := &DescMock{} + expectedSplitKeys := []string{"t1", "t2"} + expectedSplit := map[string]interface{}{ + expectedSplitKeys[0]: descMock, + expectedSplitKeys[1]: descMock, + } + encoded, err := codec.Encode(expectedSplit[expectedSplitKeys[0]]) + require.NoError(t, err) + + descMock.On("SplitByID").Return(expectedSplit) + + out, err := codec.EncodeMultiKey(descMock) + + require.NoError(t, err) + require.Equal(t, 2, len(out)) + require.Equal(t, encoded, out[expectedSplitKeys[0]]) + require.Equal(t, encoded, out[expectedSplitKeys[1]]) +} + +func Test_DecodeMultikey(t *testing.T) { + descMock := &DescMock{} + codec := NewProtoCodec("test", func() proto.Message { + return descMock + }) + mMock := &mockMessage{} + eT1, err := codec.Encode(mMock) + require.NoError(t, err) + eT2, err := codec.Encode(mMock) + require.NoError(t, err) + expectedSplit := map[string][]byte{ + "t1": eT1, + "t2": eT2, + } + + descMock.On("GetItemFactory").Return(newMockMessage()) + descMock.On("JoinIds").Return(descMock) + + out, err := codec.DecodeMultiKey(expectedSplit) + + require.NoError(t, err) + descMock.AssertNumberOfCalls(t, "GetItemFactory", 2) + descMock.AssertNumberOfCalls(t, "JoinIds", 1) + require.Equal(t, descMock, out) +} + +type mockMessage struct { +} + +func newMockMessage() proto.Message { + return &mockMessage{} +} +func (mockMessage) Reset() {} + +func (mockMessage) String() string { + return "mockString" +} + +func (mockMessage) ProtoMessage() { +} + +type DescMock struct { + mock.Mock +} + +func newProtoDescMock() proto.Message { + return &DescMock{} +} + +func (m *DescMock) Clone() interface{} { + args := m.Called() + return args.Get(0) +} + +func (m *DescMock) SplitByID() map[string]interface{} { + args := m.Called() + return args.Get(0).(map[string]interface{}) +} + +func (m *DescMock) JoinIds(map[string]interface{}) { + m.Called() +} + +func (m *DescMock) GetItemFactory() proto.Message { + args := m.Called() + return args.Get(0).(proto.Message) +} + +func (m *DescMock) FindDifference(that MultiKey) (interface{}, []string, error) { + args := m.Called(that) + var err error + if args.Get(2) != nil { + err = args.Get(2).(error) + } + return args.Get(0), args.Get(1).([]string), err +} + +func (m 
*DescMock) Reset() {} + +func (m *DescMock) String() string { + return "Desc" +} + +func (m *DescMock) ProtoMessage() { +} diff --git a/pkg/ring/kv/codec/multikey.go b/pkg/ring/kv/codec/multikey.go new file mode 100644 index 00000000000..bd8802c4adc --- /dev/null +++ b/pkg/ring/kv/codec/multikey.go @@ -0,0 +1,25 @@ +package codec + +import ( + "github.com/gogo/protobuf/proto" +) + +type MultiKey interface { + Clonable + + // SplitByID Split interface in array of key and value. THe key is a unique identifier of an instance in the ring. The value is + // interface with its data. The interface resultant need to be a proto.Message + SplitByID() map[string]interface{} + + // JoinIds update the current interface to add receiving key value information. The key is an unique identifier for an instance. + // The value is the information for that instance. + JoinIds(in map[string]interface{}) + + // GetItemFactory method to be used for deserilaize the value information from an instance + GetItemFactory() proto.Message + + // FindDifference returns the difference between two Multikeys. The returns are an interface which also implements Multikey + // with an array of keys which were changed, and an array of strings which are unique identifiers deleted. An error is + // returned when that does not implement the correct codec + FindDifference(that MultiKey) (interface{}, []string, error) +} diff --git a/pkg/ring/kv/consul/client.go b/pkg/ring/kv/consul/client.go index 8d94eae0050..ab1c9da228b 100644 --- a/pkg/ring/kv/consul/client.go +++ b/pkg/ring/kv/consul/client.go @@ -369,6 +369,10 @@ func (c *Client) Delete(ctx context.Context, key string) error { return err } +func (c *Client) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} + func checkLastIndex(index, metaLastIndex uint64) (newIndex uint64, skip bool) { // See https://www.consul.io/api/features/blocking.html#implementation-details for logic behind these checks. 
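
A toy sketch of the contract described by the MultiKey interface above (not the real codec types, which work on proto messages): a composite value is split into one entry per instance so each instance can live under its own key, and comparing an old and a new snapshot yields the entries to upsert and the IDs to delete.

package main

import "fmt"

type ringSnapshot map[string]string // instance ID -> serialized instance state

// findDifference returns the entries changed in "next" and the IDs missing from it,
// in the spirit of MultiKey.FindDifference.
func findDifference(prev, next ringSnapshot) (updated ringSnapshot, deleted []string) {
	updated = ringSnapshot{}
	for id, state := range next {
		if prev[id] != state {
			updated[id] = state
		}
	}
	for id := range prev {
		if _, ok := next[id]; !ok {
			deleted = append(deleted, id)
		}
	}
	return updated, deleted
}

func main() {
	prev := ringSnapshot{"ingester-1": "ACTIVE", "ingester-2": "ACTIVE"}
	next := ringSnapshot{"ingester-1": "LEAVING"}

	updated, deleted := findDifference(prev, next)
	fmt.Println("update:", updated) // update: map[ingester-1:LEAVING]
	fmt.Println("delete:", deleted) // delete: [ingester-2]
}
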
if metaLastIndex == 0 { diff --git a/pkg/ring/kv/consul/mock.go b/pkg/ring/kv/consul/mock.go index 6de12058aae..7828499de33 100644 --- a/pkg/ring/kv/consul/mock.go +++ b/pkg/ring/kv/consul/mock.go @@ -233,6 +233,10 @@ func (m *mockKV) Delete(key string, q *consul.WriteOptions) (*consul.WriteMeta, return nil, nil } +func (m *mockKV) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} + func (m *mockKV) ResetIndex() { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/pkg/ring/kv/dynamodb/client.go b/pkg/ring/kv/dynamodb/client.go new file mode 100644 index 00000000000..ff51ac26556 --- /dev/null +++ b/pkg/ring/kv/dynamodb/client.go @@ -0,0 +1,324 @@ +package dynamodb + +import ( + "context" + "flag" + "fmt" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/ring/kv/codec" + "github.com/cortexproject/cortex/pkg/util/backoff" +) + +// Config to create a ConsulClient +type Config struct { + Region string `yaml:"region"` + TableName string `yaml:"table_name"` + TTL time.Duration `yaml:"ttl"` +} + +type Client struct { + kv dynamoDbClient + codec codec.Codec + ddbMetrics *dynamodbMetrics + logger log.Logger + + staleDataLock sync.RWMutex + staleData map[string]staleData +} + +type staleData struct { + data codec.MultiKey + timestamp time.Time +} + +var ( + backoffConfig = backoff.Config{ + MinBackoff: 1 * time.Second, + MaxBackoff: 1 * time.Minute, + MaxRetries: 0, + } + + defaultLoopDelay = 1 * time.Minute +) + +// RegisterFlags adds the flags required to config this to the given FlagSet +// If prefix is not an empty string it should end with a period. +func (cfg *Config) RegisterFlags(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Region, prefix+"dynamodb.region", "", "Region to access dynamodb.") + f.StringVar(&cfg.TableName, prefix+"dynamodb.table-name", "", "Table name to use on dynamodb.") + f.DurationVar(&cfg.TTL, prefix+"dynamodb.ttl-time", 0, "Time to expire items on dynamodb.") +} + +func NewClient(cfg Config, cc codec.Codec, logger log.Logger, registerer prometheus.Registerer) (*Client, error) { + dynamoDB, err := newDynamodbKV(cfg, logger) + if err != nil { + return nil, err + } + + ddbMetrics := newDynamoDbMetrics(registerer) + + c := &Client{ + kv: dynamodbInstrumentation{kv: dynamoDB, ddbMetrics: ddbMetrics}, + codec: cc, + logger: ddbLog(logger), + ddbMetrics: ddbMetrics, + staleData: make(map[string]staleData), + } + return c, nil +} + +func (c *Client) List(ctx context.Context, key string) ([]string, error) { + resp, _, err := c.kv.List(ctx, dynamodbKey{primaryKey: key}) + if err != nil { + level.Warn(c.logger).Log("msg", "error List", "key", key, "err", err) + return nil, err + } + return resp, err +} + +func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { + resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) + if err != nil { + level.Warn(c.logger).Log("msg", "error Get", "key", key, "err", err) + return nil, err + } + res, err := c.decodeMultikey(resp) + if err != nil { + return nil, err + } + c.updateStaleData(key, res, time.Now().UTC()) + + return res, nil +} + +func (c *Client) Delete(ctx context.Context, key string) error { + resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) + if err != nil { + level.Warn(c.logger).Log("msg", "error Delete", "key", key, "err", err) + return err + } + + for innerKey := range resp { + err := c.kv.Delete(ctx, dynamodbKey{ + primaryKey: key, + 
sortKey: innerKey, + }) + if err != nil { + level.Warn(c.logger).Log("msg", "error Delete", "key", key, "innerKey", innerKey, "err", err) + return err + } + } + c.deleteStaleData(key) + + return err +} + +func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { + bo := backoff.New(ctx, backoffConfig) + for bo.Ongoing() { + resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) + if err != nil { + level.Error(c.logger).Log("msg", "error cas query", "key", key, "err", err) + bo.Wait() + continue + } + + current, err := c.decodeMultikey(resp) + if err != nil { + level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) + continue + } + + out, retry, err := f(current.Clone()) + if err != nil { + if !retry { + return err + } + bo.Wait() + continue + } + + // Callback returning nil means it doesn't want to CAS anymore. + if out == nil { + return nil + } + // Don't even try + + r, ok := out.(codec.MultiKey) + if !ok || r == nil { + return fmt.Errorf("invalid type: %T, expected MultiKey", out) + } + + toUpdate, toDelete, err := current.FindDifference(r) + + if err != nil { + level.Error(c.logger).Log("msg", "error getting key", "key", key, "err", err) + continue + } + + buf, err := c.codec.EncodeMultiKey(toUpdate) + if err != nil { + level.Error(c.logger).Log("msg", "error serialising value", "key", key, "err", err) + continue + } + + putRequests := map[dynamodbKey][]byte{} + for childKey, bytes := range buf { + putRequests[dynamodbKey{primaryKey: key, sortKey: childKey}] = bytes + } + + deleteRequests := make([]dynamodbKey, 0, len(toDelete)) + for _, childKey := range toDelete { + deleteRequests = append(deleteRequests, dynamodbKey{primaryKey: key, sortKey: childKey}) + } + + if len(putRequests) > 0 || len(deleteRequests) > 0 { + return c.kv.Batch(ctx, putRequests, deleteRequests) + } + + return nil + } + + return fmt.Errorf("failed to CAS %s", key) +} + +func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { + bo := backoff.New(ctx, backoffConfig) + + for bo.Ongoing() { + out, _, err := c.kv.Query(ctx, dynamodbKey{ + primaryKey: key, + }, false) + if err != nil { + level.Error(c.logger).Log("msg", "error WatchKey", "key", key, "err", err) + + if bo.NumRetries() > 10 { + if staleData := c.getStaleData(key); staleData != nil { + if !f(staleData) { + return + } + bo.Reset() + } + } + bo.Wait() + continue + } + + decoded, err := c.decodeMultikey(out) + if err != nil { + level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) + continue + } + c.updateStaleData(key, decoded, time.Now().UTC()) + + if !f(decoded) { + return + } + + bo.Reset() + select { + case <-ctx.Done(): + return + case <-time.After(defaultLoopDelay): + } + } +} + +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { + bo := backoff.New(ctx, backoffConfig) + + for bo.Ongoing() { + out, _, err := c.kv.Query(ctx, dynamodbKey{ + primaryKey: prefix, + }, true) + if err != nil { + level.Error(c.logger).Log("msg", "WatchPrefix", "prefix", prefix, "err", err) + bo.Wait() + continue + } + + for key, bytes := range out { + decoded, err := c.codec.Decode(bytes) + if err != nil { + level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) + continue + } + if !f(key, decoded) { + return + } + } + + bo.Reset() + select { + case <-ctx.Done(): + return + case <-time.After(defaultLoopDelay): + } + } +} + +func (c *Client) 
decodeMultikey(data map[string][]byte) (codec.MultiKey, error) { + res, err := c.codec.DecodeMultiKey(data) + if err != nil { + return nil, err + } + out, ok := res.(codec.MultiKey) + if !ok || out == nil { + return nil, fmt.Errorf("invalid type: %T, expected MultiKey", out) + } + + return out, nil +} + +func (c *Client) LastUpdateTime(key string) time.Time { + c.staleDataLock.RLock() + defer c.staleDataLock.RUnlock() + + data, ok := c.staleData[key] + if !ok { + return time.Time{} + } + + return data.timestamp +} + +func (c *Client) updateStaleData(key string, data codec.MultiKey, timestamp time.Time) { + c.staleDataLock.Lock() + defer c.staleDataLock.Unlock() + + c.staleData[key] = staleData{ + data: data, + timestamp: timestamp, + } +} + +func (c *Client) getStaleData(key string) codec.MultiKey { + c.staleDataLock.RLock() + defer c.staleDataLock.RUnlock() + + data, ok := c.staleData[key] + if !ok { + return nil + } + + newD := data.data.Clone().(codec.MultiKey) + + return newD +} + +func (c *Client) deleteStaleData(key string) { + c.staleDataLock.Lock() + defer c.staleDataLock.Unlock() + + delete(c.staleData, key) +} + +func ddbLog(logger log.Logger) log.Logger { + return log.WithPrefix(logger, "class", "DynamodbKvClient") +} diff --git a/pkg/ring/kv/dynamodb/client_test.go b/pkg/ring/kv/dynamodb/client_test.go new file mode 100644 index 00000000000..f58a8dd500a --- /dev/null +++ b/pkg/ring/kv/dynamodb/client_test.go @@ -0,0 +1,389 @@ +package dynamodb + +import ( + "context" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/ring/kv/codec" +) + +const key = "test" + +func Test_CAS_ErrorNoRetry(t *testing.T) { + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + expectedErr := errors.Errorf("test") + + ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + codecMock.On("DecodeMultiKey").Return(descMock, nil).Twice() + descMock.On("Clone").Return(descMock).Once() + + err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + return nil, false, expectedErr + }) + + require.Equal(t, err, expectedErr) +} + +func Test_CAS_Backoff(t *testing.T) { + backoffConfig.MinBackoff = 1 * time.Millisecond + backoffConfig.MaxBackoff = 1 * time.Millisecond + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + expectedErr := errors.Errorf("test") + + ddbMock.On("Query").Return(map[string][]byte{}, expectedErr).Once() + ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + codecMock.On("DecodeMultiKey").Return(descMock, nil).Twice() + descMock.On("Clone").Return(descMock).Once() + descMock.On("FindDifference", descMock).Return(descMock, []string{}, nil).Once() + codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Twice() + + err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + return descMock, true, nil + }) + + require.NoError(t, err) +} + +func Test_CAS_Failed(t *testing.T) { + backoffConfig.MinBackoff = 1 * time.Millisecond + backoffConfig.MaxBackoff = 1 * time.Millisecond + backoffConfig.MaxRetries = 10 + ddbMock 
:= NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + + ddbMock.On("Query").Return(map[string][]byte{}, errors.Errorf("test")) + + err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + return descMock, true, nil + }) + + ddbMock.AssertNumberOfCalls(t, "Query", 10) + require.Error(t, err, "failed to CAS") +} + +func Test_CAS_Update(t *testing.T) { + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + expectedUpdatedKeys := []string{"t1", "t2"} + expectedUpdated := map[string][]byte{ + expectedUpdatedKeys[0]: []byte(expectedUpdatedKeys[0]), + expectedUpdatedKeys[1]: []byte(expectedUpdatedKeys[1]), + } + expectedBatch := map[dynamodbKey][]byte{ + {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: []byte(expectedUpdatedKeys[0]), + {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: []byte(expectedUpdatedKeys[1]), + } + + ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() + descMock.On("Clone").Return(descMock).Once() + descMock.On("FindDifference", descMock).Return(descMock, []string{}, nil).Once() + codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() + ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Once() + + err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + return descMock, true, nil + }) + + require.NoError(t, err) + ddbMock.AssertNumberOfCalls(t, "Batch", 1) + ddbMock.AssertCalled(t, "Batch", context.TODO(), expectedBatch, []dynamodbKey{}) +} + +func Test_CAS_Delete(t *testing.T) { + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + expectedToDelete := []string{"test", "test2"} + expectedBatch := []dynamodbKey{ + {primaryKey: key, sortKey: expectedToDelete[0]}, + {primaryKey: key, sortKey: expectedToDelete[1]}, + } + + ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() + descMock.On("Clone").Return(descMock).Once() + descMock.On("FindDifference", descMock).Return(descMock, expectedToDelete, nil).Once() + codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Batch", context.TODO(), map[dynamodbKey][]byte{}, expectedBatch) + + err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + return descMock, true, nil + }) + + require.NoError(t, err) + ddbMock.AssertNumberOfCalls(t, "Batch", 1) + ddbMock.AssertCalled(t, "Batch", context.TODO(), map[dynamodbKey][]byte{}, expectedBatch) +} + +func Test_CAS_Update_Delete(t *testing.T) { + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + expectedUpdatedKeys := []string{"t1", "t2"} + expectedUpdated := map[string][]byte{ + expectedUpdatedKeys[0]: []byte(expectedUpdatedKeys[0]), + expectedUpdatedKeys[1]: []byte(expectedUpdatedKeys[1]), + } + expectedUpdateBatch := map[dynamodbKey][]byte{ + {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: []byte(expectedUpdatedKeys[0]), + {primaryKey: key, sortKey: 
expectedUpdatedKeys[1]}: []byte(expectedUpdatedKeys[1]), + } + expectedToDelete := []string{"test", "test2"} + expectedDeleteBatch := []dynamodbKey{ + {primaryKey: key, sortKey: expectedToDelete[0]}, + {primaryKey: key, sortKey: expectedToDelete[1]}, + } + + ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() + descMock.On("Clone").Return(descMock).Once() + descMock.On("FindDifference", descMock).Return(descMock, expectedToDelete, nil).Once() + codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() + ddbMock.On("Batch", context.TODO(), expectedUpdateBatch, expectedDeleteBatch) + + err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + return descMock, true, nil + }) + + require.NoError(t, err) + ddbMock.AssertNumberOfCalls(t, "Batch", 1) + ddbMock.AssertCalled(t, "Batch", context.TODO(), expectedUpdateBatch, expectedDeleteBatch) +} + +func Test_WatchKey(t *testing.T) { + backoffConfig.MinBackoff = 1 * time.Millisecond + backoffConfig.MaxBackoff = 1 * time.Millisecond + defaultLoopDelay = 1 * time.Second + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + timesCalled := 0 + + ddbMock.On("Query").Return(map[string][]byte{}, nil) + codecMock.On("DecodeMultiKey").Return(descMock, nil) + + c.WatchKey(context.TODO(), key, func(i interface{}) bool { + timesCalled++ + ddbMock.AssertNumberOfCalls(t, "Query", timesCalled) + codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", timesCalled) + require.EqualValues(t, descMock, i) + return timesCalled < 5 + }) +} + +func Test_WatchKey_UpdateStale(t *testing.T) { + backoffConfig.MinBackoff = 1 * time.Millisecond + backoffConfig.MaxBackoff = 1 * time.Millisecond + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + staleData := &DescMock{} + + ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + codecMock.On("DecodeMultiKey").Return(staleData, nil) + + c.WatchKey(context.TODO(), key, func(i interface{}) bool { + ddbMock.AssertNumberOfCalls(t, "Query", 1) + codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", 1) + require.EqualValues(t, staleData, i) + return false + }) + + ddbMock.On("Query").Return(map[string][]byte{}, errors.Errorf("failed")) + staleData.On("Clone").Return(staleData).Once() + + c.WatchKey(context.TODO(), key, func(i interface{}) bool { + ddbMock.AssertNumberOfCalls(t, "Query", 12) + codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", 1) + require.EqualValues(t, staleData, i) + return false + }) +} + +func Test_WatchPrefix(t *testing.T) { + backoffConfig.MinBackoff = 1 * time.Millisecond + backoffConfig.MaxBackoff = 1 * time.Millisecond + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + data := map[string][]byte{} + dataKey := []string{"t1", "t2"} + data[dataKey[0]] = []byte(dataKey[0]) + data[dataKey[1]] = []byte(dataKey[1]) + calls := 0 + + ddbMock.On("Query").Return(data, nil) + codecMock.On("Decode").Twice() + + c.WatchPrefix(context.TODO(), key, func(key string, i interface{}) bool { + require.EqualValues(t, string(data[key]), i) + delete(data, key) + calls++ + return calls < 2 + }) + + require.True(t, len(data) == 0) + + ddbMock.AssertNumberOfCalls(t, "Query", 1) +} + +func 
Test_UpdateStaleData(t *testing.T) { + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry()) + staleData := &DescMock{} + timestamp := time.Date(2000, 10, 10, 10, 10, 10, 10, time.UTC) + + c.updateStaleData(key, staleData, timestamp) + + require.NotNil(t, c.staleData[key]) + require.EqualValues(t, c.staleData[key].data, staleData) + require.EqualValues(t, c.staleData[key].timestamp, timestamp) + +} + +// NewClientMock makes a new local dynamodb client. +func NewClientMock(ddbClient dynamoDbClient, cc codec.Codec, logger log.Logger, registerer prometheus.Registerer) *Client { + m := &Client{ + kv: ddbClient, + ddbMetrics: newDynamoDbMetrics(registerer), + codec: cc, + logger: logger, + staleData: make(map[string]staleData), + } + + return m +} + +type mockDynamodbClient struct { + mock.Mock +} + +func NewDynamodbClientMock() *mockDynamodbClient { + return &mockDynamodbClient{} +} + +func (m *mockDynamodbClient) List(context.Context, dynamodbKey) ([]string, float64, error) { + args := m.Called() + var err error + if args.Get(1) != nil { + err = args.Get(1).(error) + } + return args.Get(0).([]string), 0, err +} +func (m *mockDynamodbClient) Query(context.Context, dynamodbKey, bool) (map[string][]byte, float64, error) { + args := m.Called() + var err error + if args.Get(1) != nil { + err = args.Get(1).(error) + } + return args.Get(0).(map[string][]byte), 0, err +} +func (m *mockDynamodbClient) Delete(ctx context.Context, key dynamodbKey) error { + m.Called(ctx, key) + return nil +} +func (m *mockDynamodbClient) Put(ctx context.Context, key dynamodbKey, data []byte) error { + m.Called(ctx, key, data) + return nil +} +func (m *mockDynamodbClient) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { + m.Called(ctx, put, delete) + return nil +} + +type TestLogger struct { +} + +func (l TestLogger) Log(...interface{}) error { + return nil +} + +// String is a code for strings. +type CodecMock struct { + mock.Mock +} + +func (*CodecMock) CodecID() string { + return "CodecMock" +} + +// Decode implements Codec. +func (m *CodecMock) Decode(bytes []byte) (interface{}, error) { + m.Called() + return string(bytes), nil +} + +// Encode implements Codec. 
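+// The mock records the call and simply round-trips the value as a string, so
+// tests can feed plain strings through it without wiring up a real codec.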
+func (m *CodecMock) Encode(i interface{}) ([]byte, error) { + m.Called() + return []byte(i.(string)), nil +} + +func (m *CodecMock) EncodeMultiKey(interface{}) (map[string][]byte, error) { + args := m.Called() + return args.Get(0).(map[string][]byte), nil +} + +func (m *CodecMock) DecodeMultiKey(map[string][]byte) (interface{}, error) { + args := m.Called() + var err error + if args.Get(1) != nil { + err = args.Get(1).(error) + } + return args.Get(0), err +} + +type DescMock struct { + mock.Mock +} + +func (m *DescMock) Clone() interface{} { + args := m.Called() + return args.Get(0) +} + +func (m *DescMock) SplitByID() map[string]interface{} { + args := m.Called() + return args.Get(0).(map[string]interface{}) +} + +func (m *DescMock) JoinIds(map[string]interface{}) { + m.Called() +} + +func (m *DescMock) GetItemFactory() proto.Message { + args := m.Called() + return args.Get(0).(proto.Message) +} + +func (m *DescMock) FindDifference(that codec.MultiKey) (interface{}, []string, error) { + args := m.Called(that) + var err error + if args.Get(2) != nil { + err = args.Get(2).(error) + } + return args.Get(0), args.Get(1).([]string), err +} diff --git a/pkg/ring/kv/dynamodb/dynamodb.go b/pkg/ring/kv/dynamodb/dynamodb.go new file mode 100644 index 00000000000..e33715d91f6 --- /dev/null +++ b/pkg/ring/kv/dynamodb/dynamodb.go @@ -0,0 +1,250 @@ +package dynamodb + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/go-kit/log" +) + +type dynamodbKey struct { + primaryKey string + sortKey string +} + +type dynamoDbClient interface { + List(ctx context.Context, key dynamodbKey) ([]string, float64, error) + Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) + Delete(ctx context.Context, key dynamodbKey) error + Put(ctx context.Context, key dynamodbKey, data []byte) error + Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error +} + +type dynamodbKV struct { + dynamoDbClient + + ddbClient dynamodbiface.DynamoDBAPI + logger log.Logger + tableName *string + ttlValue time.Duration +} + +var ( + primaryKey = "RingKey" + sortKey = "InstanceKey" + contentData = "Data" + timeToLive = "ttl" +) + +func newDynamodbKV(cfg Config, logger log.Logger) (dynamodbKV, error) { + if err := validateConfigInput(cfg); err != nil { + return dynamodbKV{}, err + } + + sess, err := session.NewSession() + if err != nil { + return dynamodbKV{}, err + } + + if len(cfg.Region) > 0 { + sess.Config = &aws.Config{ + Region: aws.String(cfg.Region), + } + } + + dynamoDB := dynamodb.New(sess) + + ddbKV := &dynamodbKV{ + ddbClient: dynamoDB, + logger: logger, + tableName: aws.String(cfg.TableName), + ttlValue: cfg.TTL, + } + + return *ddbKV, nil +} + +func validateConfigInput(cfg Config) error { + if len(cfg.TableName) < 3 { + return fmt.Errorf("invalid dynamodb table name: %s", cfg.TableName) + } + + return nil +} + +// for testing +func (kv dynamodbKV) getTTL() time.Duration { + return kv.ttlValue +} + +func (kv dynamodbKV) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) { + var keys []string + var totalCapacity float64 + input := &dynamodb.QueryInput{ + TableName: kv.tableName, + ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), + KeyConditions: map[string]*dynamodb.Condition{ + primaryKey: { + ComparisonOperator: 
aws.String("EQ"), + AttributeValueList: []*dynamodb.AttributeValue{ + { + S: aws.String(key.primaryKey), + }, + }, + }, + }, + AttributesToGet: []*string{aws.String(sortKey)}, + } + + err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool { + totalCapacity += getCapacityUnits(output.ConsumedCapacity) + for _, item := range output.Items { + keys = append(keys, item[sortKey].String()) + } + return true + }) + if err != nil { + return nil, totalCapacity, err + } + + return keys, totalCapacity, nil +} + +func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { + keys := make(map[string][]byte) + var totalCapacity float64 + co := dynamodb.ComparisonOperatorEq + if isPrefix { + co = dynamodb.ComparisonOperatorBeginsWith + } + input := &dynamodb.QueryInput{ + TableName: kv.tableName, + ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), + KeyConditions: map[string]*dynamodb.Condition{ + primaryKey: { + ComparisonOperator: aws.String(co), + AttributeValueList: []*dynamodb.AttributeValue{ + { + S: aws.String(key.primaryKey), + }, + }, + }, + }, + } + + err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool { + totalCapacity += getCapacityUnits(output.ConsumedCapacity) + for _, item := range output.Items { + keys[*item[sortKey].S] = item[contentData].B + } + return true + }) + if err != nil { + return nil, totalCapacity, err + } + + return keys, totalCapacity, nil +} + +func (kv dynamodbKV) Delete(ctx context.Context, key dynamodbKey) error { + input := &dynamodb.DeleteItemInput{ + TableName: kv.tableName, + Key: generateItemKey(key), + } + _, err := kv.ddbClient.DeleteItemWithContext(ctx, input) + return err +} + +func (kv dynamodbKV) Put(ctx context.Context, key dynamodbKey, data []byte) error { + input := &dynamodb.PutItemInput{ + TableName: kv.tableName, + Item: kv.generatePutItemRequest(key, data), + } + _, err := kv.ddbClient.PutItemWithContext(ctx, input) + return err +} + +func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { + writeRequestSize := len(put) + len(delete) + if writeRequestSize == 0 { + return nil + } + + writeRequests := make([]*dynamodb.WriteRequest, 0, writeRequestSize) + for key, data := range put { + item := kv.generatePutItemRequest(key, data) + writeRequests = append(writeRequests, &dynamodb.WriteRequest{ + PutRequest: &dynamodb.PutRequest{ + Item: item, + }, + }) + } + + for _, key := range delete { + item := generateItemKey(key) + writeRequests = append(writeRequests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: item, + }, + }) + } + + input := &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ + *kv.tableName: writeRequests, + }, + } + + resp, err := kv.ddbClient.BatchWriteItemWithContext(ctx, input) + if err != nil { + return err + } + + if resp.UnprocessedItems != nil && len(resp.UnprocessedItems) > 0 { + return fmt.Errorf("error processing batch request for %s requests", resp.UnprocessedItems) + } + + return nil +} + +func (kv dynamodbKV) generatePutItemRequest(key dynamodbKey, data []byte) map[string]*dynamodb.AttributeValue { + item := generateItemKey(key) + item[contentData] = &dynamodb.AttributeValue{ + B: data, + } + if kv.getTTL() > 0 { + item[timeToLive] = &dynamodb.AttributeValue{ + N: aws.String(strconv.FormatInt(time.Now().UTC().Add(kv.getTTL()).Unix(), 10)), + } + } + + 
return item +} + +func generateItemKey(key dynamodbKey) map[string]*dynamodb.AttributeValue { + resp := map[string]*dynamodb.AttributeValue{ + primaryKey: { + S: aws.String(key.primaryKey), + }, + } + if len(key.sortKey) > 0 { + resp[sortKey] = &dynamodb.AttributeValue{ + S: aws.String(key.sortKey), + } + } + + return resp +} + +func getCapacityUnits(cap *dynamodb.ConsumedCapacity) float64 { + if cap != nil && cap.CapacityUnits != nil { + return *cap.CapacityUnits + } + return 0 +} diff --git a/pkg/ring/kv/dynamodb/dynamodb_test.go b/pkg/ring/kv/dynamodb/dynamodb_test.go new file mode 100644 index 00000000000..180b2e23102 --- /dev/null +++ b/pkg/ring/kv/dynamodb/dynamodb_test.go @@ -0,0 +1,172 @@ +package dynamodb + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/stretchr/testify/require" +) + +func Test_TTLDisabled(t *testing.T) { + ddbClientMock := &mockDynamodb{ + putItem: func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput { + require.Nil(t, input.Item["ttl"]) + return &dynamodb.PutItemOutput{} + }, + } + + ddb := newDynamodbClientMock("TEST", ddbClientMock, 0) + err := ddb.Put(context.TODO(), dynamodbKey{primaryKey: "test", sortKey: "test1"}, []byte("TEST")) + require.NoError(t, err) + +} + +func Test_TTL(t *testing.T) { + ddbClientMock := &mockDynamodb{ + putItem: func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput { + require.NotNil(t, input.Item["ttl"].N) + parsedTime, err := strconv.ParseInt(*input.Item["ttl"].N, 10, 64) + require.NoError(t, err) + require.Greater(t, time.Unix(parsedTime, 0), time.Now().UTC().Add(4*time.Hour), 10) + require.LessOrEqual(t, time.Unix(parsedTime, 0), time.Now().UTC().Add(6*time.Hour), 10) + return &dynamodb.PutItemOutput{} + }, + } + + ddb := newDynamodbClientMock("TEST", ddbClientMock, 5*time.Hour) + err := ddb.Put(context.TODO(), dynamodbKey{primaryKey: "test", sortKey: "test1"}, []byte("TEST")) + require.NoError(t, err) +} + +func Test_Batch(t *testing.T) { + tableName := "TEST" + ddbKeyUpdate := dynamodbKey{ + primaryKey: "PKUpdate", + sortKey: "SKUpdate", + } + ddbKeyDelete := dynamodbKey{ + primaryKey: "PKDelete", + sortKey: "SKDelete", + } + update := map[dynamodbKey][]byte{ + ddbKeyUpdate: {}, + } + delete := []dynamodbKey{ddbKeyDelete} + + ddbClientMock := &mockDynamodb{ + batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) { + require.NotNil(t, input.RequestItems[tableName]) + require.EqualValues(t, 2, len(input.RequestItems[tableName])) + require.True(t, + (checkPutRequestForItem(input.RequestItems[tableName][0], ddbKeyUpdate) || checkPutRequestForItem(input.RequestItems[tableName][1], ddbKeyUpdate)) && + (checkDeleteRequestForItem(input.RequestItems[tableName][0], ddbKeyDelete) || checkDeleteRequestForItem(input.RequestItems[tableName][1], ddbKeyDelete))) + return &dynamodb.BatchWriteItemOutput{}, nil + }, + } + + ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) + err := ddb.Batch(context.TODO(), update, delete) + require.NoError(t, err) +} + +func Test_EmptyBatch(t *testing.T) { + tableName := "TEST" + ddbClientMock := &mockDynamodb{} + + ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) + err := ddb.Batch(context.TODO(), nil, nil) + require.NoError(t, err) +} + +func Test_Batch_UnprocessedItems(t *testing.T) { + tableName := 
"TEST" + ddbKeyDelete := dynamodbKey{ + primaryKey: "PKDelete", + sortKey: "SKDelete", + } + delete := []dynamodbKey{ddbKeyDelete} + + ddbClientMock := &mockDynamodb{ + batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) { + return &dynamodb.BatchWriteItemOutput{ + UnprocessedItems: map[string][]*dynamodb.WriteRequest{ + tableName: {&dynamodb.WriteRequest{ + PutRequest: &dynamodb.PutRequest{Item: generateItemKey(ddbKeyDelete)}}, + }, + }, + }, nil + }, + } + + ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) + err := ddb.Batch(context.TODO(), nil, delete) + require.Errorf(t, err, "error processing batch dynamodb") +} + +func Test_Batch_Error(t *testing.T) { + tableName := "TEST" + ddbKeyDelete := dynamodbKey{ + primaryKey: "PKDelete", + sortKey: "SKDelete", + } + delete := []dynamodbKey{ddbKeyDelete} + + ddbClientMock := &mockDynamodb{ + batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) { + return &dynamodb.BatchWriteItemOutput{}, fmt.Errorf("mocked error") + }, + } + + ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) + err := ddb.Batch(context.TODO(), nil, delete) + require.Errorf(t, err, "mocked error") +} + +func checkPutRequestForItem(request *dynamodb.WriteRequest, key dynamodbKey) bool { + return request.PutRequest != nil && + request.PutRequest.Item[primaryKey] != nil && + request.PutRequest.Item[sortKey] != nil && + *request.PutRequest.Item[primaryKey].S == key.primaryKey && + *request.PutRequest.Item[sortKey].S == key.sortKey +} + +func checkDeleteRequestForItem(request *dynamodb.WriteRequest, key dynamodbKey) bool { + return request.DeleteRequest != nil && + request.DeleteRequest.Key[primaryKey] != nil && + request.DeleteRequest.Key[sortKey] != nil && + *request.DeleteRequest.Key[primaryKey].S == key.primaryKey && + *request.DeleteRequest.Key[sortKey].S == key.sortKey +} + +type mockDynamodb struct { + putItem func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput + batchWriteItem func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) + + dynamodbiface.DynamoDBAPI +} + +func (m *mockDynamodb) PutItemWithContext(_ context.Context, input *dynamodb.PutItemInput, _ ...request.Option) (*dynamodb.PutItemOutput, error) { + return m.putItem(input), nil +} + +func (m *mockDynamodb) BatchWriteItemWithContext(_ context.Context, input *dynamodb.BatchWriteItemInput, _ ...request.Option) (*dynamodb.BatchWriteItemOutput, error) { + return m.batchWriteItem(input) +} + +func newDynamodbClientMock(tableName string, mock *mockDynamodb, ttl time.Duration) *dynamodbKV { + ddbKV := &dynamodbKV{ + ddbClient: mock, + logger: TestLogger{}, + tableName: aws.String(tableName), + ttlValue: ttl, + } + + return ddbKV +} diff --git a/pkg/ring/kv/dynamodb/metrics.go b/pkg/ring/kv/dynamodb/metrics.go new file mode 100644 index 00000000000..d47f2fe3929 --- /dev/null +++ b/pkg/ring/kv/dynamodb/metrics.go @@ -0,0 +1,99 @@ +package dynamodb + +import ( + "context" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + grpcUtils "github.com/weaveworks/common/grpc" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/instrument" +) + +type dynamodbInstrumentation struct { + kv dynamodbKV + ddbMetrics *dynamodbMetrics +} + +type dynamodbMetrics struct { + dynamodbRequestDuration *instrument.HistogramCollector + dynamodbUsageMetrics *prometheus.CounterVec +} + +func 
newDynamoDbMetrics(registerer prometheus.Registerer) *dynamodbMetrics { + dynamodbRequestDurationCollector := instrument.NewHistogramCollector(promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ + Name: "dynamodb_kv_request_duration_seconds", + Help: "Time spent on dynamodb requests.", + Buckets: prometheus.DefBuckets, + }, []string{"operation", "status_code"})) + + dynamodbUsageMetrics := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Name: "dynamodb_kv_read_capacity_total", + Help: "Total used read capacity on dynamodb", + }, []string{"operation"}) + + dynamodbMetrics := dynamodbMetrics{ + dynamodbRequestDuration: dynamodbRequestDurationCollector, + dynamodbUsageMetrics: dynamodbUsageMetrics, + } + return &dynamodbMetrics +} + +func (d dynamodbInstrumentation) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) { + var resp []string + var totalCapacity float64 + err := instrument.CollectedRequest(ctx, "List", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + var err error + resp, totalCapacity, err = d.kv.List(ctx, key) + return err + }) + d.ddbMetrics.dynamodbUsageMetrics.WithLabelValues("List").Add(totalCapacity) + return resp, totalCapacity, err +} + +func (d dynamodbInstrumentation) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { + var resp map[string][]byte + var totalCapacity float64 + err := instrument.CollectedRequest(ctx, "Query", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + var err error + resp, totalCapacity, err = d.kv.Query(ctx, key, isPrefix) + return err + }) + d.ddbMetrics.dynamodbUsageMetrics.WithLabelValues("Query").Add(totalCapacity) + return resp, totalCapacity, err +} + +func (d dynamodbInstrumentation) Delete(ctx context.Context, key dynamodbKey) error { + return instrument.CollectedRequest(ctx, "Delete", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + return d.kv.Delete(ctx, key) + }) +} + +func (d dynamodbInstrumentation) Put(ctx context.Context, key dynamodbKey, data []byte) error { + return instrument.CollectedRequest(ctx, "Put", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + return d.kv.Put(ctx, key, data) + }) +} + +func (d dynamodbInstrumentation) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { + return instrument.CollectedRequest(ctx, "Batch", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + return d.kv.Batch(ctx, put, delete) + }) +} + +// errorCode converts an error into an error code string. 
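+// It reports "2xx" on success, the HTTP status family (e.g. "5xx") when the
+// error carries an httpgrpc response, "cancel" for cancelled requests, and
+// "error" for anything else; the result is used as the status_code label on
+// the request duration histogram.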
+func errorCode(err error) string { + if err == nil { + return "2xx" + } + + if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { + statusFamily := int(errResp.Code / 100) + return strconv.Itoa(statusFamily) + "xx" + } else if grpcUtils.IsCanceled(err) { + return "cancel" + } else { + return "error" + } +} diff --git a/pkg/ring/kv/etcd/etcd.go b/pkg/ring/kv/etcd/etcd.go index 3cea7a0cc99..591845fe884 100644 --- a/pkg/ring/kv/etcd/etcd.go +++ b/pkg/ring/kv/etcd/etcd.go @@ -298,3 +298,7 @@ func (c *Client) Delete(ctx context.Context, key string) error { _, err := c.cli.Delete(ctx, key) return err } + +func (c *Client) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go index db38617abef..6b1e1744d15 100644 --- a/pkg/ring/kv/memberlist/memberlist_client.go +++ b/pkg/ring/kv/memberlist/memberlist_client.go @@ -105,6 +105,10 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, c.kv.WatchPrefix(ctx, prefix, c.codec, f) } +func (c *Client) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} + // We want to use KV in Running and Stopping states. func (c *Client) awaitKVRunningOrStopping(ctx context.Context) error { s := c.kv.State() @@ -314,7 +318,7 @@ type valueDesc struct { func (v valueDesc) Clone() (result valueDesc) { result = v if v.value != nil { - result.value = v.value.Clone() + result.value = v.value.Clone().(Mergeable) } return } @@ -1253,7 +1257,7 @@ func (m *KV) mergeValueForKey(key string, incomingValue Mergeable, casVersion ui // The "changes" returned by Merge() can contain references to the "result" // state. Therefore, make sure we clone it before releasing the lock. - change = change.Clone() + change = change.Clone().(Mergeable) return change, newVersion, nil } diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index 0a66b52c7b0..39c4836f801 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -111,7 +111,7 @@ func (m member) clone() member { return out } -func (d *data) Clone() Mergeable { +func (d *data) Clone() interface{} { out := &data{ Members: make(map[string]member, len(d.Members)), } @@ -144,6 +144,14 @@ func (d dataCodec) Decode(b []byte) (interface{}, error) { return out, err } +func (d dataCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) { + return nil, errors.New("dataCodec does not support DecodeMultiKey") +} + +func (d dataCodec) EncodeMultiKey(interface{}) (map[string][]byte, error) { + return nil, errors.New("dataCodec does not support EncodeMultiKey") +} + func (d dataCodec) Encode(val interface{}) ([]byte, error) { buf := bytes.Buffer{} enc := gob.NewEncoder(&buf) @@ -937,7 +945,7 @@ func (dc distributedCounter) RemoveTombstones(limit time.Time) (_, _ int) { return } -func (dc distributedCounter) Clone() Mergeable { +func (dc distributedCounter) Clone() interface{} { out := make(distributedCounter, len(dc)) for k, v := range dc { out[k] = v @@ -965,6 +973,14 @@ func (d distributedCounterCodec) Encode(val interface{}) ([]byte, error) { return buf.Bytes(), err } +func (d distributedCounterCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) { + return nil, errors.New("distributedCounterCodec does not support DecodeMultiKey") +} + +func (d distributedCounterCodec) EncodeMultiKey(interface{}) (map[string][]byte, error) { + return nil, 
errors.New("distributedCounterCodec does not support EncodeMultiKey") +} + var _ codec.Codec = &distributedCounterCodec{} func TestMultipleCodecs(t *testing.T) { diff --git a/pkg/ring/kv/memberlist/mergeable.go b/pkg/ring/kv/memberlist/mergeable.go index 2c02acfa468..f2120c1d920 100644 --- a/pkg/ring/kv/memberlist/mergeable.go +++ b/pkg/ring/kv/memberlist/mergeable.go @@ -1,10 +1,16 @@ package memberlist -import "time" +import ( + "time" + + "github.com/cortexproject/cortex/pkg/ring/kv/codec" +) // Mergeable is an interface that values used in gossiping KV Client must implement. // It allows merging of different states, obtained via gossiping or CAS function. type Mergeable interface { + codec.Clonable + // Merge with other value in place. Returns change, that can be sent to other clients. // If merge doesn't result in any change, returns nil. // Error can be returned if merging with given 'other' value is not possible. @@ -35,17 +41,14 @@ type Mergeable interface { // used when doing CAS operation) Merge(other Mergeable, localCAS bool) (change Mergeable, error error) - // Describes the content of this mergeable value. Used by memberlist client to decide if + // MergeContent Describes the content of this mergeable value. Used by memberlist client to decide if // one change-value can invalidate some other value, that was received previously. // Invalidation can happen only if output of MergeContent is a superset of some other MergeContent. MergeContent() []string - // Remove tombstones older than given limit from this mergeable. + // RemoveTombstones Remove tombstones older than given limit from this mergeable. // If limit is zero time, remove all tombstones. Memberlist client calls this method with zero limit each // time when client is accessing value from the store. It can be used to hide tombstones from the clients. // Returns the total number of tombstones present and the number of removed tombstones by this invocation. RemoveTombstones(limit time.Time) (total, removed int) - - // Clone should return a deep copy of the state. 
- Clone() Mergeable } diff --git a/pkg/ring/kv/metrics.go b/pkg/ring/kv/metrics.go index 66fe9fa91be..38ec3b59cd6 100644 --- a/pkg/ring/kv/metrics.go +++ b/pkg/ring/kv/metrics.go @@ -3,6 +3,7 @@ package kv import ( "context" "strconv" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -106,3 +107,7 @@ func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, return nil }) } + +func (m metrics) LastUpdateTime(key string) time.Time { + return m.c.LastUpdateTime(key) +} diff --git a/pkg/ring/kv/mock.go b/pkg/ring/kv/mock.go index 5b446d854d6..cbe23106a8e 100644 --- a/pkg/ring/kv/mock.go +++ b/pkg/ring/kv/mock.go @@ -2,6 +2,7 @@ package kv import ( "context" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -37,3 +38,7 @@ func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{} func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { } + +func (m mockClient) LastUpdateTime(key string) time.Time { + return time.Now().UTC() +} diff --git a/pkg/ring/kv/multi.go b/pkg/ring/kv/multi.go index 8a3382e9859..e4ac994d769 100644 --- a/pkg/ring/kv/multi.go +++ b/pkg/ring/kv/multi.go @@ -335,6 +335,11 @@ func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(str }) } +func (m *MultiClient) LastUpdateTime(key string) time.Time { + _, kv := m.getPrimaryClient() + return kv.client.LastUpdateTime(key) +} + func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue interface{}) { if m.mirrorTimeout > 0 { var cfn context.CancelFunc diff --git a/pkg/ring/kv/prefix.go b/pkg/ring/kv/prefix.go index 5775cf17b94..aba9b7a092c 100644 --- a/pkg/ring/kv/prefix.go +++ b/pkg/ring/kv/prefix.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strings" + "time" ) type prefixedKVClient struct { @@ -61,3 +62,7 @@ func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, er func (c *prefixedKVClient) Delete(ctx context.Context, key string) error { return c.client.Delete(ctx, c.prefix+key) } + +func (c *prefixedKVClient) LastUpdateTime(key string) time.Time { + return c.client.LastUpdateTime(c.prefix + key) +} diff --git a/pkg/ring/lifecycler.go b/pkg/ring/lifecycler.go index f49a5b65dd0..1520356a142 100644 --- a/pkg/ring/lifecycler.go +++ b/pkg/ring/lifecycler.go @@ -247,7 +247,7 @@ func (i *Lifecycler) checkRingHealthForReadiness(ctx context.Context) error { } if i.cfg.ReadinessCheckRingHealth { - if err := ringDesc.IsReady(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { + if err := ringDesc.IsReady(i.KVStore.LastUpdateTime(i.RingKey), i.cfg.RingConfig.HeartbeatTimeout); err != nil { level.Warn(i.logger).Log("msg", "found an existing instance(s) with a problem in the ring, "+ "this instance cannot become ready until this problem is resolved. 
"+ "The /ring http endpoint on the distributor (or single binary) provides visibility into the ring.", @@ -260,7 +260,7 @@ func (i *Lifecycler) checkRingHealthForReadiness(ctx context.Context) error { return fmt.Errorf("instance %s not found in the ring", i.ID) } - if err := instance.IsReady(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { + if err := instance.IsReady(i.KVStore.LastUpdateTime(i.RingKey), i.cfg.RingConfig.HeartbeatTimeout); err != nil { return err } } @@ -818,13 +818,13 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) { zones := map[string]struct{}{} if ringDesc != nil { - now := time.Now() + lastUpdated := i.KVStore.LastUpdateTime(i.RingKey) for _, ingester := range ringDesc.Ingesters { zones[ingester.Zone] = struct{}{} // Count the number of healthy instances for Write operation. - if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, now) { + if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, lastUpdated) { healthyInstancesCount++ } } diff --git a/pkg/ring/lifecycler_test.go b/pkg/ring/lifecycler_test.go index f19a21f1b19..033ddbcc6df 100644 --- a/pkg/ring/lifecycler_test.go +++ b/pkg/ring/lifecycler_test.go @@ -389,6 +389,10 @@ func (m *MockClient) WatchPrefix(ctx context.Context, prefix string, f func(stri } } +func (m *MockClient) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} + // Ensure a check ready returns error when consul returns a nil key and the ingester already holds keys. This happens if the ring key gets deleted func TestCheckReady_NoRingInKVStore(t *testing.T) { ctx := context.Background() diff --git a/pkg/ring/model.go b/pkg/ring/model.go index 8b07cb09d6e..88fcdd2ade6 100644 --- a/pkg/ring/model.go +++ b/pkg/ring/model.go @@ -100,10 +100,10 @@ func (d *Desc) FindIngestersByState(state InstanceState) []InstanceDesc { // IsReady returns no error when all instance are ACTIVE and healthy, // and the ring has some tokens. -func (d *Desc) IsReady(now time.Time, heartbeatTimeout time.Duration) error { +func (d *Desc) IsReady(storageLastUpdated time.Time, heartbeatTimeout time.Duration) error { numTokens := 0 for _, instance := range d.Ingesters { - if err := instance.IsReady(now, heartbeatTimeout); err != nil { + if err := instance.IsReady(storageLastUpdated, heartbeatTimeout); err != nil { return err } numTokens += len(instance.Tokens) @@ -133,24 +133,24 @@ func (i *InstanceDesc) GetRegisteredAt() time.Time { return time.Unix(i.RegisteredTimestamp, 0) } -func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool { +func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, storageLastUpdated time.Time) bool { healthy := op.IsInstanceInStateHealthy(i.State) - return healthy && i.IsHeartbeatHealthy(heartbeatTimeout, now) + return healthy && i.IsHeartbeatHealthy(heartbeatTimeout, storageLastUpdated) } // IsHeartbeatHealthy returns whether the heartbeat timestamp for the ingester is within the // specified timeout period. A timeout of zero disables the timeout; the heartbeat is ignored. -func (i *InstanceDesc) IsHeartbeatHealthy(heartbeatTimeout time.Duration, now time.Time) bool { +func (i *InstanceDesc) IsHeartbeatHealthy(heartbeatTimeout time.Duration, storageLastUpdated time.Time) bool { if heartbeatTimeout == 0 { return true } - return now.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout + return storageLastUpdated.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout } // IsReady returns no error if the instance is ACTIVE and healthy. 
-func (i *InstanceDesc) IsReady(now time.Time, heartbeatTimeout time.Duration) error { - if !i.IsHeartbeatHealthy(heartbeatTimeout, now) { +func (i *InstanceDesc) IsReady(storageLastUpdated time.Time, heartbeatTimeout time.Duration) error { + if !i.IsHeartbeatHealthy(heartbeatTimeout, storageLastUpdated) { return fmt.Errorf("instance %s past heartbeat timeout", i.Addr) } if i.State != ACTIVE { @@ -439,7 +439,7 @@ func (d *Desc) RemoveTombstones(limit time.Time) (total, removed int) { } // Clone returns a deep copy of the ring state. -func (d *Desc) Clone() memberlist.Mergeable { +func (d *Desc) Clone() interface{} { return proto.Clone(d).(*Desc) } @@ -649,3 +649,91 @@ func MergeTokensByZone(zones map[string][][]uint32) map[string][]uint32 { } return out } + +func (d *Desc) SplitByID() map[string]interface{} { + out := make(map[string]interface{}, len(d.Ingesters)) + for key := range d.Ingesters { + in := d.Ingesters[key] + out[key] = &in + } + return out +} + +func (d *Desc) JoinIds(in map[string]interface{}) { + for key, value := range in { + d.Ingesters[key] = *(value.(*InstanceDesc)) + } +} + +func (d *Desc) GetItemFactory() proto.Message { + return &InstanceDesc{} +} + +func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) { + out, ok := o.(*Desc) + if !ok { + // This method only deals with non-nil rings. + return nil, nil, fmt.Errorf("expected *ring.Desc, got %T", out) + } + + toUpdated := NewDesc() + toDelete := make([]string, 0) + tokensChanged := false + + // If both are null + if d == nil && out == nil { + return toUpdated, toDelete, nil + } + + // If new data is empty + if out == nil { + for k := range d.Ingesters { + toDelete = append(toDelete, k) + } + return toUpdated, toDelete, nil + } + + //If existent data is empty + if d == nil { + for key, value := range out.Ingesters { + toUpdated.Ingesters[key] = value + } + return toUpdated, toDelete, nil + } + + //If new added + for name, oing := range out.Ingesters { + if _, ok := d.Ingesters[name]; !ok { + tokensChanged = true + toUpdated.Ingesters[name] = oing + } + } + + // If removed or updated + for name, ing := range d.Ingesters { + oing, ok := out.Ingesters[name] + if !ok { + toDelete = append(toDelete, name) + } else if !ing.Equal(oing) { + if !tokensEqual(ing.Tokens, oing.Tokens) { + tokensChanged = true + } + toUpdated.Ingesters[name] = oing + } + } + + // resolveConflicts allocates a lot of memory, so if we can avoid it, do that. 
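+	// Conflicts can only appear when token ownership changed, so they are resolved
+	// on the incoming copy and every instance whose resolved state differs from the
+	// local one is re-added to the update set.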
+ if tokensChanged && conflictingTokensExist(out.Ingesters) { + resolveConflicts(out.Ingesters) + + //Recheck if any instance was updated by the resolveConflict + for name, oing := range out.Ingesters { + ing, ok := d.Ingesters[name] + if !ok || !ing.Equal(oing) { + toUpdated.Ingesters[name] = oing + } + } + } + + return toUpdated, toDelete, nil +} diff --git a/pkg/ring/model_test.go b/pkg/ring/model_test.go index ae39b1ba031..f20eb411755 100644 --- a/pkg/ring/model_test.go +++ b/pkg/ring/model_test.go @@ -416,3 +416,187 @@ func TestMergeTokensByZone(t *testing.T) { }) } } + +func TestDesc_SplitById_JoinIds(t *testing.T) { + tests := map[string]struct { + ring *Desc + split map[string]interface{} + }{ + "empty ring": { + ring: &Desc{Ingesters: map[string]InstanceDesc{}}, + split: map[string]interface{}{}, + }, + "single instance": { + ring: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}}}, + split: map[string]interface{}{"ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}}, + }, + "two instances": { + ring: &Desc{Ingesters: map[string]InstanceDesc{ + "ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}, + "ing2": {Addr: "addr2", Tokens: []uint32{3, 4, 5}, Timestamp: 5678, State: ACTIVE, Zone: "zone2", RegisteredTimestamp: 567}, + }}, + split: map[string]interface{}{ + "ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}, + "ing2": &InstanceDesc{Addr: "addr2", Tokens: []uint32{3, 4, 5}, Timestamp: 5678, State: ACTIVE, Zone: "zone2", RegisteredTimestamp: 567}, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + assert.Equal(t, testData.split, testData.ring.SplitByID(), "Error on SplitById") + + r := NewDesc() + r.JoinIds(testData.split) + assert.Equal(t, testData.ring, r, "Error on JoinIds") + }) + } +} + +func TestDesc_EncodingMultikey(t *testing.T) { + codec := GetCodec() + ring := &Desc{Ingesters: map[string]InstanceDesc{ + "ing1": {Addr: "addr1", Timestamp: 123456}, + "ing2": {Addr: "addr2", Timestamp: 5678}, + "ing3": {}, + }} + encoded, err := codec.EncodeMultiKey(ring) + assert.NoError(t, err) + decoded, err := codec.DecodeMultiKey(encoded) + assert.NoError(t, err) + + assert.Equal(t, ring, decoded) +} + +func TestDesc_FindDifference(t *testing.T) { + tests := map[string]struct { + r1 *Desc + r2 *Desc + toUpdate interface{} + toDelete interface{} + }{ + "nil rings": { + r1: nil, + r2: nil, + toUpdate: NewDesc(), + toDelete: []string{}, + }, + "one nil, one empty ring": { + r1: nil, + r2: &Desc{Ingesters: map[string]InstanceDesc{}}, + toUpdate: NewDesc(), + toDelete: []string{}, + }, + "one empty, one nil": { + r1: &Desc{Ingesters: map[string]InstanceDesc{}}, + r2: nil, + toUpdate: NewDesc(), + toDelete: []string{}, + }, + "two empty rings": { + r1: &Desc{Ingesters: map[string]InstanceDesc{}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{}}, + toUpdate: NewDesc(), + toDelete: []string{}, + }, + "same single instance": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + toUpdate: NewDesc(), + toDelete: []string{}, + }, + "one nil, single instance": { + r1: nil, + r2: 
&Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + toDelete: []string{}, + }, + "one instance, nil": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + r2: nil, + toUpdate: NewDesc(), + toDelete: []string{"ing1"}, + }, + "same single instance, different timestamp": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Timestamp: 123456}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Timestamp: 789012}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Timestamp: 789012}}}, + toDelete: []string{}, + }, + "same single instance, different state": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", State: ACTIVE}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", State: JOINING}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", State: JOINING}}}, + toDelete: []string{}, + }, + "same single instance, different registered timestamp": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", State: ACTIVE, RegisteredTimestamp: 1}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", State: ACTIVE, RegisteredTimestamp: 2}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", State: ACTIVE, RegisteredTimestamp: 2}}}, + toDelete: []string{}, + }, + "instance in different zone": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Zone: "one"}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Zone: "two"}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Zone: "two"}}}, + toDelete: []string{}, + }, + "same instance, different address": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr2"}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr2"}}}, + toDelete: []string{}, + }, + "more instances in one ring": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}, "ing2": {Addr: "ing2"}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + toUpdate: NewDesc(), + toDelete: []string{"ing2"}, + }, + "more instances in second ring": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}, "ing2": {Addr: "ing2"}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing2": {Addr: "ing2"}}}, + toDelete: []string{}, + }, + "different tokens": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1"}}}, + toDelete: []string{}, + }, + "different tokens 2": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 4}}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 4}}}}, + toDelete: []string{}, + }, + "different instances, conflictTokens new lose": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + r2: &Desc{Ingesters: 
map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}, "ing2": {Addr: "addr1", Tokens: []uint32{1, 2, 4}}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing2": {Addr: "addr1", Tokens: []uint32{4}}}}, + toDelete: []string{}, + }, + "different instances, conflictTokens new win": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing5": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing5": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}, "ing2": {Addr: "addr1", Tokens: []uint32{1, 2, 4}}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing2": {Addr: "addr1", Tokens: []uint32{1, 2, 4}}, "ing5": {Addr: "addr1", Tokens: []uint32{3}}}}, + toDelete: []string{}, + }, + "same number of instances, using different IDs": { + r1: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + r2: &Desc{Ingesters: map[string]InstanceDesc{"ing2": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + toUpdate: &Desc{Ingesters: map[string]InstanceDesc{"ing2": {Addr: "addr1", Tokens: []uint32{1, 2, 3}}}}, + toDelete: []string{"ing1"}, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + toUpdate, toDelete, err := testData.r1.FindDifference(testData.r2) + assert.Equal(t, testData.toUpdate, toUpdate) + assert.Equal(t, testData.toDelete, toDelete) + assert.NoError(t, err) + }) + } +} diff --git a/pkg/ring/replication_strategy.go b/pkg/ring/replication_strategy.go index 32eff1384a5..df57448a8a1 100644 --- a/pkg/ring/replication_strategy.go +++ b/pkg/ring/replication_strategy.go @@ -10,7 +10,7 @@ type ReplicationStrategy interface { // Filter out unhealthy instances and checks if there're enough instances // for an operation to succeed. Returns an error if there are not enough // instances. - Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []InstanceDesc, maxFailures int, err error) + Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool, storageLastUpdate time.Time) (healthy []InstanceDesc, maxFailures int, err error) } type defaultReplicationStrategy struct{} @@ -25,13 +25,11 @@ func NewDefaultReplicationStrategy() ReplicationStrategy { // - Filters out unhealthy instances so the one doesn't even try to write to them. // - Checks there are enough instances for an operation to succeed. // The instances argument may be overwritten. -func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]InstanceDesc, int, error) { - now := time.Now() - +func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool, storageLastUpdate time.Time) ([]InstanceDesc, int, error) { // Skip those that have not heartbeated in a while. 
var unhealthy []string for i := 0; i < len(instances); { - if instances[i].IsHealthy(op, heartbeatTimeout, now) { + if instances[i].IsHealthy(op, heartbeatTimeout, storageLastUpdate) { i++ } else { unhealthy = append(unhealthy, instances[i].Addr) @@ -74,12 +72,11 @@ func NewIgnoreUnhealthyInstancesReplicationStrategy() ReplicationStrategy { return &ignoreUnhealthyInstancesReplicationStrategy{} } -func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []InstanceDesc, maxFailures int, err error) { - now := time.Now() +func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool, storageLastUpdate time.Time) (healthy []InstanceDesc, maxFailures int, err error) { // Filter out unhealthy instances. var unhealthy []string for i := 0; i < len(instances); { - if instances[i].IsHealthy(op, heartbeatTimeout, now) { + if instances[i].IsHealthy(op, heartbeatTimeout, storageLastUpdate) { i++ } else { unhealthy = append(unhealthy, instances[i].Addr) @@ -99,8 +96,8 @@ func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []Instanc return instances, len(instances) - 1, nil } -func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, now time.Time) bool { - return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, now) +func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, storageLastUpdate time.Time) bool { + return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, storageLastUpdate) } // ReplicationFactor of the ring. diff --git a/pkg/ring/replication_strategy_test.go b/pkg/ring/replication_strategy_test.go index 75af12671ce..c7b9d556b9e 100644 --- a/pkg/ring/replication_strategy_test.go +++ b/pkg/ring/replication_strategy_test.go @@ -90,7 +90,7 @@ func TestRingReplicationStrategy(t *testing.T) { t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { strategy := NewDefaultReplicationStrategy() - liveIngesters, maxFailure, err := strategy.Filter(ingesters, Read, tc.replicationFactor, 100*time.Second, false) + liveIngesters, maxFailure, err := strategy.Filter(ingesters, Read, tc.replicationFactor, 100*time.Second, false, time.Now().UTC()) if tc.expectedError == "" { assert.NoError(t, err) assert.Equal(t, tc.liveIngesters, len(liveIngesters)) @@ -152,7 +152,7 @@ func TestIgnoreUnhealthyInstancesReplicationStrategy(t *testing.T) { t.Run(tc.name, func(t *testing.T) { strategy := NewIgnoreUnhealthyInstancesReplicationStrategy() - liveIngesters, maxFailure, err := strategy.Filter(ingesters, Read, 3, 100*time.Second, false) + liveIngesters, maxFailure, err := strategy.Filter(ingesters, Read, 3, 100*time.Second, false, time.Now().UTC()) if tc.expectedError == "" { assert.NoError(t, err) assert.Equal(t, tc.liveIngesters, len(liveIngesters)) diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index 51bc6231f08..d12b579ac08 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -395,7 +395,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, instances = append(instances, instance) } - healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled) + healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled, r.KVClient.LastUpdateTime(r.key)) if err != nil { return ReplicationSet{}, err } @@ -415,10 +415,10 @@ func 
(r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) { return ReplicationSet{}, ErrEmptyRing } - now := time.Now() + storageLastUpdate := r.KVClient.LastUpdateTime(r.key) instances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) for _, instance := range r.ringDesc.Ingesters { - if r.IsHealthy(&instance, op, now) { + if r.IsHealthy(&instance, op, storageLastUpdate) { instances = append(instances, instance) } } @@ -441,10 +441,10 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro // Build the initial replication set, excluding unhealthy instances. healthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) zoneFailures := make(map[string]struct{}) - now := time.Now() + storageLastUpdate := r.KVClient.LastUpdateTime(r.key) for _, instance := range r.ringDesc.Ingesters { - if r.IsHealthy(&instance, op, now) { + if r.IsHealthy(&instance, op, storageLastUpdate) { healthyInstances = append(healthyInstances, instance) } else { zoneFailures[instance.Zone] = struct{}{} @@ -559,7 +559,7 @@ func (r *Ring) updateRingMetrics(compareResult CompareResult) { for _, instance := range r.ringDesc.Ingesters { s := instance.State.String() - if !r.IsHealthy(&instance, Reporting, time.Now()) { + if !r.IsHealthy(&instance, Reporting, r.KVClient.LastUpdateTime(r.key)) { s = unhealthy } numByState[s]++ @@ -744,6 +744,7 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur ringTokens: shardDesc.GetTokens(), ringTokensByZone: shardTokensByZone, ringZones: getZones(shardTokensByZone), + KVClient: r.KVClient, // We reference the original map as is in order to avoid copying. It's safe to do // because this map is immutable by design and it's a superset of the actual instances diff --git a/pkg/ring/ring_test.go b/pkg/ring/ring_test.go index 1836e5f2cb9..9964e387d98 100644 --- a/pkg/ring/ring_test.go +++ b/pkg/ring/ring_test.go @@ -108,7 +108,7 @@ func benchmarkUpdateRingState(b *testing.B, numInstances, numZones, numTokens in // create the ring to set up metrics, but do not start registry := prometheus.NewRegistry() - ring, err := NewWithStoreClientAndStrategy(cfg, testRingName, testRingKey, nil, NewDefaultReplicationStrategy(), registry, log.NewNopLogger()) + ring, err := NewWithStoreClientAndStrategy(cfg, testRingName, testRingKey, &MockClient{}, NewDefaultReplicationStrategy(), registry, log.NewNopLogger()) require.NoError(b, err) // Make a random ring with N instances, and M tokens per ingests @@ -257,6 +257,7 @@ func TestRing_Get_ZoneAwarenessWithIngesterLeaving(t *testing.T) { ringInstanceByToken: r.getTokensInfo(), ringZones: getZones(r.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } _, bufHosts, bufZones := MakeBuffersForGet() @@ -349,6 +350,7 @@ func TestRing_Get_ZoneAwareness(t *testing.T) { ringInstanceByToken: r.getTokensInfo(), ringZones: getZones(r.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } instances := make([]InstanceDesc, 0, len(r.GetIngesters())) @@ -443,6 +445,7 @@ func TestRing_GetAllHealthy(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } set, err := ring.GetAllHealthy(Read) @@ -573,6 +576,7 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + 
KVClient: &MockClient{}, } set, err := ring.GetReplicationSetForOperation(Read) @@ -891,6 +895,7 @@ func TestRing_GetReplicationSetForOperation_WithZoneAwarenessEnabled(t *testing. ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // Check the replication set has the correct settings @@ -1027,6 +1032,7 @@ func TestRing_ShuffleShard(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } shardRing := ring.ShuffleShard("tenant-id", testData.shardSize) @@ -1079,6 +1085,7 @@ func TestRing_ShuffleShard_Stability(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } for i := 1; i <= numTenants; i++ { @@ -1147,6 +1154,7 @@ func TestRing_ShuffleShard_Shuffling(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // Compute the shard for each tenant. @@ -1246,6 +1254,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // Compute the initial shard for each tenant. @@ -1310,6 +1319,7 @@ func TestRing_ShuffleShard_ConsistencyOnShardSizeChanged(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // Get the replication set with shard size = 3. @@ -1387,6 +1397,7 @@ func TestRing_ShuffleShard_ConsistencyOnZonesChanged(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // Get the replication set with shard size = 2. @@ -1646,6 +1657,7 @@ func TestRing_ShuffleShardWithLookback(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // Replay the events on the timeline. @@ -1711,6 +1723,7 @@ func TestRing_ShuffleShardWithLookback_CorrectnessWithFuzzy(t *testing.T) { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } // The simulation starts with the minimum shard size. Random events can later increase it. 
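These ring tests and benchmarks now inject a KV client into the Ring because instance health is evaluated against the store's last-update time (see r.KVClient.LastUpdateTime above) rather than time.Now(). A minimal sketch of that contract, for illustration only; the names staleAwareKV and fakeKV below are hypothetical, while the patch itself uses MockClient and the ring's existing KV client interface:

package ring

import "time"

// staleAwareKV is a hypothetical view of the single call the Ring makes on
// its KV client when checking instance health.
type staleAwareKV interface {
	LastUpdateTime(key string) time.Time
}

// fakeKV is a stand-in a test could use; returning the current wall-clock
// time preserves the previous time.Now()-based heartbeat behaviour.
type fakeKV struct{}

func (fakeKV) LastUpdateTime(string) time.Time { return time.Now().UTC() }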
@@ -1865,6 +1878,7 @@ func benchmarkShuffleSharding(b *testing.B, numInstances, numZones, numTokens, s shuffledSubringCache: map[subringCacheKey]*Ring{}, strategy: NewDefaultReplicationStrategy(), lastTopologyChange: time.Now(), + KVClient: &MockClient{}, } b.ResetTimer() @@ -1893,6 +1907,7 @@ func BenchmarkRing_Get(b *testing.B) { shuffledSubringCache: map[subringCacheKey]*Ring{}, strategy: NewDefaultReplicationStrategy(), lastTopologyChange: time.Now(), + KVClient: &MockClient{}, } buf, bufHosts, bufZones := MakeBuffersForGet() @@ -1921,6 +1936,7 @@ func TestRing_Get_NoMemoryAllocations(t *testing.T) { shuffledSubringCache: map[subringCacheKey]*Ring{}, strategy: NewDefaultReplicationStrategy(), lastTopologyChange: time.Now(), + KVClient: &MockClient{}, } buf, bufHosts, bufZones := MakeBuffersForGet() @@ -2235,7 +2251,7 @@ func TestUpdateMetrics(t *testing.T) { registry := prometheus.NewRegistry() // create the ring to set up metrics, but do not start - ring, err := NewWithStoreClientAndStrategy(cfg, testRingName, testRingKey, nil, NewDefaultReplicationStrategy(), registry, log.NewNopLogger()) + ring, err := NewWithStoreClientAndStrategy(cfg, testRingName, testRingKey, &MockClient{}, NewDefaultReplicationStrategy(), registry, log.NewNopLogger()) require.NoError(t, err) ringDesc := Desc{ @@ -2287,7 +2303,7 @@ func TestUpdateMetricsWithRemoval(t *testing.T) { registry := prometheus.NewRegistry() // create the ring to set up metrics, but do not start - ring, err := NewWithStoreClientAndStrategy(cfg, testRingName, testRingKey, nil, NewDefaultReplicationStrategy(), registry, log.NewNopLogger()) + ring, err := NewWithStoreClientAndStrategy(cfg, testRingName, testRingKey, &MockClient{}, NewDefaultReplicationStrategy(), registry, log.NewNopLogger()) require.NoError(t, err) ringDesc := Desc{ diff --git a/pkg/ring/util_test.go b/pkg/ring/util_test.go index 8a4563e5dcb..73a013862a1 100644 --- a/pkg/ring/util_test.go +++ b/pkg/ring/util_test.go @@ -114,6 +114,7 @@ func createStartingRing() *Ring { ringInstanceByToken: ringDesc.getTokensInfo(), ringZones: getZones(ringDesc.getTokensByZone()), strategy: NewDefaultReplicationStrategy(), + KVClient: &MockClient{}, } return ring diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go new file mode 100644 index 00000000000..c07f6731ea7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go @@ -0,0 +1,122 @@ +package crr + +import ( + "sync/atomic" +) + +// EndpointCache is an LRU cache that holds a series of endpoints +// based on some key. The datastructure makes use of a read write +// mutex to enable asynchronous use. +type EndpointCache struct { + endpoints syncMap + endpointLimit int64 + // size is used to count the number elements in the cache. + // The atomic package is used to ensure this size is accurate when + // using multiple goroutines. + size int64 +} + +// NewEndpointCache will return a newly initialized cache with a limit +// of endpointLimit entries. +func NewEndpointCache(endpointLimit int64) *EndpointCache { + return &EndpointCache{ + endpointLimit: endpointLimit, + endpoints: newSyncMap(), + } +} + +// get is a concurrent safe get operation that will retrieve an endpoint +// based on endpointKey. A boolean will also be returned to illustrate whether +// or not the endpoint had been found. 
+func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) { + endpoint, ok := c.endpoints.Load(endpointKey) + if !ok { + return Endpoint{}, false + } + + ev := endpoint.(Endpoint) + ev.Prune() + + c.endpoints.Store(endpointKey, ev) + return endpoint.(Endpoint), true +} + +// Has returns if the enpoint cache contains a valid entry for the endpoint key +// provided. +func (c *EndpointCache) Has(endpointKey string) bool { + endpoint, ok := c.get(endpointKey) + _, found := endpoint.GetValidAddress() + + return ok && found +} + +// Get will retrieve a weighted address based off of the endpoint key. If an endpoint +// should be retrieved, due to not existing or the current endpoint has expired +// the Discoverer object that was passed in will attempt to discover a new endpoint +// and add that to the cache. +func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) { + var err error + endpoint, ok := c.get(endpointKey) + weighted, found := endpoint.GetValidAddress() + shouldGet := !ok || !found + + if required && shouldGet { + if endpoint, err = c.discover(d, endpointKey); err != nil { + return WeightedAddress{}, err + } + + weighted, _ = endpoint.GetValidAddress() + } else if shouldGet { + go c.discover(d, endpointKey) + } + + return weighted, nil +} + +// Add is a concurrent safe operation that will allow new endpoints to be added +// to the cache. If the cache is full, the number of endpoints equal endpointLimit, +// then this will remove the oldest entry before adding the new endpoint. +func (c *EndpointCache) Add(endpoint Endpoint) { + // de-dups multiple adds of an endpoint with a pre-existing key + if iface, ok := c.endpoints.Load(endpoint.Key); ok { + e := iface.(Endpoint) + if e.Len() > 0 { + return + } + } + c.endpoints.Store(endpoint.Key, endpoint) + + size := atomic.AddInt64(&c.size, 1) + if size > 0 && size > c.endpointLimit { + c.deleteRandomKey() + } +} + +// deleteRandomKey will delete a random key from the cache. If +// no key was deleted false will be returned. +func (c *EndpointCache) deleteRandomKey() bool { + atomic.AddInt64(&c.size, -1) + found := false + + c.endpoints.Range(func(key, value interface{}) bool { + found = true + c.endpoints.Delete(key) + + return false + }) + + return found +} + +// discover will get and store and endpoint using the Discoverer. +func (c *EndpointCache) discover(d Discoverer, endpointKey string) (Endpoint, error) { + endpoint, err := d.Discover() + if err != nil { + return Endpoint{}, err + } + + endpoint.Key = endpointKey + c.Add(endpoint) + + return endpoint, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go new file mode 100644 index 00000000000..2b088bdbc74 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go @@ -0,0 +1,132 @@ +package crr + +import ( + "net/url" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +// Endpoint represents an endpoint used in endpoint discovery. +type Endpoint struct { + Key string + Addresses WeightedAddresses +} + +// WeightedAddresses represents a list of WeightedAddress. +type WeightedAddresses []WeightedAddress + +// WeightedAddress represents an address with a given weight. +type WeightedAddress struct { + URL *url.URL + Expired time.Time +} + +// HasExpired will return whether or not the endpoint has expired with +// the exception of a zero expiry meaning does not expire. 
+func (e WeightedAddress) HasExpired() bool { + return e.Expired.Before(time.Now()) +} + +// Add will add a given WeightedAddress to the address list of Endpoint. +func (e *Endpoint) Add(addr WeightedAddress) { + e.Addresses = append(e.Addresses, addr) +} + +// Len returns the number of valid endpoints where valid means the endpoint +// has not expired. +func (e *Endpoint) Len() int { + validEndpoints := 0 + for _, endpoint := range e.Addresses { + if endpoint.HasExpired() { + continue + } + + validEndpoints++ + } + return validEndpoints +} + +// GetValidAddress will return a non-expired weight endpoint +func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) { + for i := 0; i < len(e.Addresses); i++ { + we := e.Addresses[i] + + if we.HasExpired() { + e.Addresses = append(e.Addresses[:i], e.Addresses[i+1:]...) + i-- + continue + } + + we.URL = cloneURL(we.URL) + + return we, true + } + + return WeightedAddress{}, false +} + +// Prune will prune the expired addresses from the endpoint by allocating a new []WeightAddress. +// This is not concurrent safe, and should be called from a single owning thread. +func (e *Endpoint) Prune() bool { + validLen := e.Len() + if validLen == len(e.Addresses) { + return false + } + wa := make([]WeightedAddress, 0, validLen) + for i := range e.Addresses { + if e.Addresses[i].HasExpired() { + continue + } + wa = append(wa, e.Addresses[i]) + } + e.Addresses = wa + return true +} + +// Discoverer is an interface used to discovery which endpoint hit. This +// allows for specifics about what parameters need to be used to be contained +// in the Discoverer implementor. +type Discoverer interface { + Discover() (Endpoint, error) +} + +// BuildEndpointKey will sort the keys in alphabetical order and then retrieve +// the values in that order. Those values are then concatenated together to form +// the endpoint key. 
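+// (Editorial worked example, not part of the upstream SDK source.) For
+// instance, given params {"op": aws.String("BatchGetItem"), "table": aws.String("users")},
+// the keys sort to ["op", "table"] and the resulting endpoint key is
+// "BatchGetItem.users".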
+func BuildEndpointKey(params map[string]*string) string { + keys := make([]string, len(params)) + i := 0 + + for k := range params { + keys[i] = k + i++ + } + sort.Strings(keys) + + values := make([]string, len(params)) + for i, k := range keys { + if params[k] == nil { + continue + } + + values[i] = aws.StringValue(params[k]) + } + + return strings.Join(values, ".") +} + +func cloneURL(u *url.URL) (clone *url.URL) { + clone = &url.URL{} + + *clone = *u + + if u.User != nil { + user := *u.User + clone.User = &user + } + + return clone +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go new file mode 100644 index 00000000000..f7b65ac0134 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go @@ -0,0 +1,30 @@ +//go:build go1.9 +// +build go1.9 + +package crr + +import ( + "sync" +) + +type syncMap sync.Map + +func newSyncMap() syncMap { + return syncMap{} +} + +func (m *syncMap) Load(key interface{}) (interface{}, bool) { + return (*sync.Map)(m).Load(key) +} + +func (m *syncMap) Store(key interface{}, value interface{}) { + (*sync.Map)(m).Store(key, value) +} + +func (m *syncMap) Delete(key interface{}) { + (*sync.Map)(m).Delete(key) +} + +func (m *syncMap) Range(f func(interface{}, interface{}) bool) { + (*sync.Map)(m).Range(f) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go new file mode 100644 index 00000000000..eb4f6aca2f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go @@ -0,0 +1,49 @@ +//go:build !go1.9 +// +build !go1.9 + +package crr + +import ( + "sync" +) + +type syncMap struct { + container map[interface{}]interface{} + lock sync.RWMutex +} + +func newSyncMap() syncMap { + return syncMap{ + container: map[interface{}]interface{}{}, + } +} + +func (m *syncMap) Load(key interface{}) (interface{}, bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + v, ok := m.container[key] + return v, ok +} + +func (m *syncMap) Store(key interface{}, value interface{}) { + m.lock.Lock() + defer m.lock.Unlock() + + m.container[key] = value +} + +func (m *syncMap) Delete(key interface{}) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.container, key) +} + +func (m *syncMap) Range(f func(interface{}, interface{}) bool) { + for k, v := range m.container { + if !f(k, v) { + return + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go new file mode 100644 index 00000000000..c7727107eb8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -0,0 +1,26975 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/crr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchExecuteStatement = "BatchExecuteStatement" + +// BatchExecuteStatementRequest generates a "aws/request.Request" representing the +// client's request for the BatchExecuteStatement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See BatchExecuteStatement for more information on using the BatchExecuteStatement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchExecuteStatementRequest method. +// req, resp := client.BatchExecuteStatementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement +func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInput) (req *request.Request, output *BatchExecuteStatementOutput) { + op := &request.Operation{ + Name: opBatchExecuteStatement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchExecuteStatementInput{} + } + + output = &BatchExecuteStatementOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchExecuteStatement API operation for Amazon DynamoDB. +// +// This operation allows you to perform batch reads or writes on data stored +// in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement +// must specify an equality condition on all key attributes. This enforces that +// each SELECT statement in a batch returns at most a single item. +// +// The entire batch must consist of either read statements or write statements, +// you cannot mix both in one batch. +// +// A HTTP 200 response does not mean that all statements in the BatchExecuteStatement +// succeeded. Error details for individual statements can be found under the +// Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error) +// field of the BatchStatementResponse for each statement. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchExecuteStatement for usage and error information. +// +// Returned Error Types: +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement +func (c *DynamoDB) BatchExecuteStatement(input *BatchExecuteStatementInput) (*BatchExecuteStatementOutput, error) { + req, out := c.BatchExecuteStatementRequest(input) + return out, req.Send() +} + +// BatchExecuteStatementWithContext is the same as BatchExecuteStatement with the addition of +// the ability to pass a context and additional request options. +// +// See BatchExecuteStatement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
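+//
+// Editorial example (not generated code), assuming an already-constructed
+// *DynamoDB client "svc" and a prepared "input":
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	out, err := svc.BatchExecuteStatementWithContext(ctx, input)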
+func (c *DynamoDB) BatchExecuteStatementWithContext(ctx aws.Context, input *BatchExecuteStatementInput, opts ...request.Option) (*BatchExecuteStatementOutput, error) { + req, out := c.BatchExecuteStatementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchGetItem = "BatchGetItem" + +// BatchGetItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetItem for more information on using the BatchGetItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchGetItemRequest method. +// req, resp := client.BatchGetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem +func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { + op := &request.Operation{ + Name: opBatchGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"RequestItems"}, + OutputTokens: []string{"UnprocessedKeys"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &BatchGetItemInput{} + } + + output = &BatchGetItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// BatchGetItem API operation for Amazon DynamoDB. +// +// The BatchGetItem operation returns the attributes of one or more items from +// one or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as +// many as 100 items. BatchGetItem returns a partial result if the response +// size limit is exceeded, the table's provisioned throughput is exceeded, or +// an internal processing failure occurs. If a partial result is returned, the +// operation returns a value for UnprocessedKeys. You can use this value to +// retry the operation starting with the next item to get. +// +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." +// +// For example, if you ask to retrieve 100 items, but each individual item is +// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB +// limit). 
It also returns an appropriate UnprocessedKeys value so you can get +// the next page of results. If desired, your application can include its own +// logic to assemble the pages of results into one dataset. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. +// If at least one of the items is successfully processed, then BatchGetItem +// completes successfully, while returning the keys of the unread items in UnprocessedKeys. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every table +// in the request. If you want strongly consistent reads instead, you can set +// ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem retrieves items in parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include +// the primary key values for the items in your request in the ProjectionExpression +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to +// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchGetItem for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. 
+// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem +func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + return out, req.Send() +} + +// BatchGetItemWithContext is the same as BatchGetItem with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemInput, opts ...request.Option) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// BatchGetItemPages iterates over the pages of a BatchGetItem operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See BatchGetItem method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a BatchGetItem operation. +// pageNum := 0 +// err := client.BatchGetItemPages(params, +// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error { + return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// BatchGetItemPagesWithContext same as BatchGetItemPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *BatchGetItemInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.BatchGetItemRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opBatchWriteItem = "BatchWriteItem" + +// BatchWriteItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchWriteItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchWriteItem for more information on using the BatchWriteItem +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchWriteItemRequest method. +// req, resp := client.BatchWriteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem +func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { + op := &request.Operation{ + Name: opBatchWriteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchWriteItemInput{} + } + + output = &BatchWriteItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// BatchWriteItem API operation for Amazon DynamoDB. +// +// The BatchWriteItem operation puts or deletes multiple items in one or more +// tables. A single call to BatchWriteItem can transmit up to 16MB of data over +// the network, consisting of up to 25 item put or delete operations. While +// individual items can be up to 400 KB once stored, it's important to note +// that an item's representation might be greater than 400KB while being sent +// in DynamoDB's JSON format for the API call. For more details on this distinction, +// see Naming Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html). +// +// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation +// on an existing item, that item's values will be overwritten by the operation +// and it will appear like it was updated. To update items, we recommend you +// use the UpdateItem action. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. If any requested operations +// fail because the table's provisioned throughput is exceeded or an internal +// processing failure occurs, the failed operations are returned in the UnprocessedItems +// response parameter. You can investigate and optionally resend the requests. +// Typically, you would call BatchWriteItem in a loop. Each iteration would +// check for unprocessed items and submit a new BatchWriteItem request with +// those unprocessed items until all items have been processed. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. 
If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem, you can efficiently write or delete large amounts of +// data, such as from Amazon EMR, or copy data from another database into DynamoDB. +// In order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. +// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, +// you must update or delete the specified items one at a time. In both situations, +// BatchWriteItem performs the specified put and delete operations in parallel, +// giving you the power of the thread pool approach without having to introduce +// complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed +// in parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// +// - One or more tables specified in the BatchWriteItem request does not +// exist. +// +// - Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. +// +// - You try to perform multiple operations on the same item in the same +// BatchWriteItem request. For example, you cannot put and delete the same +// item in the same BatchWriteItem request. +// +// - Your request contains at least two items with identical hash and range +// keys (which essentially is two put operations). +// +// - There are more than 25 requests in the batch. +// +// - Any individual item in a batch exceeds 400 KB. +// +// - The total request size exceeds 16 MB. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchWriteItem for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. 
For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem +func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + return out, req.Send() +} + +// BatchWriteItemWithContext is the same as BatchWriteItem with the addition of +// the ability to pass a context and additional request options. +// +// See BatchWriteItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchWriteItemWithContext(ctx aws.Context, input *BatchWriteItemInput, opts ...request.Option) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBackup = "CreateBackup" + +// CreateBackupRequest generates a "aws/request.Request" representing the +// client's request for the CreateBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBackup for more information on using the CreateBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateBackupRequest method. +// req, resp := client.CreateBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup +func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) { + op := &request.Operation{ + Name: opCreateBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBackupInput{} + } + + output = &CreateBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// CreateBackup API operation for Amazon DynamoDB. +// +// Creates a backup for an existing table. +// +// Each time you create an on-demand backup, the entire table data is backed +// up. There is no limit to the number of on-demand backups that can be taken. +// +// When you create an on-demand backup, a time marker of the request is cataloged, +// and the backup is created asynchronously, by applying all changes until the +// time of the request to the last full table snapshot. Backup requests are +// processed instantaneously and become available for restore within minutes. +// +// You can call CreateBackup at a maximum rate of 50 times per second. +// +// All backups in DynamoDB work without consuming any provisioned throughput +// on the table. +// +// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed +// to contain all data committed to the table up to 14:24:00, and data committed +// after 14:26:00 will not be. The backup might contain data modifications made +// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency. +// +// Along with data, the following are also included on the backups: +// +// - Global secondary indexes (GSIs) +// +// - Local secondary indexes (LSIs) +// +// - Streams +// +// - Provisioned read and write capacity +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateBackup for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. +// +// - ContinuousBackupsUnavailableException +// Backups have not yet been enabled for this table. +// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. 
+// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup +func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) { + req, out := c.CreateBackupRequest(input) + return out, req.Send() +} + +// CreateBackupWithContext is the same as CreateBackup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) { + req, out := c.CreateBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateGlobalTable = "CreateGlobalTable" + +// CreateGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGlobalTable for more information on using the CreateGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateGlobalTableRequest method. +// req, resp := client.CreateGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable +func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) { + op := &request.Operation{ + Name: opCreateGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGlobalTableInput{} + } + + output = &CreateGlobalTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// CreateGlobalTable API operation for Amazon DynamoDB. +// +// Creates a global table from an existing table. A global table creates a replication +// relationship between two or more DynamoDB tables with the same table name +// in the provided Regions. 
+// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. +// +// If you want to add a new replica table to a global table, each of the following +// conditions must be true: +// +// - The table must have the same primary key as all of the other replicas. +// +// - The table must have the same name as all of the other replicas. +// +// - The table must have DynamoDB Streams enabled, with the stream containing +// both the new and the old images of the item. +// +// - None of the replica tables in the global table can contain any data. +// +// If global secondary indexes are specified, then the following conditions +// must also be met: +// +// - The global secondary indexes must have the same name. +// +// - The global secondary indexes must have the same hash key and sort key +// (if present). +// +// If local secondary indexes are specified, then the following conditions must +// also be met: +// +// - The local secondary indexes must have the same name. +// +// - The local secondary indexes must have the same hash key and sort key +// (if present). +// +// Write capacity settings should be set consistently across your replica tables +// and secondary indexes. DynamoDB strongly recommends enabling auto scaling +// to manage the write capacity settings for all of your global tables replicas +// and indexes. +// +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should +// also provision equal replicated write capacity units to matching secondary +// indexes across your global table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateGlobalTable for usage and error information. +// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableAlreadyExistsException +// The specified global table already exists. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable +func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) { + req, out := c.CreateGlobalTableRequest(input) + return out, req.Send() +} + +// CreateGlobalTableWithContext is the same as CreateGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateGlobalTableWithContext(ctx aws.Context, input *CreateGlobalTableInput, opts ...request.Option) (*CreateGlobalTableOutput, error) { + req, out := c.CreateGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTable = "CreateTable" + +// CreateTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTable for more information on using the CreateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTableRequest method. +// req, resp := client.CreateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable +func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { + op := &request.Operation{ + Name: opCreateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTableInput{} + } + + output = &CreateTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// CreateTable API operation for Amazon DynamoDB. +// +// The CreateTable operation adds a new table to your account. In an Amazon +// Web Services account, table names must be unique within each Region. That +// is, you can have two tables with same name if you create the tables in different +// Regions. +// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING. 
After +// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of +// the CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table +// with secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable action to check the table status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable +func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + return out, req.Send() +} + +// CreateTableWithContext is the same as CreateTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBackup = "DeleteBackup" + +// DeleteBackupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBackup for more information on using the DeleteBackup +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBackupRequest method. +// req, resp := client.DeleteBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup +func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) { + op := &request.Operation{ + Name: opDeleteBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBackupInput{} + } + + output = &DeleteBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteBackup API operation for Amazon DynamoDB. +// +// Deletes an existing backup of a table. +// +// You can call DeleteBackup at a maximum rate of 10 times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteBackup for usage and error information. +// +// Returned Error Types: +// +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup +func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) { + req, out := c.DeleteBackupRequest(input) + return out, req.Send() +} + +// DeleteBackupWithContext is the same as DeleteBackup with the addition of +// the ability to pass a context and additional request options. 
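+//
+// A minimal sketch (not part of the generated documentation) of calling the
+// WithContext variant with a deadline; svc is assumed to be an existing
+// *dynamodb.DynamoDB client, the backup ARN is a placeholder, and the standard
+// library context and time packages are assumed to be imported:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	_, err := svc.DeleteBackupWithContext(ctx, &dynamodb.DeleteBackupInput{
+//	    BackupArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/backup/PLACEHOLDER"),
+//	})
+//	if err != nil {
+//	    // an expired context surfaces here as a request error
+//	}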
+// +// See DeleteBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) { + req, out := c.DeleteBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteItem = "DeleteItem" + +// DeleteItemRequest generates a "aws/request.Request" representing the +// client's request for the DeleteItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteItem for more information on using the DeleteItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteItemRequest method. +// req, resp := client.DeleteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem +func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { + op := &request.Operation{ + Name: opDeleteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteItemInput{} + } + + output = &DeleteItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteItem API operation for Amazon DynamoDB. +// +// Deletes a single item in a table by primary key. You can perform a conditional +// delete operation that deletes the item if it exists, or if it has an expected +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// Unless you specify conditions, the DeleteItem is an idempotent operation; +// running it multiple times on the same item or attribute does not result in +// an error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteItem for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem +func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + return out, req.Send() +} + +// DeleteItemWithContext is the same as DeleteItem with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput, opts ...request.Option) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTable = "DeleteTable" + +// DeleteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTable for more information on using the DeleteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteTableRequest method. 
+// req, resp := client.DeleteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable +func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { + op := &request.Operation{ + Name: opDeleteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTableInput{} + } + + output = &DeleteTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteTable API operation for Amazon DynamoDB. +// +// The DeleteTable operation deletes a table and all of its items. After a DeleteTable +// request, the specified table is in the DELETING state until DynamoDB completes +// the deletion. If the table is in the ACTIVE state, you can delete it. If +// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. +// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. +// If table is already in the DELETING state, no error is returned. +// +// DynamoDB might continue to accept data read and write operations, such as +// GetItem and PutItem, on a table in the DELETING state until the table deletion +// is complete. +// +// When you delete a table, any indexes on that table are also deleted. +// +// If you have DynamoDB Streams enabled on the table, then the corresponding +// stream on that table goes into the DISABLED state, and the stream is automatically +// deleted after 24 hours. +// +// Use the DescribeTable action to check the status of the table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. 
However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable +func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) { + req, out := c.DeleteTableRequest(input) + return out, req.Send() +} + +// DeleteTableWithContext is the same as DeleteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) { + req, out := c.DeleteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeBackup = "DescribeBackup" + +// DescribeBackupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBackup for more information on using the DescribeBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeBackupRequest method. +// req, resp := client.DescribeBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup +func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) { + op := &request.Operation{ + Name: opDescribeBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBackupInput{} + } + + output = &DescribeBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeBackup API operation for Amazon DynamoDB. +// +// Describes an existing backup of a table. 
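+//
+// A minimal sketch (not part of the generated documentation), assuming svc is
+// an existing *dynamodb.DynamoDB client and the backup ARN is a placeholder:
+//
+//	out, err := svc.DescribeBackup(&dynamodb.DescribeBackupInput{
+//	    BackupArn: aws.String("arn:aws:dynamodb:us-west-2:123456789012:table/Music/backup/PLACEHOLDER"),
+//	})
+//	if err == nil && out.BackupDescription != nil && out.BackupDescription.BackupDetails != nil {
+//	    fmt.Println(aws.StringValue(out.BackupDescription.BackupDetails.BackupStatus))
+//	}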
+// +// You can call DescribeBackup at a maximum rate of 10 times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeBackup for usage and error information. +// +// Returned Error Types: +// +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup +func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) { + req, out := c.DescribeBackupRequest(input) + return out, req.Send() +} + +// DescribeBackupWithContext is the same as DescribeBackup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeBackupWithContext(ctx aws.Context, input *DescribeBackupInput, opts ...request.Option) (*DescribeBackupOutput, error) { + req, out := c.DescribeBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeContinuousBackups = "DescribeContinuousBackups" + +// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContinuousBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeContinuousBackups for more information on using the DescribeContinuousBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeContinuousBackupsRequest method. +// req, resp := client.DescribeContinuousBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups +func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) { + op := &request.Operation{ + Name: opDescribeContinuousBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContinuousBackupsInput{} + } + + output = &DescribeContinuousBackupsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeContinuousBackups API operation for Amazon DynamoDB. +// +// Checks the status of continuous backups and point in time recovery on the +// specified table. Continuous backups are ENABLED on all tables at table creation. +// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set +// to ENABLED. +// +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. +// +// You can call DescribeContinuousBackups at a maximum rate of 10 times per +// second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeContinuousBackups for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups +func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) { + req, out := c.DescribeContinuousBackupsRequest(input) + return out, req.Send() +} + +// DescribeContinuousBackupsWithContext is the same as DescribeContinuousBackups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeContinuousBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeContinuousBackupsWithContext(ctx aws.Context, input *DescribeContinuousBackupsInput, opts ...request.Option) (*DescribeContinuousBackupsOutput, error) { + req, out := c.DescribeContinuousBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeContributorInsights = "DescribeContributorInsights" + +// DescribeContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeContributorInsights for more information on using the DescribeContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeContributorInsightsRequest method. +// req, resp := client.DescribeContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights +func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributorInsightsInput) (req *request.Request, output *DescribeContributorInsightsOutput) { + op := &request.Operation{ + Name: opDescribeContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContributorInsightsInput{} + } + + output = &DescribeContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeContributorInsights API operation for Amazon DynamoDB. +// +// Returns information about contributor insights, for a given table or global +// secondary index. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeContributorInsights for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights +func (c *DynamoDB) DescribeContributorInsights(input *DescribeContributorInsightsInput) (*DescribeContributorInsightsOutput, error) { + req, out := c.DescribeContributorInsightsRequest(input) + return out, req.Send() +} + +// DescribeContributorInsightsWithContext is the same as DescribeContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeContributorInsightsWithContext(ctx aws.Context, input *DescribeContributorInsightsInput, opts ...request.Option) (*DescribeContributorInsightsOutput, error) { + req, out := c.DescribeContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEndpoints = "DescribeEndpoints" + +// DescribeEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEndpoints for more information on using the DescribeEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeEndpointsRequest method. +// req, resp := client.DescribeEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints +func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointsInput{} + } + + output = &DescribeEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEndpoints API operation for Amazon DynamoDB. +// +// Returns the regional endpoint information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeEndpoints for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints +func (c *DynamoDB) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + return out, req.Send() +} + +// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +type discovererDescribeEndpoints struct { + Client *DynamoDB + Required bool + EndpointCache *crr.EndpointCache + Params map[string]*string + Key string + req *request.Request +} + +func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) { + input := &DescribeEndpointsInput{} + + resp, err := d.Client.DescribeEndpoints(input) + if err != nil { + return crr.Endpoint{}, err + } + + endpoint := crr.Endpoint{ + Key: d.Key, + } + + for _, e := range resp.Endpoints { + if e.Address == nil { + continue + } + + address := *e.Address + + var scheme string + if idx := strings.Index(address, "://"); idx != -1 { + scheme = address[:idx] + } + + if len(scheme) == 0 { + address = fmt.Sprintf("%s://%s", d.req.HTTPRequest.URL.Scheme, address) + } + + cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes) + u, err := url.Parse(address) + if err != nil { + continue + } + + addr := crr.WeightedAddress{ + URL: u, + Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute), + } + + endpoint.Add(addr) + } + + d.EndpointCache.Add(endpoint) + + return endpoint, nil +} + +func (d *discovererDescribeEndpoints) Handler(r *request.Request) { + endpointKey := crr.BuildEndpointKey(d.Params) + d.Key = endpointKey + d.req = r + + endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required) + if err != nil { + r.Error = err + return + } + + if endpoint.URL != nil && len(endpoint.URL.String()) > 0 { + r.HTTPRequest.URL = endpoint.URL + } +} + +const opDescribeExport = "DescribeExport" + +// DescribeExportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExport for more information on using the DescribeExport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeExportRequest method. +// req, resp := client.DescribeExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) { + op := &request.Operation{ + Name: opDescribeExport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportInput{} + } + + output = &DescribeExportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExport API operation for Amazon DynamoDB. +// +// Describes an existing table export. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeExport for usage and error information. +// +// Returned Error Types: +// +// - ExportNotFoundException +// The specified export was not found. 
+// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + return out, req.Send() +} + +// DescribeExportWithContext is the same as DescribeExport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeExportWithContext(ctx aws.Context, input *DescribeExportInput, opts ...request.Option) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGlobalTable = "DescribeGlobalTable" + +// DescribeGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGlobalTable for more information on using the DescribeGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeGlobalTableRequest method. +// req, resp := client.DescribeGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable +func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) { + op := &request.Operation{ + Name: opDescribeGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGlobalTableInput{} + } + + output = &DescribeGlobalTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeGlobalTable API operation for Amazon DynamoDB. +// +// Returns information about the specified global table. +// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// you can use DescribeTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) +// instead. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeGlobalTable for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable +func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) { + req, out := c.DescribeGlobalTableRequest(input) + return out, req.Send() +} + +// DescribeGlobalTableWithContext is the same as DescribeGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeGlobalTableWithContext(ctx aws.Context, input *DescribeGlobalTableInput, opts ...request.Option) (*DescribeGlobalTableOutput, error) { + req, out := c.DescribeGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings" + +// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalTableSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGlobalTableSettings for more information on using the DescribeGlobalTableSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
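+//
+// In addition to the generated snippet below, a minimal sketch (not part of
+// the generated documentation) of the input shape, assuming svc is an existing
+// *dynamodb.DynamoDB client and the global table name is a placeholder:
+//
+//	out, err := svc.DescribeGlobalTableSettings(&dynamodb.DescribeGlobalTableSettingsInput{
+//	    GlobalTableName: aws.String("Music"),
+//	})
+//	if err == nil {
+//	    fmt.Println(len(out.ReplicaSettings)) // one entry per replica Region
+//	}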
+// +// // Example sending a request using the DescribeGlobalTableSettingsRequest method. +// req, resp := client.DescribeGlobalTableSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings +func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) { + op := &request.Operation{ + Name: opDescribeGlobalTableSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGlobalTableSettingsInput{} + } + + output = &DescribeGlobalTableSettingsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeGlobalTableSettings API operation for Amazon DynamoDB. +// +// Describes Region-specific settings for a global table. +// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeGlobalTableSettings for usage and error information. +// +// Returned Error Types: +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings +func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) { + req, out := c.DescribeGlobalTableSettingsRequest(input) + return out, req.Send() +} + +// DescribeGlobalTableSettingsWithContext is the same as DescribeGlobalTableSettings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGlobalTableSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input *DescribeGlobalTableSettingsInput, opts ...request.Option) (*DescribeGlobalTableSettingsOutput, error) { + req, out := c.DescribeGlobalTableSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeImport = "DescribeImport" + +// DescribeImportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImport for more information on using the DescribeImport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeImportRequest method. +// req, resp := client.DescribeImportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport +func (c *DynamoDB) DescribeImportRequest(input *DescribeImportInput) (req *request.Request, output *DescribeImportOutput) { + op := &request.Operation{ + Name: opDescribeImport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImportInput{} + } + + output = &DescribeImportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImport API operation for Amazon DynamoDB. +// +// Represents the properties of the import. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeImport for usage and error information. +// +// Returned Error Types: +// - ImportNotFoundException +// The specified import was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport +func (c *DynamoDB) DescribeImport(input *DescribeImportInput) (*DescribeImportOutput, error) { + req, out := c.DescribeImportRequest(input) + return out, req.Send() +} + +// DescribeImportWithContext is the same as DescribeImport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeImportWithContext(ctx aws.Context, input *DescribeImportInput, opts ...request.Option) (*DescribeImportOutput, error) { + req, out := c.DescribeImportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestination" + +// DescribeKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeKinesisStreamingDestination for more information on using the DescribeKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method. +// req, resp := client.DescribeKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination +func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKinesisStreamingDestinationInput) (req *request.Request, output *DescribeKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opDescribeKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKinesisStreamingDestinationInput{} + } + + output = &DescribeKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// Returns information about the status of Kinesis streaming. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination +func (c *DynamoDB) DescribeKinesisStreamingDestination(input *DescribeKinesisStreamingDestinationInput) (*DescribeKinesisStreamingDestinationOutput, error) { + req, out := c.DescribeKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// DescribeKinesisStreamingDestinationWithContext is the same as DescribeKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeKinesisStreamingDestinationWithContext(ctx aws.Context, input *DescribeKinesisStreamingDestinationInput, opts ...request.Option) (*DescribeKinesisStreamingDestinationOutput, error) { + req, out := c.DescribeKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLimits = "DescribeLimits" + +// DescribeLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLimits operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLimits for more information on using the DescribeLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeLimitsRequest method. +// req, resp := client.DescribeLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits +func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { + op := &request.Operation{ + Name: opDescribeLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLimitsInput{} + } + + output = &DescribeLimitsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeLimits API operation for Amazon DynamoDB. +// +// Returns the current provisioned-capacity quotas for your Amazon Web Services +// account in a Region, both for the Region as a whole and for any one DynamoDB +// table that you create there. +// +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum read capacity units and write capacity units that you +// can provision across all of your DynamoDB tables in a given Region. Also, +// there are per-table quotas that apply when you create a table there. For +// more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these quotas by filing a case at Amazon Web Services +// Support Center (https://console.aws.amazon.com/support/home#/), obtaining +// the increase is not instantaneous. 
The DescribeLimits action lets you write +// code to compare the capacity you are currently using to those quotas imposed +// by your account so that you have enough time to apply for an increase before +// you hit a quota. +// +// For example, you could use one of the Amazon Web Services SDKs to do the +// following: +// +// Call DescribeLimits for a particular Region to obtain your current account +// quotas on provisioned capacity there. +// +// Create a variable to hold the aggregate read capacity units provisioned for +// all your tables in that Region, and one to hold the aggregate write capacity +// units. Zero them both. +// +// Call ListTables to obtain a list of all your DynamoDB tables. +// +// For each table name listed by ListTables, do the following: +// +// - Call DescribeTable with the table name. +// +// - Use the data returned by DescribeTable to add the read capacity units +// and write capacity units provisioned for the table itself to your variables. +// +// - If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables +// as well. +// +// Report the account quotas for that Region returned by DescribeLimits, along +// with the total current provisioned capacity levels you have calculated. +// +// This will let you see whether you are getting close to your account-level +// quotas. +// +// The per-table quotas apply only when you are creating a new table. They restrict +// the sum of the provisioned capacity of the new table itself and all its global +// secondary indexes. +// +// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned +// capacity extremely rapidly, but the only quota that applies is that the aggregate +// provisioned capacity over all your tables and GSIs cannot exceed either of +// the per-account quotas. +// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeLimits for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits +func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + return out, req.Send() +} + +// DescribeLimitsWithContext is the same as DescribeLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeLimitsWithContext(ctx aws.Context, input *DescribeLimitsInput, opts ...request.Option) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeTable = "DescribeTable" + +// DescribeTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTable for more information on using the DescribeTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTableRequest method. +// req, resp := client.DescribeTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable +func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { + op := &request.Operation{ + Name: opDescribeTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableInput{} + } + + output = &DescribeTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeTable API operation for Amazon DynamoDB. +// +// Returns information about the table, including the current status of the +// table, when it was created, the primary key schema, and any indexes on the +// table. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable +// uses an eventually consistent query, and the metadata for your table might +// not be available at that moment. Wait for a few seconds, and then try the +// DescribeTable request again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. 
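+//
+// // Example (illustrative sketch, not part of the generated documentation):
+// // describing a table and printing its status. The table name "Music" is a
+// // hypothetical placeholder.
+// out, err := client.DescribeTable(&dynamodb.DescribeTableInput{
+//     TableName: aws.String("Music"),
+// })
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.Table.TableStatus))
+// }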
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable +func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + return out, req.Send() +} + +// DescribeTableWithContext is the same as DescribeTable with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTableWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.Option) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTableReplicaAutoScaling = "DescribeTableReplicaAutoScaling" + +// DescribeTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTableReplicaAutoScaling operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTableReplicaAutoScaling for more information on using the DescribeTableReplicaAutoScaling +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTableReplicaAutoScalingRequest method. +// req, resp := client.DescribeTableReplicaAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling +func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableReplicaAutoScalingInput) (req *request.Request, output *DescribeTableReplicaAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeTableReplicaAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableReplicaAutoScalingInput{} + } + + output = &DescribeTableReplicaAutoScalingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTableReplicaAutoScaling API operation for Amazon DynamoDB. +// +// Describes auto scaling settings across replicas of the global table at once. +// +// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTableReplicaAutoScaling for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. 
The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling +func (c *DynamoDB) DescribeTableReplicaAutoScaling(input *DescribeTableReplicaAutoScalingInput) (*DescribeTableReplicaAutoScalingOutput, error) { + req, out := c.DescribeTableReplicaAutoScalingRequest(input) + return out, req.Send() +} + +// DescribeTableReplicaAutoScalingWithContext is the same as DescribeTableReplicaAutoScaling with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTableReplicaAutoScaling for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTableReplicaAutoScalingWithContext(ctx aws.Context, input *DescribeTableReplicaAutoScalingInput, opts ...request.Option) (*DescribeTableReplicaAutoScalingOutput, error) { + req, out := c.DescribeTableReplicaAutoScalingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTimeToLive = "DescribeTimeToLive" + +// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTimeToLive operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTimeToLive for more information on using the DescribeTimeToLive +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTimeToLiveRequest method. +// req, resp := client.DescribeTimeToLiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive +func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) { + op := &request.Operation{ + Name: opDescribeTimeToLive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTimeToLiveInput{} + } + + output = &DescribeTimeToLiveOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeTimeToLive API operation for Amazon DynamoDB. +// +// Gives a description of the Time to Live (TTL) status on the specified table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTimeToLive for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive +func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) { + req, out := c.DescribeTimeToLiveRequest(input) + return out, req.Send() +} + +// DescribeTimeToLiveWithContext is the same as DescribeTimeToLive with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTimeToLive for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *DescribeTimeToLiveInput, opts ...request.Option) (*DescribeTimeToLiveOutput, error) { + req, out := c.DescribeTimeToLiveRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableKinesisStreamingDestination = "DisableKinesisStreamingDestination" + +// DisableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DisableKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableKinesisStreamingDestination for more information on using the DisableKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableKinesisStreamingDestinationRequest method. 
+// req, resp := client.DisableKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination +func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKinesisStreamingDestinationInput) (req *request.Request, output *DisableKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opDisableKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableKinesisStreamingDestinationInput{} + } + + output = &DisableKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DisableKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// Stops replication from the DynamoDB table to the Kinesis data stream. This +// is done without deleting either of the resources. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DisableKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. 
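+//
+// // Example (illustrative sketch, not part of the generated documentation):
+// // stopping replication to a Kinesis data stream. The table name and stream
+// // ARN are hypothetical placeholders, assuming the input's TableName and
+// // StreamArn fields.
+// out, err := client.DisableKinesisStreamingDestination(&dynamodb.DisableKinesisStreamingDestinationInput{
+//     TableName: aws.String("Music"),
+//     StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
+// })
+// if err == nil {
+//     fmt.Println(out)
+// }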
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination +func (c *DynamoDB) DisableKinesisStreamingDestination(input *DisableKinesisStreamingDestinationInput) (*DisableKinesisStreamingDestinationOutput, error) { + req, out := c.DisableKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// DisableKinesisStreamingDestinationWithContext is the same as DisableKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DisableKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DisableKinesisStreamingDestinationWithContext(ctx aws.Context, input *DisableKinesisStreamingDestinationInput, opts ...request.Option) (*DisableKinesisStreamingDestinationOutput, error) { + req, out := c.DisableKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableKinesisStreamingDestination = "EnableKinesisStreamingDestination" + +// EnableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the EnableKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableKinesisStreamingDestination for more information on using the EnableKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableKinesisStreamingDestinationRequest method. +// req, resp := client.EnableKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination +func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesisStreamingDestinationInput) (req *request.Request, output *EnableKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opEnableKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableKinesisStreamingDestinationInput{} + } + + output = &EnableKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// EnableKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// Starts table data replication to the specified Kinesis data stream at a timestamp +// chosen during the enable workflow. If this operation doesn't return results +// immediately, use DescribeKinesisStreamingDestination to check if streaming +// to the Kinesis data stream is ACTIVE. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation EnableKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination +func (c *DynamoDB) EnableKinesisStreamingDestination(input *EnableKinesisStreamingDestinationInput) (*EnableKinesisStreamingDestinationOutput, error) { + req, out := c.EnableKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// EnableKinesisStreamingDestinationWithContext is the same as EnableKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See EnableKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
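+//
+// // Example (illustrative sketch, not part of the generated documentation):
+// // enabling streaming with a request deadline. The table name and stream ARN
+// // are hypothetical placeholders, assuming the input's TableName and StreamArn
+// // fields.
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// out, err := client.EnableKinesisStreamingDestinationWithContext(ctx, &dynamodb.EnableKinesisStreamingDestinationInput{
+//     TableName: aws.String("Music"),
+//     StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
+// })
+// if err == nil {
+//     fmt.Println(out)
+// }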
+func (c *DynamoDB) EnableKinesisStreamingDestinationWithContext(ctx aws.Context, input *EnableKinesisStreamingDestinationInput, opts ...request.Option) (*EnableKinesisStreamingDestinationOutput, error) { + req, out := c.EnableKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExecuteStatement = "ExecuteStatement" + +// ExecuteStatementRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteStatement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExecuteStatement for more information on using the ExecuteStatement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ExecuteStatementRequest method. +// req, resp := client.ExecuteStatementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement +func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *request.Request, output *ExecuteStatementOutput) { + op := &request.Operation{ + Name: opExecuteStatement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecuteStatementInput{} + } + + output = &ExecuteStatementOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExecuteStatement API operation for Amazon DynamoDB. +// +// This operation allows you to perform reads and singleton writes on data stored +// in DynamoDB, using PartiQL. +// +// For PartiQL reads (SELECT statement), if the total number of processed items +// exceeds the maximum dataset size limit of 1 MB, the read stops and results +// are returned to the user as a LastEvaluatedKey value to continue the read +// in a subsequent operation. If the filter criteria in WHERE clause does not +// match any data, the read will return an empty result set. +// +// A single SELECT statement response can return up to the maximum number of +// items (if using the Limit parameter) or a maximum of 1 MB of data (and then +// apply any filtering to the results using WHERE clause). If LastEvaluatedKey +// is present in the response, you need to paginate the result set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExecuteStatement for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. 
For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// - DuplicateItemException +// There was an attempt to insert an item with the same primary key as an item +// that already exists in the DynamoDB table. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement +func (c *DynamoDB) ExecuteStatement(input *ExecuteStatementInput) (*ExecuteStatementOutput, error) { + req, out := c.ExecuteStatementRequest(input) + return out, req.Send() +} + +// ExecuteStatementWithContext is the same as ExecuteStatement with the addition of +// the ability to pass a context and additional request options. +// +// See ExecuteStatement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExecuteStatementWithContext(ctx aws.Context, input *ExecuteStatementInput, opts ...request.Option) (*ExecuteStatementOutput, error) { + req, out := c.ExecuteStatementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExecuteTransaction = "ExecuteTransaction" + +// ExecuteTransactionRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteTransaction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExecuteTransaction for more information on using the ExecuteTransaction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ExecuteTransactionRequest method. 
+// req, resp := client.ExecuteTransactionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction +func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (req *request.Request, output *ExecuteTransactionOutput) { + op := &request.Operation{ + Name: opExecuteTransaction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecuteTransactionInput{} + } + + output = &ExecuteTransactionOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExecuteTransaction API operation for Amazon DynamoDB. +// +// This operation allows you to perform transactional reads or writes on data +// stored in DynamoDB, using PartiQL. +// +// The entire transaction must consist of either read statements or write statements, +// you cannot mix both in one transaction. The EXISTS function is an exception +// and can be used to check the condition of specific attributes of the item +// in a similar manner to ConditionCheck in the TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems) +// API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExecuteTransaction for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - TransactionCanceledException +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// +// - A condition in one of the condition expressions is not met. +// +// - A table in the TransactWriteItems request is in a different account +// or region. +// +// - More than one action in the TransactWriteItems operation targets the +// same item. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. +// +// - There is a user error, such as an invalid data format. +// +// DynamoDB cancels a TransactGetItems request under the following circumstances: +// +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// +// - A table in the TransactGetItems request is in a different account or +// region. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - There is a user error, such as an invalid data format. +// +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items, if an item has no error +// it will have None code and Null message. 
+// +// Cancellation reason codes and possible error messages: +// +// - No Errors: Code: None Message: null +// +// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The +// conditional request failed. +// +// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded +// Message: Collection size exceeded. +// +// - Transaction Conflict: Code: TransactionConflict Message: Transaction +// is ongoing for the item. +// +// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded +// Messages: The level of configured provisioned throughput for the table +// was exceeded. Consider increasing your provisioning level with the UpdateTable +// API. This Message is received when provisioned throughput is exceeded +// is on a provisioned DynamoDB table. The level of configured provisioned +// throughput for one or more global secondary indexes of the table was exceeded. +// Consider increasing your provisioning level for the under-provisioned +// global secondary indexes with the UpdateTable API. This message is returned +// when provisioned throughput is exceeded is on a provisioned GSI. +// +// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds +// the current capacity of your table or index. DynamoDB is automatically +// scaling your table or index so please try again shortly. If exceptions +// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. +// This message is returned when writes get throttled on an On-Demand table +// as DynamoDB is automatically scaling the table. Throughput exceeds the +// current capacity for one or more global secondary indexes. DynamoDB is +// automatically scaling your index so please try again shortly. This message +// is returned when when writes get throttled on an On-Demand GSI as DynamoDB +// is automatically scaling the GSI. +// +// - Validation Error: Code: ValidationError Messages: One or more parameter +// values were invalid. The update expression attempted to update the secondary +// index key beyond allowed size limits. The update expression attempted +// to update the secondary index key to unsupported type. An operand in the +// update expression has an incorrect data type. Item size to update has +// exceeded the maximum allowed size. Number overflow. Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +// +// - TransactionInProgressException +// The transaction with the given request token is already in progress. +// +// - IdempotentParameterMismatchException +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. 
For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction +func (c *DynamoDB) ExecuteTransaction(input *ExecuteTransactionInput) (*ExecuteTransactionOutput, error) { + req, out := c.ExecuteTransactionRequest(input) + return out, req.Send() +} + +// ExecuteTransactionWithContext is the same as ExecuteTransaction with the addition of +// the ability to pass a context and additional request options. +// +// See ExecuteTransaction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExecuteTransactionWithContext(ctx aws.Context, input *ExecuteTransactionInput, opts ...request.Option) (*ExecuteTransactionOutput, error) { + req, out := c.ExecuteTransactionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExportTableToPointInTime = "ExportTableToPointInTime" + +// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the ExportTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ExportTableToPointInTimeRequest method. +// req, resp := client.ExportTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opExportTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportTableToPointInTimeInput{} + } + + output = &ExportTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportTableToPointInTime API operation for Amazon DynamoDB. +// +// Exports table data to an S3 bucket. The table must have point in time recovery +// enabled, and you can export data from any time within the point in time recovery +// window. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExportTableToPointInTime for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InvalidExportTimeException +// The specified ExportTime is outside of the point in time recovery window. +// +// - ExportConflictException +// There was a conflict when writing to the specified S3 bucket. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + return out, req.Send() +} + +// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of +// the ability to pass a context and additional request options. +// +// See ExportTableToPointInTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetItem = "GetItem" + +// GetItemRequest generates a "aws/request.Request" representing the +// client's request for the GetItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetItem for more information on using the GetItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// // Example sending a request using the GetItemRequest method. +// req, resp := client.GetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { + op := &request.Operation{ + Name: opGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetItemInput{} + } + + output = &GetItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// GetItem API operation for Amazon DynamoDB. +// +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data +// and there will be no Item element in the response. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true. Although +// a strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation GetItem for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. 
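+//
+// // Example (illustrative sketch, not part of the generated documentation):
+// // a strongly consistent GetItem call. The table name and key attribute are
+// // hypothetical placeholders.
+// out, err := client.GetItem(&dynamodb.GetItemInput{
+//     TableName:      aws.String("Music"),
+//     ConsistentRead: aws.Bool(true),
+//     Key: map[string]*dynamodb.AttributeValue{
+//         "Artist": {S: aws.String("Acme Band")},
+//     },
+// })
+// if err == nil {
+//     fmt.Println(out.Item)
+// }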
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + return out, req.Send() +} + +// GetItemWithContext is the same as GetItem with the addition of +// the ability to pass a context and additional request options. +// +// See GetItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportTable = "ImportTable" + +// ImportTableRequest generates a "aws/request.Request" representing the +// client's request for the ImportTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportTable for more information on using the ImportTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ImportTableRequest method. +// req, resp := client.ImportTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable +func (c *DynamoDB) ImportTableRequest(input *ImportTableInput) (req *request.Request, output *ImportTableOutput) { + op := &request.Operation{ + Name: opImportTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportTableInput{} + } + + output = &ImportTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportTable API operation for Amazon DynamoDB. +// +// Imports table data from an S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ImportTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. 
However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ImportConflictException +// There was a conflict when importing from the specified S3 source. This can +// occur when the current import conflicts with a previous import request that +// had the same client token. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable +func (c *DynamoDB) ImportTable(input *ImportTableInput) (*ImportTableOutput, error) { + req, out := c.ImportTableRequest(input) + return out, req.Send() +} + +// ImportTableWithContext is the same as ImportTable with the addition of +// the ability to pass a context and additional request options. +// +// See ImportTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ImportTableWithContext(ctx aws.Context, input *ImportTableInput, opts ...request.Option) (*ImportTableOutput, error) { + req, out := c.ImportTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBackups = "ListBackups" + +// ListBackupsRequest generates a "aws/request.Request" representing the +// client's request for the ListBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBackups for more information on using the ListBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBackupsRequest method. +// req, resp := client.ListBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { + op := &request.Operation{ + Name: opListBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBackupsInput{} + } + + output = &ListBackupsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListBackups API operation for Amazon DynamoDB. +// +// List backups associated with an Amazon Web Services account. To list backups +// for a given table, specify TableName. ListBackups returns a paginated list +// of results with at most 1 MB worth of items in a page. You can also specify +// a maximum number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. Note +// that these boundaries are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListBackups for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) + return out, req.Send() +} + +// ListBackupsWithContext is the same as ListBackups with the addition of +// the ability to pass a context and additional request options. +// +// See ListBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListContributorInsights = "ListContributorInsights" + +// ListContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the ListContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListContributorInsights for more information on using the ListContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListContributorInsightsRequest method. 
+// req, resp := client.ListContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights +func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) { + op := &request.Operation{ + Name: opListContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContributorInsightsInput{} + } + + output = &ListContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListContributorInsights API operation for Amazon DynamoDB. +// +// Returns a list of ContributorInsightsSummary for a table and all its global +// secondary indexes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListContributorInsights for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights +func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) { + req, out := c.ListContributorInsightsRequest(input) + return out, req.Send() +} + +// ListContributorInsightsWithContext is the same as ListContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See ListContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *ListContributorInsightsInput, opts ...request.Option) (*ListContributorInsightsOutput, error) { + req, out := c.ListContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListContributorInsightsPages iterates over the pages of a ListContributorInsights operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListContributorInsights method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListContributorInsights operation. 
+// pageNum := 0 +// err := client.ListContributorInsightsPages(params, +// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error { + return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListContributorInsightsPagesWithContext same as ListContributorInsightsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListContributorInsightsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListContributorInsightsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListContributorInsightsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListExports = "ListExports" + +// ListExportsRequest generates a "aws/request.Request" representing the +// client's request for the ListExports operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListExports for more information on using the ListExports +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListExportsRequest method. +// req, resp := client.ListExportsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { + op := &request.Operation{ + Name: opListExports, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListExportsInput{} + } + + output = &ListExportsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListExports API operation for Amazon DynamoDB. +// +// Lists completed exports within the past 90 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListExports for usage and error information. 
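+//
+// A minimal, illustrative sketch: it assumes an already-constructed client
+// named svc; the table ARN and page size below are hypothetical. It pages
+// through completed exports with ListExportsPages and prints each summary.
+//
+// input := &dynamodb.ListExportsInput{
+//     TableArn:   aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"), // hypothetical ARN
+//     MaxResults: aws.Int64(25),
+// }
+// err := svc.ListExportsPages(input, func(page *dynamodb.ListExportsOutput, lastPage bool) bool {
+//     for _, s := range page.ExportSummaries {
+//         fmt.Println(aws.StringValue(s.ExportArn), aws.StringValue(s.ExportStatus))
+//     }
+//     return true // keep paging while a NextToken is returned
+// })
+// if err != nil {
+//     log.Fatal(err)
+// }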
+// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) + return out, req.Send() +} + +// ListExportsWithContext is the same as ListExports with the addition of +// the ability to pass a context and additional request options. +// +// See ListExports for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListExportsPages iterates over the pages of a ListExports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListExports method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListExports operation. +// pageNum := 0 +// err := client.ListExportsPages(params, +// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { + return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListExportsPagesWithContext same as ListExportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListExportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListExportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGlobalTables = "ListGlobalTables" + +// ListGlobalTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListGlobalTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGlobalTables for more information on using the ListGlobalTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { + op := &request.Operation{ + Name: opListGlobalTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListGlobalTablesInput{} + } + + output = &ListGlobalTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListGlobalTables API operation for Amazon DynamoDB. +// +// Lists all global tables that have a replica in the specified Region. +// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListGlobalTables for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + return out, req.Send() +} + +// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListGlobalTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListImports = "ListImports" + +// ListImportsRequest generates a "aws/request.Request" representing the +// client's request for the ListImports operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListImports for more information on using the ListImports +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListImportsRequest method. +// req, resp := client.ListImportsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports +func (c *DynamoDB) ListImportsRequest(input *ListImportsInput) (req *request.Request, output *ListImportsOutput) { + op := &request.Operation{ + Name: opListImports, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListImportsInput{} + } + + output = &ListImportsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListImports API operation for Amazon DynamoDB. +// +// Lists completed imports within the past 90 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListImports for usage and error information. +// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. 
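+//
+// A minimal, illustrative sketch: it assumes an already-constructed client
+// named svc; the table ARN and page size are hypothetical. It pages through
+// recent imports for one table with ListImportsPages.
+//
+// input := &dynamodb.ListImportsInput{
+//     TableArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"), // hypothetical ARN
+//     PageSize: aws.Int64(10),
+// }
+// err := svc.ListImportsPages(input, func(page *dynamodb.ListImportsOutput, lastPage bool) bool {
+//     for _, s := range page.ImportSummaryList {
+//         fmt.Println(aws.StringValue(s.ImportArn), aws.StringValue(s.ImportStatus))
+//     }
+//     return true // keep paging while a NextToken is returned
+// })
+// if err != nil {
+//     log.Fatal(err)
+// }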
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports +func (c *DynamoDB) ListImports(input *ListImportsInput) (*ListImportsOutput, error) { + req, out := c.ListImportsRequest(input) + return out, req.Send() +} + +// ListImportsWithContext is the same as ListImports with the addition of +// the ability to pass a context and additional request options. +// +// See ListImports for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListImportsWithContext(ctx aws.Context, input *ListImportsInput, opts ...request.Option) (*ListImportsOutput, error) { + req, out := c.ListImportsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListImportsPages iterates over the pages of a ListImports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListImports method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListImports operation. +// pageNum := 0 +// err := client.ListImportsPages(params, +// func(page *dynamodb.ListImportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListImportsPages(input *ListImportsInput, fn func(*ListImportsOutput, bool) bool) error { + return c.ListImportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListImportsPagesWithContext same as ListImportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListImportsPagesWithContext(ctx aws.Context, input *ListImportsInput, fn func(*ListImportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListImportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListImportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTables for more information on using the ListTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
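+//
+// As an illustrative sketch of that pattern (the client value svc and the
+// header name are assumptions, not part of this API), a custom header can be
+// attached to the built request before it is sent:
+//
+// req, resp := svc.ListTablesRequest(&dynamodb.ListTablesInput{Limit: aws.Int64(10)})
+// req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo") // hypothetical header
+// if err := req.Send(); err == nil {
+//     fmt.Println(aws.StringValueSlice(resp.TableNames))
+// }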
+// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + output = &ListTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListTables API operation for Amazon DynamoDB. +// +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTables for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + return out, req.Send() +} + +// ListTablesWithContext is the same as ListTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTablesPages iterates over the pages of a ListTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTables operation. 
+// pageNum := 0 +// err := client.ListTablesPages(params, +// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { + return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTablesPagesWithContext same as ListTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsOfResource = "ListTagsOfResource" + +// ListTagsOfResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsOfResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsOfResource for more information on using the ListTagsOfResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTagsOfResourceRequest method. +// req, resp := client.ListTagsOfResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) { + op := &request.Operation{ + Name: opListTagsOfResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsOfResourceInput{} + } + + output = &ListTagsOfResourceOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListTagsOfResource API operation for Amazon DynamoDB. 
+// +// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource +// up to 10 times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTagsOfResource for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) + return out, req.Send() +} + +// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsOfResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutItem = "PutItem" + +// PutItemRequest generates a "aws/request.Request" representing the +// client's request for the PutItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutItem for more information on using the PutItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutItemRequest method. +// req, resp := client.PutItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { + op := &request.Operation{ + Name: opPutItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutItemInput{} + } + + output = &PutItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// PutItem API operation for Amazon DynamoDB. +// +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. You can return the item's attribute values in the same operation, +// using the ReturnValues parameter. +// +// When you add an item, the primary key attributes are the only required attributes. +// +// Empty String and Binary attribute values are allowed. Attribute values of +// type String and Binary must have a length greater than zero if the attribute +// is used as a key attribute for a table or index. Set type attributes cannot +// be empty. +// +// Invalid Requests with empty values will be rejected with a ValidationException +// exception. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name +// of the attribute being used as the partition key for the table. Since every +// record must contain that attribute, the attribute_not_exists function will +// only succeed if no matching item exists. +// +// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation PutItem for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. 
+// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + return out, req.Send() +} + +// PutItemWithContext is the same as PutItem with the addition of +// the ability to pass a context and additional request options. +// +// See PutItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opQuery = "Query" + +// QueryRequest generates a "aws/request.Request" representing the +// client's request for the Query operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Query for more information on using the Query +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the QueryRequest method. +// req, resp := client.QueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { + op := &request.Operation{ + Name: opQuery, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &QueryInput{} + } + + output = &QueryOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// Query API operation for Amazon DynamoDB. 
+// +// You must provide the name of the partition key attribute and a single value +// for that attribute. Query returns all items with that partition key value. +// Optionally, you can provide a sort key attribute and use a comparison operator +// to refine the search results. +// +// Use the KeyConditionExpression parameter to provide a specific value for +// the partition key. The Query operation will return all of the items from +// the table or index with that partition key value. You can optionally narrow +// the scope of the Query operation by specifying a sort key value and a comparison +// operator in KeyConditionExpression. To further refine the Query results, +// you can optionally provide a FilterExpression. A FilterExpression determines +// which items within the results should be returned to you. All of the other +// results are discarded. +// +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume +// the minimum number of read capacity units for that type of read operation. +// +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a FilterExpression. +// +// Query results are always sorted by the sort key value. If the data type of +// the sort key is Number, the results are returned in numeric order; otherwise, +// the results are returned in order of UTF-8 bytes. By default, the sort order +// is ascending. To reverse the order, set the ScanIndexForward parameter to +// false. +// +// A single Query operation will read up to the maximum number of items set +// (if using the Limit parameter) or a maximum of 1 MB of data and then apply +// any filtering to the results using FilterExpression. If LastEvaluatedKey +// is present in the response, you will need to paginate the result set. For +// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) +// in the Amazon DynamoDB Developer Guide. +// +// FilterExpression is applied after a Query finishes, but before the results +// are returned. A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression. +// +// A Query operation can return an empty result set and a LastEvaluatedKey if +// all the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the ConsistentRead +// parameter to true and obtain a strongly consistent result. Global secondary +// indexes support eventually consistent reads only, so do not specify ConsistentRead +// when querying a global secondary index. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation Query for usage and error information. 
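+//
+// A minimal, illustrative sketch: it assumes an already-constructed client
+// named svc; the table name, key names, and values are hypothetical. It shows
+// a KeyConditionExpression on the partition and sort keys, with QueryPages
+// following LastEvaluatedKey automatically.
+//
+// input := &dynamodb.QueryInput{
+//     TableName:              aws.String("Music"), // hypothetical table
+//     KeyConditionExpression: aws.String("Artist = :a AND begins_with(SongTitle, :t)"),
+//     ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+//         ":a": {S: aws.String("No One You Know")},
+//         ":t": {S: aws.String("Call")},
+//     },
+// }
+// err := svc.QueryPages(input, func(page *dynamodb.QueryOutput, lastPage bool) bool {
+//     fmt.Println("items in this page:", len(page.Items))
+//     return true // continue while LastEvaluatedKey is returned
+// })
+// if err != nil {
+//     log.Fatal(err)
+// }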
+// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + return out, req.Send() +} + +// QueryWithContext is the same as Query with the addition of +// the ability to pass a context and additional request options. +// +// See Query for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// QueryPages iterates over the pages of a Query operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Query method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Query operation. +// pageNum := 0 +// err := client.QueryPages(params, +// func(page *dynamodb.QueryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { + return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// QueryPagesWithContext same as QueryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *QueryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.QueryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opRestoreTableFromBackup = "RestoreTableFromBackup" + +// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableFromBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RestoreTableFromBackupRequest method. +// req, resp := client.RestoreTableFromBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) { + op := &request.Operation{ + Name: opRestoreTableFromBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreTableFromBackupInput{} + } + + output = &RestoreTableFromBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// RestoreTableFromBackup API operation for Amazon DynamoDB. +// +// Creates a new table from an existing backup. Any number of users can execute +// up to 4 concurrent restores (any type of restore) in a given account. +// +// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. +// +// You must manually set up the following on the restored table: +// +// - Auto scaling policies +// +// - IAM policies +// +// - Amazon CloudWatch metrics and alarms +// +// - Tags +// +// - Stream settings +// +// - Time to Live (TTL) settings +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation RestoreTableFromBackup for usage and error information. +// +// Returned Error Types: +// +// - TableAlreadyExistsException +// A target table with the specified name already exists. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. +// +// - BackupNotFoundException +// Backup not found for the given BackupARN. 
+// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) + return out, req.Send() +} + +// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreTableFromBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreTableToPointInTime = "RestoreTableToPointInTime" + +// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RestoreTableToPointInTimeRequest method. 
+// req, resp := client.RestoreTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreTableToPointInTimeInput{} + } + + output = &RestoreTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// RestoreTableToPointInTime API operation for Amazon DynamoDB. +// +// Restores the specified table to the specified point in time within EarliestRestorableDateTime +// and LatestRestorableDateTime. You can restore your table to any point in +// time during the last 35 days. Any number of users can execute up to 4 concurrent +// restores (any type of restore) in a given account. +// +// When you restore using point in time recovery, DynamoDB restores your table +// data to the state based on the selected date and time (day:hour:minute:second) +// to a new table. +// +// Along with data, the following are also included on the new restored table +// using point in time recovery: +// +// - Global secondary indexes (GSIs) +// +// - Local secondary indexes (LSIs) +// +// - Provisioned read and write capacity +// +// - Encryption settings All these settings come from the current settings +// of the source table at the time of restore. +// +// You must manually set up the following on the restored table: +// +// - Auto scaling policies +// +// - IAM policies +// +// - Amazon CloudWatch metrics and alarms +// +// - Tags +// +// - Stream settings +// +// - Time to Live (TTL) settings +// +// - Point in time recovery settings +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation RestoreTableToPointInTime for usage and error information. +// +// Returned Error Types: +// +// - TableAlreadyExistsException +// A target table with the specified name already exists. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InvalidRestoreTimeException +// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime +// and LatestRestorableDateTime. +// +// - PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) { + req, out := c.RestoreTableToPointInTimeRequest(input) + return out, req.Send() +} + +// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreTableToPointInTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) { + req, out := c.RestoreTableToPointInTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opScan = "Scan" + +// ScanRequest generates a "aws/request.Request" representing the +// client's request for the Scan operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Scan for more information on using the Scan +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ScanRequest method. 
+// req, resp := client.ScanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan +func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { + op := &request.Operation{ + Name: opScan, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ScanInput{} + } + + output = &ScanOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// Scan API operation for Amazon DynamoDB. +// +// The Scan operation returns one or more items and item attributes by accessing +// every item in a table or a secondary index. To have DynamoDB return fewer +// items, you can provide a FilterExpression operation. +// +// If the total number of scanned items exceeds the maximum dataset size limit +// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey +// value to continue the scan in a subsequent operation. The results also include +// the number of items exceeding the limit. A scan can result in no table data +// meeting the filter criteria. +// +// A single Scan operation reads up to the maximum number of items set (if using +// the Limit parameter) or a maximum of 1 MB of data and then apply any filtering +// to the results using FilterExpression. If LastEvaluatedKey is present in +// the response, you need to paginate the result set. For more information, +// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) +// in the Amazon DynamoDB Developer Guide. +// +// Scan operations proceed sequentially; however, for faster performance on +// a large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) +// in the Amazon DynamoDB Developer Guide. +// +// Scan uses eventually consistent reads when accessing the data in a table; +// therefore, the result set might not include the changes to data in the table +// immediately before the operation began. If you need a consistent copy of +// the data, as of the time that the Scan begins, you can set the ConsistentRead +// parameter to true. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation Scan for usage and error information. 
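+//
+// As an editorial, illustrative sketch (not part of the generated documentation),
+// the pagination described above can be driven with the ScanPages helper defined
+// later in this file. The table name "Music", the filter expression, and the
+// attribute values are hypothetical, and the snippet assumes a configured session
+// plus the aws, session, and fmt packages:
+//
+// svc := dynamodb.New(session.Must(session.NewSession()))
+// err := svc.ScanPages(&dynamodb.ScanInput{
+//     TableName:        aws.String("Music"),
+//     FilterExpression: aws.String("Artist = :a"),
+//     ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+//         ":a": {S: aws.String("No One You Know")},
+//     },
+// }, func(page *dynamodb.ScanOutput, lastPage bool) bool {
+//     fmt.Println(len(page.Items)) // handle one page of results
+//     return true                  // keep paginating until LastEvaluatedKey is exhausted
+// })
+// if err != nil {
+//     fmt.Println(err)
+// }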
+// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan +func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + return out, req.Send() +} + +// ScanWithContext is the same as Scan with the addition of +// the ability to pass a context and additional request options. +// +// See Scan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ScanPages iterates over the pages of a Scan operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Scan method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *dynamodb.ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { + return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ScanPagesWithContext same as ScanPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ScanInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ScanRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// TagResource API operation for Amazon DynamoDB. +// +// Associate a set of tags with an Amazon DynamoDB resource. You can then activate +// these user-defined tags so that they appear on the Billing and Cost Management +// console for cost allocation tracking. You can call TagResource up to five +// times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. 
+// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTransactGetItems = "TransactGetItems" + +// TransactGetItemsRequest generates a "aws/request.Request" representing the +// client's request for the TransactGetItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TransactGetItems for more information on using the TransactGetItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TransactGetItemsRequest method. +// req, resp := client.TransactGetItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems +func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) { + op := &request.Operation{ + Name: opTransactGetItems, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TransactGetItemsInput{} + } + + output = &TransactGetItemsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// TransactGetItems API operation for Amazon DynamoDB. +// +// TransactGetItems is a synchronous operation that atomically retrieves multiple +// items from one or more tables (but not from indexes) in a single account +// and Region. A TransactGetItems call can contain up to 100 TransactGetItem +// objects, each of which contains a Get structure that specifies an item to +// retrieve from a table in the account and Region. A call to TransactGetItems +// cannot retrieve items from tables in more than one Amazon Web Services account +// or Region. The aggregate size of the items in the transaction cannot exceed +// 4 MB. +// +// DynamoDB rejects the entire TransactGetItems request if any of the following +// is true: +// +// - A conflicting operation is in the process of updating an item to be +// read. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - There is a user error, such as an invalid data format. +// +// - The aggregate size of the items in the transaction cannot exceed 4 MB. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation TransactGetItems for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - TransactionCanceledException +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// +// - A condition in one of the condition expressions is not met. +// +// - A table in the TransactWriteItems request is in a different account +// or region. +// +// - More than one action in the TransactWriteItems operation targets the +// same item. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. +// +// - There is a user error, such as an invalid data format. +// +// DynamoDB cancels a TransactGetItems request under the following circumstances: +// +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// +// - A table in the TransactGetItems request is in a different account or +// region. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - There is a user error, such as an invalid data format. 
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of requested items; if an item has no error,
+// it will have a None code and a Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors: Code: None Message: null
+//
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// - ProvisionedThroughputExceededException
+// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
+// automatically retry requests that receive this exception. Your request is
+// eventually successful, unless your retry queue is too large to finish. Reduce
+// the frequency of requests and use exponential backoff. 
For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems +func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) { + req, out := c.TransactGetItemsRequest(input) + return out, req.Send() +} + +// TransactGetItemsWithContext is the same as TransactGetItems with the addition of +// the ability to pass a context and additional request options. +// +// See TransactGetItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) { + req, out := c.TransactGetItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTransactWriteItems = "TransactWriteItems" + +// TransactWriteItemsRequest generates a "aws/request.Request" representing the +// client's request for the TransactWriteItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TransactWriteItems for more information on using the TransactWriteItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TransactWriteItemsRequest method. +// req, resp := client.TransactWriteItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems +func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) { + op := &request.Operation{ + Name: opTransactWriteItems, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TransactWriteItemsInput{} + } + + output = &TransactWriteItemsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// TransactWriteItems API operation for Amazon DynamoDB. +// +// TransactWriteItems is a synchronous write operation that groups up to 100 +// action requests. These actions can target items in different tables, but +// not in different Amazon Web Services accounts or Regions, and no two actions +// can target the same item. For example, you cannot both ConditionCheck and +// Update the same item. The aggregate size of the items in the transaction +// cannot exceed 4 MB. +// +// The actions are completed atomically so that either all of them succeed, +// or all of them fail. They are defined by the following objects: +// +// - Put — Initiates a PutItem operation to write a new item. This structure +// specifies the primary key of the item to be written, the name of the table +// to write it in, an optional condition expression that must be satisfied +// for the write to succeed, a list of the item's attributes, and a field +// indicating whether to retrieve the item's attributes if the condition +// is not met. +// +// - Update — Initiates an UpdateItem operation to update an existing item. +// This structure specifies the primary key of the item to be updated, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the update to succeed, an expression that defines +// one or more attributes to be updated, and a field indicating whether to +// retrieve the item's attributes if the condition is not met. +// +// - Delete — Initiates a DeleteItem operation to delete an existing item. +// This structure specifies the primary key of the item to be deleted, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the deletion to succeed, and a field indicating +// whether to retrieve the item's attributes if the condition is not met. +// +// - ConditionCheck — Applies a condition to an item that is not being +// modified by the transaction. This structure specifies the primary key +// of the item to be checked, the name of the table where it resides, a condition +// expression that must be satisfied for the transaction to succeed, and +// a field indicating whether to retrieve the item's attributes if the condition +// is not met. +// +// DynamoDB rejects the entire TransactWriteItems request if any of the following +// is true: +// +// - A condition in one of the condition expressions is not met. +// +// - An ongoing operation is in the process of updating the same item. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - An item size becomes too large (bigger than 400 KB), a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. +// +// - The aggregate size of the items in the transaction exceeds 4 MB. +// +// - There is a user error, such as an invalid data format. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TransactWriteItems for usage and error information.
+//
+// Returned Error Types:
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - TransactionCanceledException
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - A table in the TransactWriteItems request is in a different account
+// or region.
+//
+// - More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// - A table in the TransactGetItems request is in a different account or
+// region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of requested items; if an item has no error,
+// it will have a None code and a Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors: Code: None Message: null
+//
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. 
If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// - TransactionInProgressException
+// The transaction with the given request token is already in progress.
+//
+// - IdempotentParameterMismatchException
+// DynamoDB rejected the request because you retried a request with a different
+// payload but with an idempotent token that was already used.
+//
+// - ProvisionedThroughputExceededException
+// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
+// automatically retry requests that receive this exception. Your request is
+// eventually successful, unless your retry queue is too large to finish. Reduce
+// the frequency of requests and use exponential backoff. For more information,
+// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// - RequestLimitExceeded
+// Throughput exceeds the current throughput quota for your account. Please
+// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
+// a quota increase.
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
+func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) {
+	req, out := c.TransactWriteItemsRequest(input)
+	return out, req.Send()
+}
+
+// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TransactWriteItems for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts. 
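+//
+// As an editorial, illustrative sketch (not part of the generated documentation),
+// a write transaction can be bounded by a deadline through the context. The svc
+// client, the table name "Music", and the item attributes are hypothetical, and
+// the snippet assumes the aws, context, fmt, and time packages:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// _, err := svc.TransactWriteItemsWithContext(ctx, &dynamodb.TransactWriteItemsInput{
+//     TransactItems: []*dynamodb.TransactWriteItem{{
+//         Put: &dynamodb.Put{
+//             TableName: aws.String("Music"),
+//             Item: map[string]*dynamodb.AttributeValue{
+//                 "Artist":    {S: aws.String("No One You Know")},
+//                 "SongTitle": {S: aws.String("Call Me Today")},
+//             },
+//         },
+//     }},
+// })
+// if err != nil {
+//     fmt.Println(err) // the transaction failed or the deadline was exceeded
+// }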
+func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UntagResource API operation for Amazon DynamoDB. +// +// Removes the association of tags from an Amazon DynamoDB resource. You can +// call UntagResource up to five times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. 
+// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContinuousBackups = "UpdateContinuousBackups" + +// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContinuousBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateContinuousBackupsRequest method. 
+// req, resp := client.UpdateContinuousBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) { + op := &request.Operation{ + Name: opUpdateContinuousBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContinuousBackupsInput{} + } + + output = &UpdateContinuousBackupsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateContinuousBackups API operation for Amazon DynamoDB. +// +// UpdateContinuousBackups enables or disables point in time recovery for the +// specified table. A successful UpdateContinuousBackups call returns the current +// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables +// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus +// will be set to ENABLED. +// +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateContinuousBackups for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - ContinuousBackupsUnavailableException +// Backups have not yet been enabled for this table. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) + return out, req.Send() +} + +// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateContinuousBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContributorInsights = "UpdateContributorInsights" + +// UpdateContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContributorInsights for more information on using the UpdateContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateContributorInsightsRequest method. +// req, resp := client.UpdateContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights +func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) { + op := &request.Operation{ + Name: opUpdateContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContributorInsightsInput{} + } + + output = &UpdateContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateContributorInsights API operation for Amazon DynamoDB. +// +// Updates the status for contributor insights for a specific table or index. +// CloudWatch Contributor Insights for DynamoDB graphs display the partition +// key and (if applicable) sort key of frequently accessed items and frequently +// throttled items in plaintext. If you require the use of Amazon Web Services +// Key Management Service (KMS) to encrypt this table’s partition key and +// sort key data with an Amazon Web Services managed key or customer managed +// key, you should not enable CloudWatch Contributor Insights for DynamoDB for +// this table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateContributorInsights for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. 
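+//
+// As an editorial, illustrative sketch (not part of the generated documentation),
+// contributor insights can be enabled on a hypothetical table named "Music"; svc
+// is assumed to be a configured *dynamodb.DynamoDB client:
+//
+// _, err := svc.UpdateContributorInsights(&dynamodb.UpdateContributorInsightsInput{
+//     TableName:                 aws.String("Music"),
+//     ContributorInsightsAction: aws.String("ENABLE"), // or "DISABLE"
+// })
+// if err != nil {
+//     fmt.Println(err)
+// }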
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights +func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) { + req, out := c.UpdateContributorInsightsRequest(input) + return out, req.Send() +} + +// UpdateContributorInsightsWithContext is the same as UpdateContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateContributorInsightsWithContext(ctx aws.Context, input *UpdateContributorInsightsInput, opts ...request.Option) (*UpdateContributorInsightsOutput, error) { + req, out := c.UpdateContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGlobalTable = "UpdateGlobalTable" + +// UpdateGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGlobalTable for more information on using the UpdateGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateGlobalTableRequest method. +// req, resp := client.UpdateGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { + op := &request.Operation{ + Name: opUpdateGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGlobalTableInput{} + } + + output = &UpdateGlobalTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateGlobalTable API operation for Amazon DynamoDB. +// +// Adds or removes replicas in the specified global table. The global table +// must already exist to be able to use this operation. 
Any replica to be added +// must be empty, have the same name as the global table, have the same key +// schema, have DynamoDB Streams enabled, and have the same provisioned and +// maximum write capacity units. +// +// Although you can use UpdateGlobalTable to add replicas and remove replicas +// in a single request, for simplicity we recommend that you issue separate +// requests for adding or removing replicas. +// +// If global secondary indexes are specified, then the following conditions +// must also be met: +// +// - The global secondary indexes must have the same name. +// +// - The global secondary indexes must have the same hash key and sort key +// (if present). +// +// - The global secondary indexes must have the same provisioned and maximum +// write capacity units. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateGlobalTable for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// - ReplicaAlreadyExistsException +// The specified replica is already part of the global table. +// +// - ReplicaNotFoundException +// The specified replica is no longer part of the global table. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) + return out, req.Send() +} + +// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" + +// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTableSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateGlobalTableSettingsRequest method. +// req, resp := client.UpdateGlobalTableSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) { + op := &request.Operation{ + Name: opUpdateGlobalTableSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGlobalTableSettingsInput{} + } + + output = &UpdateGlobalTableSettingsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateGlobalTableSettings API operation for Amazon DynamoDB. +// +// Updates settings for a global table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateGlobalTableSettings for usage and error information. +// +// Returned Error Types: +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// - ReplicaNotFoundException +// The specified replica is no longer part of the global table. +// +// - IndexNotFoundException +// The operation tried to access a nonexistent index. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - InternalServerError +// An error occurred on the server side. 
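+//
+// As an editorial, illustrative sketch (not part of the generated documentation),
+// the provisioned write capacity of a hypothetical global table named "Music"
+// could be updated as follows; svc is assumed to be a configured *dynamodb.DynamoDB
+// client:
+//
+// _, err := svc.UpdateGlobalTableSettings(&dynamodb.UpdateGlobalTableSettingsInput{
+//     GlobalTableName:                          aws.String("Music"),
+//     GlobalTableProvisionedWriteCapacityUnits: aws.Int64(10),
+// })
+// if err != nil {
+//     fmt.Println(err)
+// }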
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) + return out, req.Send() +} + +// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGlobalTableSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateItem = "UpdateItem" + +// UpdateItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateItem for more information on using the UpdateItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateItemRequest method. +// req, resp := client.UpdateItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { + op := &request.Operation{ + Name: opUpdateItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateItemInput{} + } + + output = &UpdateItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateItem API operation for Amazon DynamoDB. +// +// Edits an existing item's attributes, or adds a new item to the table if it +// does not already exist. You can put, delete, or add attribute values. 
You +// can also perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair +// if it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem operation +// using the ReturnValues parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateItem for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + return out, req.Send() +} + +// UpdateItemWithContext is the same as UpdateItem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTable for more information on using the UpdateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + output = &UpdateTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateTable API operation for Amazon DynamoDB. +// +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// You can only perform one of the following operations at once: +// +// - Modify the provisioned throughput settings of the table. +// +// - Remove a global secondary index from the table. +// +// - Create a new global secondary index on the table. After the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it is executing, the table +// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// issue another UpdateTable request. When the table returns to the ACTIVE state, +// the UpdateTable operation is complete. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. 
+// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + return out, req.Send() +} + +// UpdateTableWithContext is the same as UpdateTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling" + +// UpdateTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTableReplicaAutoScaling operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTableReplicaAutoScaling for more information on using the UpdateTableReplicaAutoScaling +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method. +// req, resp := client.UpdateTableReplicaAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling +func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) { + op := &request.Operation{ + Name: opUpdateTableReplicaAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableReplicaAutoScalingInput{} + } + + output = &UpdateTableReplicaAutoScalingOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTableReplicaAutoScaling API operation for Amazon DynamoDB. +// +// Updates auto scaling settings on your global tables at once. +// +// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTableReplicaAutoScaling for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling +func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) { + req, out := c.UpdateTableReplicaAutoScalingRequest(input) + return out, req.Send() +} + +// UpdateTableReplicaAutoScalingWithContext is the same as UpdateTableReplicaAutoScaling with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTableReplicaAutoScaling for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTableReplicaAutoScalingWithContext(ctx aws.Context, input *UpdateTableReplicaAutoScalingInput, opts ...request.Option) (*UpdateTableReplicaAutoScalingOutput, error) { + req, out := c.UpdateTableReplicaAutoScalingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTimeToLive = "UpdateTimeToLive" + +// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTimeToLive operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTimeToLive for more information on using the UpdateTimeToLive +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
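+//
+// Editor's note: as an illustrative, editor-added sketch (not generated SDK
+// documentation), TTL is typically enabled through the higher-level
+// UpdateTimeToLive wrapper rather than this Request constructor. The table name
+// "Music", the attribute name "ExpireAt", and the session sess are assumptions.
+//
+// svc := dynamodb.New(sess)
+// _, err := svc.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{
+//     TableName: aws.String("Music"),
+//     TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
+//         AttributeName: aws.String("ExpireAt"),
+//         Enabled:       aws.Bool(true),
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err)
+// }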
+// +// // Example sending a request using the UpdateTimeToLiveRequest method. +// req, resp := client.UpdateTimeToLiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive +func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) { + op := &request.Operation{ + Name: opUpdateTimeToLive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTimeToLiveInput{} + } + + output = &UpdateTimeToLiveOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateTimeToLive API operation for Amazon DynamoDB. +// +// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the +// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. +// It can take up to one hour for the change to fully process. Any additional +// UpdateTimeToLive calls for the same table during this one hour duration result +// in a ValidationException. +// +// TTL compares the current time in epoch time format to the time stored in +// the TTL attribute of an item. If the epoch time value stored in the attribute +// is less than the current time, the item is marked as expired and subsequently +// deleted. +// +// The epoch time format is the number of seconds elapsed since 12:00:00 AM +// January 1, 1970 UTC. +// +// DynamoDB deletes expired items on a best-effort basis to ensure availability +// of throughput for other data operations. +// +// DynamoDB typically deletes expired items within two days of expiration. The +// exact duration within which an item gets deleted after expiration is specific +// to the nature of the workload. Items that have expired and not been deleted +// will still show up in reads, queries, and scans. +// +// As items are deleted, they are removed from any local secondary index and +// global secondary index immediately in the same eventually consistent way +// as a standard delete operation. +// +// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTimeToLive for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. 
+// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive +func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) { + req, out := c.UpdateTimeToLiveRequest(input) + return out, req.Send() +} + +// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTimeToLive for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) { + req, out := c.UpdateTimeToLiveRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Contains details of a table archival operation. +type ArchivalSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the backup the table was archived to, when + // applicable in the archival reason. If you wish to restore this backup to + // the same table name, you will need to delete the original table. + ArchivalBackupArn *string `min:"37" type:"string"` + + // The date and time when table archival was initiated by DynamoDB, in UNIX + // epoch time format. + ArchivalDateTime *time.Time `type:"timestamp"` + + // The reason DynamoDB archived the table. Currently, the only possible value + // is: + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to + // the table's KMS key being inaccessible for more than seven days. An On-Demand + // backup was created at the archival time. + ArchivalReason *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ArchivalSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ArchivalSummary) GoString() string { + return s.String() +} + +// SetArchivalBackupArn sets the ArchivalBackupArn field's value. +func (s *ArchivalSummary) SetArchivalBackupArn(v string) *ArchivalSummary { + s.ArchivalBackupArn = &v + return s +} + +// SetArchivalDateTime sets the ArchivalDateTime field's value. +func (s *ArchivalSummary) SetArchivalDateTime(v time.Time) *ArchivalSummary { + s.ArchivalDateTime = &v + return s +} + +// SetArchivalReason sets the ArchivalReason field's value. +func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary { + s.ArchivalReason = &v + return s +} + +// Represents an attribute for describing the key schema for the table and indexes. +type AttributeDefinition struct { + _ struct{} `type:"structure"` + + // A name for the attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // The data type for the attribute, where: + // + // * S - the attribute is of type String + // + // * N - the attribute is of type Number + // + // * B - the attribute is of type Binary + // + // AttributeType is a required field + AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttributeDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.AttributeType == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AttributeDefinition) SetAttributeName(v string) *AttributeDefinition { + s.AttributeName = &v + return s +} + +// SetAttributeType sets the AttributeType field's value. +func (s *AttributeDefinition) SetAttributeType(v string) *AttributeDefinition { + s.AttributeType = &v + return s +} + +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) +// in the Amazon DynamoDB Developer Guide. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // An attribute of type Binary. 
For example: + // + // "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" + // B is automatically base64 encoded/decoded by the SDK. + B []byte `type:"blob"` + + // An attribute of type Boolean. For example: + // + // "BOOL": true + BOOL *bool `type:"boolean"` + + // An attribute of type Binary Set. For example: + // + // "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] + BS [][]byte `type:"list"` + + // An attribute of type List. For example: + // + // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] + L []*AttributeValue `type:"list"` + + // An attribute of type Map. For example: + // + // "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} + M map[string]*AttributeValue `type:"map"` + + // An attribute of type Number. For example: + // + // "N": "123.45" + // + // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + // across languages and libraries. However, DynamoDB treats them as number type + // attributes for mathematical operations. + N *string `type:"string"` + + // An attribute of type Number Set. For example: + // + // "NS": ["42.2", "-19", "7.5", "3.14"] + // + // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + // across languages and libraries. However, DynamoDB treats them as number type + // attributes for mathematical operations. + NS []*string `type:"list"` + + // An attribute of type Null. For example: + // + // "NULL": true + NULL *bool `type:"boolean"` + + // An attribute of type String. For example: + // + // "S": "Hello" + S *string `type:"string"` + + // An attribute of type String Set. For example: + // + // "SS": ["Giraffe", "Hippo" ,"Zebra"] + SS []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValue) GoString() string { + return s.String() +} + +// SetB sets the B field's value. +func (s *AttributeValue) SetB(v []byte) *AttributeValue { + s.B = v + return s +} + +// SetBOOL sets the BOOL field's value. +func (s *AttributeValue) SetBOOL(v bool) *AttributeValue { + s.BOOL = &v + return s +} + +// SetBS sets the BS field's value. +func (s *AttributeValue) SetBS(v [][]byte) *AttributeValue { + s.BS = v + return s +} + +// SetL sets the L field's value. +func (s *AttributeValue) SetL(v []*AttributeValue) *AttributeValue { + s.L = v + return s +} + +// SetM sets the M field's value. +func (s *AttributeValue) SetM(v map[string]*AttributeValue) *AttributeValue { + s.M = v + return s +} + +// SetN sets the N field's value. +func (s *AttributeValue) SetN(v string) *AttributeValue { + s.N = &v + return s +} + +// SetNS sets the NS field's value. +func (s *AttributeValue) SetNS(v []*string) *AttributeValue { + s.NS = v + return s +} + +// SetNULL sets the NULL field's value. +func (s *AttributeValue) SetNULL(v bool) *AttributeValue { + s.NULL = &v + return s +} + +// SetS sets the S field's value. +func (s *AttributeValue) SetS(v string) *AttributeValue { + s.S = &v + return s +} + +// SetSS sets the SS field's value. 
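+//
+// Editor's note: the following editor-added sketch (not generated SDK
+// documentation) shows how a hypothetical item might be expressed as
+// AttributeValue structures, for example when building a PutItem request or
+// ExpressionAttributeValues. All attribute names and values are assumptions.
+//
+// item := map[string]*dynamodb.AttributeValue{
+//     "Artist":  {S: aws.String("Acme Band")},
+//     "Year":    {N: aws.String("2015")},
+//     "InPrint": {BOOL: aws.Bool(true)},
+//     "Formats": {SS: []*string{aws.String("CD"), aws.String("Vinyl")}},
+// }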
+func (s *AttributeValue) SetSS(v []*string) *AttributeValue { + s.SS = v + return s +} + +// For the UpdateItem operation, represents the attributes to be modified, the +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, +// you will need to delete the item, and then use PutItem to create a new item +// with new attributes. +// +// Attribute values cannot be null; string and binary type attributes must have +// lengths greater than zero; and set type attributes must not be empty. Requests +// with empty values will be rejected with a ValidationException exception. +type AttributeValueUpdate struct { + _ struct{} `type:"structure"` + + // Specifies how to perform the update. Valid values are PUT (default), DELETE, + // and ADD. The behavior depends on whether the specified primary key already + // exists in the table. + // + // If an item with the specified Key is found in the table: + // + // * PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // * DELETE - If no value is specified, the attribute and its value are removed + // from the item. The data type of the specified value must match the existing + // value's data type. If a set of values is specified, then those values + // are subtracted from the old set. For example, if the attribute value was + // the set [a,b,c] and the DELETE action specified [a,c], then the final + // attribute value would be [b]. Specifying an empty set is an error. + // + // * ADD - If the attribute does not already exist, then the attribute and + // its values are added to the item. If the attribute does exist, then the + // behavior of ADD depends on the data type of the attribute: If the existing + // attribute is a number, and if Value is also a number, then the Value is + // mathematically added to the existing attribute. If Value is a negative + // number, then it is subtracted from the existing attribute. If you use + // ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. In addition, + // if you use ADD to update an existing item, and intend to increment or + // decrement an attribute value which does not yet exist, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // does not yet have an attribute named itemcount, but you decide to ADD + // the number 3 to this attribute anyway, even though it currently does not + // exist. DynamoDB will create the itemcount attribute, set its initial value + // to 0, and finally add 3 to it. The result will be a new itemcount attribute + // in the item, with a value of 3. If the existing data type is a set, and + // if the Value is also a set, then the Value is added to the existing set. + // (This is a set operation, not mathematical addition.) For example, if + // the attribute value was the set [1,2], and the ADD action specified [3], + // then the final attribute value would be [1,2,3]. An error occurs if an + // Add action is specified for a set attribute and the attribute type specified + // does not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The same holds true for number + // sets and binary sets. 
This action is only valid for an existing attribute + // whose data type is number or is a set. Do not use ADD for any other data + // types. + // + // If no item with the specified Key is found: + // + // * PUT - DynamoDB creates a new item with the specified primary key, and + // then adds the attribute. + // + // * DELETE - Nothing happens; there is no attribute to delete. + // + // * ADD - DynamoDB creates a new item with the supplied primary key and + // number (or set) for the attribute value. The only data types allowed are + // number, number set, string set or binary set. + Action *string `type:"string" enum:"AttributeAction"` + + // Represents the data for an attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValueUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValueUpdate) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *AttributeValueUpdate) SetAction(v string) *AttributeValueUpdate { + s.Action = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AttributeValueUpdate) SetValue(v *AttributeValue) *AttributeValueUpdate { + s.Value = v + return s +} + +// Represents the properties of the scaling policy. +type AutoScalingPolicyDescription struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // Represents a target tracking scaling policy configuration. + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyDescription) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AutoScalingPolicyDescription) SetPolicyName(v string) *AutoScalingPolicyDescription { + s.PolicyName = &v + return s +} + +// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. 
+func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) *AutoScalingPolicyDescription { + s.TargetTrackingScalingPolicyConfiguration = v + return s +} + +// Represents the auto scaling policy to be modified. +type AutoScalingPolicyUpdate struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // Represents a target tracking scaling policy configuration. + // + // TargetTrackingScalingPolicyConfiguration is a required field + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingPolicyUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"} + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.TargetTrackingScalingPolicyConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration")) + } + if s.TargetTrackingScalingPolicyConfiguration != nil { + if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil { + invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AutoScalingPolicyUpdate) SetPolicyName(v string) *AutoScalingPolicyUpdate { + s.PolicyName = &v + return s +} + +// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. +func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) *AutoScalingPolicyUpdate { + s.TargetTrackingScalingPolicyConfiguration = v + return s +} + +// Represents the auto scaling settings for a global table or global secondary +// index. +type AutoScalingSettingsDescription struct { + _ struct{} `type:"structure"` + + // Disabled auto scaling for this global table or global secondary index. + AutoScalingDisabled *bool `type:"boolean"` + + // Role ARN used for configuring the auto scaling policy. + AutoScalingRoleArn *string `type:"string"` + + // The maximum capacity units that a global table or global secondary index + // should be scaled up to. + MaximumUnits *int64 `min:"1" type:"long"` + + // The minimum capacity units that a global table or global secondary index + // should be scaled down to. + MinimumUnits *int64 `min:"1" type:"long"` + + // Information about the scaling policies. 
+ ScalingPolicies []*AutoScalingPolicyDescription `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsDescription) GoString() string { + return s.String() +} + +// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. +func (s *AutoScalingSettingsDescription) SetAutoScalingDisabled(v bool) *AutoScalingSettingsDescription { + s.AutoScalingDisabled = &v + return s +} + +// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. +func (s *AutoScalingSettingsDescription) SetAutoScalingRoleArn(v string) *AutoScalingSettingsDescription { + s.AutoScalingRoleArn = &v + return s +} + +// SetMaximumUnits sets the MaximumUnits field's value. +func (s *AutoScalingSettingsDescription) SetMaximumUnits(v int64) *AutoScalingSettingsDescription { + s.MaximumUnits = &v + return s +} + +// SetMinimumUnits sets the MinimumUnits field's value. +func (s *AutoScalingSettingsDescription) SetMinimumUnits(v int64) *AutoScalingSettingsDescription { + s.MinimumUnits = &v + return s +} + +// SetScalingPolicies sets the ScalingPolicies field's value. +func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPolicyDescription) *AutoScalingSettingsDescription { + s.ScalingPolicies = v + return s +} + +// Represents the auto scaling settings to be modified for a global table or +// global secondary index. +type AutoScalingSettingsUpdate struct { + _ struct{} `type:"structure"` + + // Disabled auto scaling for this global table or global secondary index. + AutoScalingDisabled *bool `type:"boolean"` + + // Role ARN used for configuring auto scaling policy. + AutoScalingRoleArn *string `min:"1" type:"string"` + + // The maximum capacity units that a global table or global secondary index + // should be scaled up to. + MaximumUnits *int64 `min:"1" type:"long"` + + // The minimum capacity units that a global table or global secondary index + // should be scaled down to. + MinimumUnits *int64 `min:"1" type:"long"` + + // The scaling policy to apply for scaling target global table or global secondary + // index capacity units. + ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
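+//
+// Editor's note: the following editor-added sketch (not generated SDK
+// documentation) shows one way an AutoScalingSettingsUpdate might be populated,
+// for example before passing it to UpdateTableReplicaAutoScaling. The capacity
+// limits and target value are assumptions.
+//
+// update := &dynamodb.AutoScalingSettingsUpdate{
+//     MinimumUnits: aws.Int64(5),
+//     MaximumUnits: aws.Int64(100),
+//     ScalingPolicyUpdate: &dynamodb.AutoScalingPolicyUpdate{
+//         TargetTrackingScalingPolicyConfiguration: &dynamodb.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{
+//             TargetValue: aws.Float64(70.0),
+//         },
+//     },
+// }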
+func (s *AutoScalingSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"} + if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingRoleArn", 1)) + } + if s.MaximumUnits != nil && *s.MaximumUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumUnits", 1)) + } + if s.MinimumUnits != nil && *s.MinimumUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinimumUnits", 1)) + } + if s.ScalingPolicyUpdate != nil { + if err := s.ScalingPolicyUpdate.Validate(); err != nil { + invalidParams.AddNested("ScalingPolicyUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. +func (s *AutoScalingSettingsUpdate) SetAutoScalingDisabled(v bool) *AutoScalingSettingsUpdate { + s.AutoScalingDisabled = &v + return s +} + +// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. +func (s *AutoScalingSettingsUpdate) SetAutoScalingRoleArn(v string) *AutoScalingSettingsUpdate { + s.AutoScalingRoleArn = &v + return s +} + +// SetMaximumUnits sets the MaximumUnits field's value. +func (s *AutoScalingSettingsUpdate) SetMaximumUnits(v int64) *AutoScalingSettingsUpdate { + s.MaximumUnits = &v + return s +} + +// SetMinimumUnits sets the MinimumUnits field's value. +func (s *AutoScalingSettingsUpdate) SetMinimumUnits(v int64) *AutoScalingSettingsUpdate { + s.MinimumUnits = &v + return s +} + +// SetScalingPolicyUpdate sets the ScalingPolicyUpdate field's value. +func (s *AutoScalingSettingsUpdate) SetScalingPolicyUpdate(v *AutoScalingPolicyUpdate) *AutoScalingSettingsUpdate { + s.ScalingPolicyUpdate = v + return s +} + +// Represents the properties of a target tracking scaling policy. +type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove capacity from the scalable resource. Otherwise, scale in is enabled + // and the target tracking policy can remove capacity from the scalable resource. + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the cooldown period after a scale-in, application + // auto scaling scales out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). 
+ // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string { + return s.String() +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.DisableScaleIn = &v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.TargetValue = &v + return s +} + +// Represents the settings of a target tracking scaling policy that will be +// modified. +type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct { + _ struct{} `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove capacity from the scalable resource. Otherwise, scale in is enabled + // and the target tracking policy can remove capacity from the scalable resource. + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the cooldown period after a scale-in, application + // auto scaling scales out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. 
The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"} + if s.TargetValue == nil { + invalidParams.Add(request.NewErrParamRequired("TargetValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.DisableScaleIn = &v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.TargetValue = &v + return s +} + +// Contains the description of the backup created for the table. +type BackupDescription struct { + _ struct{} `type:"structure"` + + // Contains the details of the backup created for the table. + BackupDetails *BackupDetails `type:"structure"` + + // Contains the details of the table when the backup was created. + SourceTableDetails *SourceTableDetails `type:"structure"` + + // Contains the details of the features enabled on the table when the backup + // was created. For example, LSIs, GSIs, streams, TTL. + SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDescription) GoString() string { + return s.String() +} + +// SetBackupDetails sets the BackupDetails field's value. +func (s *BackupDescription) SetBackupDetails(v *BackupDetails) *BackupDescription { + s.BackupDetails = v + return s +} + +// SetSourceTableDetails sets the SourceTableDetails field's value. +func (s *BackupDescription) SetSourceTableDetails(v *SourceTableDetails) *BackupDescription { + s.SourceTableDetails = v + return s +} + +// SetSourceTableFeatureDetails sets the SourceTableFeatureDetails field's value. +func (s *BackupDescription) SetSourceTableFeatureDetails(v *SourceTableFeatureDetails) *BackupDescription { + s.SourceTableFeatureDetails = v + return s +} + +// Contains the details of the backup created for the table. +type BackupDetails struct { + _ struct{} `type:"structure"` + + // ARN associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` + + // Time at which the backup was created. This is the request time of the backup. + // + // BackupCreationDateTime is a required field + BackupCreationDateTime *time.Time `type:"timestamp" required:"true"` + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time `type:"timestamp"` + + // Name of the requested backup. + // + // BackupName is a required field + BackupName *string `min:"3" type:"string" required:"true"` + + // Size of the backup in bytes. DynamoDB updates this value approximately every + // six hours. Recent changes might not be reflected in this value. + BackupSizeBytes *int64 `type:"long"` + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + // + // BackupStatus is a required field + BackupStatus *string `type:"string" required:"true" enum:"BackupStatus"` + + // BackupType: + // + // * USER - You create and manage these using the on-demand backup feature. + // + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at + // no additional cost). System backups allow you to restore the deleted table + // to the state it was in just before the point of deletion. + // + // * AWS_BACKUP - On-demand backup created by you from Backup service. + // + // BackupType is a required field + BackupType *string `type:"string" required:"true" enum:"BackupType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDetails) GoString() string { + return s.String() +} + +// SetBackupArn sets the BackupArn field's value. +func (s *BackupDetails) SetBackupArn(v string) *BackupDetails { + s.BackupArn = &v + return s +} + +// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. 
+func (s *BackupDetails) SetBackupCreationDateTime(v time.Time) *BackupDetails { + s.BackupCreationDateTime = &v + return s +} + +// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. +func (s *BackupDetails) SetBackupExpiryDateTime(v time.Time) *BackupDetails { + s.BackupExpiryDateTime = &v + return s +} + +// SetBackupName sets the BackupName field's value. +func (s *BackupDetails) SetBackupName(v string) *BackupDetails { + s.BackupName = &v + return s +} + +// SetBackupSizeBytes sets the BackupSizeBytes field's value. +func (s *BackupDetails) SetBackupSizeBytes(v int64) *BackupDetails { + s.BackupSizeBytes = &v + return s +} + +// SetBackupStatus sets the BackupStatus field's value. +func (s *BackupDetails) SetBackupStatus(v string) *BackupDetails { + s.BackupStatus = &v + return s +} + +// SetBackupType sets the BackupType field's value. +func (s *BackupDetails) SetBackupType(v string) *BackupDetails { + s.BackupType = &v + return s +} + +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +type BackupInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupInUseException) GoString() string { + return s.String() +} + +func newErrorBackupInUseException(v protocol.ResponseMetadata) error { + return &BackupInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BackupInUseException) Code() string { + return "BackupInUseException" +} + +// Message returns the exception's message. +func (s *BackupInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BackupInUseException) OrigErr() error { + return nil +} + +func (s *BackupInUseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BackupInUseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BackupInUseException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Backup not found for the given BackupARN. +type BackupNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s BackupNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupNotFoundException) GoString() string { + return s.String() +} + +func newErrorBackupNotFoundException(v protocol.ResponseMetadata) error { + return &BackupNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BackupNotFoundException) Code() string { + return "BackupNotFoundException" +} + +// Message returns the exception's message. +func (s *BackupNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BackupNotFoundException) OrigErr() error { + return nil +} + +func (s *BackupNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BackupNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BackupNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Contains details for the backup. +type BackupSummary struct { + _ struct{} `type:"structure"` + + // ARN associated with the backup. + BackupArn *string `min:"37" type:"string"` + + // Time at which the backup was created. + BackupCreationDateTime *time.Time `type:"timestamp"` + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time `type:"timestamp"` + + // Name of the specified backup. + BackupName *string `min:"3" type:"string"` + + // Size of the backup in bytes. + BackupSizeBytes *int64 `type:"long"` + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + BackupStatus *string `type:"string" enum:"BackupStatus"` + + // BackupType: + // + // * USER - You create and manage these using the on-demand backup feature. + // + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at + // no additional cost). System backups allow you to restore the deleted table + // to the state it was in just before the point of deletion. + // + // * AWS_BACKUP - On-demand backup created by you from Backup service. + BackupType *string `type:"string" enum:"BackupType"` + + // ARN associated with the table. + TableArn *string `type:"string"` + + // Unique identifier for the table. + TableId *string `type:"string"` + + // Name of the table. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupSummary) GoString() string { + return s.String() +} + +// SetBackupArn sets the BackupArn field's value. +func (s *BackupSummary) SetBackupArn(v string) *BackupSummary { + s.BackupArn = &v + return s +} + +// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. +func (s *BackupSummary) SetBackupCreationDateTime(v time.Time) *BackupSummary { + s.BackupCreationDateTime = &v + return s +} + +// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. +func (s *BackupSummary) SetBackupExpiryDateTime(v time.Time) *BackupSummary { + s.BackupExpiryDateTime = &v + return s +} + +// SetBackupName sets the BackupName field's value. +func (s *BackupSummary) SetBackupName(v string) *BackupSummary { + s.BackupName = &v + return s +} + +// SetBackupSizeBytes sets the BackupSizeBytes field's value. +func (s *BackupSummary) SetBackupSizeBytes(v int64) *BackupSummary { + s.BackupSizeBytes = &v + return s +} + +// SetBackupStatus sets the BackupStatus field's value. +func (s *BackupSummary) SetBackupStatus(v string) *BackupSummary { + s.BackupStatus = &v + return s +} + +// SetBackupType sets the BackupType field's value. +func (s *BackupSummary) SetBackupType(v string) *BackupSummary { + s.BackupType = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *BackupSummary) SetTableArn(v string) *BackupSummary { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *BackupSummary) SetTableId(v string) *BackupSummary { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BackupSummary) SetTableName(v string) *BackupSummary { + s.TableName = &v + return s +} + +type BatchExecuteStatementInput struct { + _ struct{} `type:"structure"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The list of PartiQL statements representing the batch to run. + // + // Statements is a required field + Statements []*BatchStatementRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
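The BatchExecuteStatementInput documented above carries a PartiQL batch: a required Statements list plus an optional ReturnConsumedCapacity setting. As a hedged illustration only (not part of the vendored code), a minimal sketch of driving it through the v1 SDK client; the session wiring and the "Music" table name are assumptions made for the example.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Credentials and region are assumed to come from the default chain.
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.BatchExecuteStatement(&dynamodb.BatchExecuteStatementInput{
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
		Statements: []*dynamodb.BatchStatementRequest{
			{
				// "Music" is a hypothetical table used only for illustration.
				Statement:  aws.String(`SELECT * FROM "Music" WHERE Artist = ?`),
				Parameters: []*dynamodb.AttributeValue{{S: aws.String("No One You Know")}},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Responses correspond to the submitted statements; a failed statement
	// carries a BatchStatementError instead of an Item.
	for i, r := range out.Responses {
		if r.Error != nil {
			fmt.Printf("statement %d failed: %s\n", i, aws.StringValue(r.Error.Message))
			continue
		}
		fmt.Printf("statement %d item: %v\n", i, r.Item)
	}
}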
+func (s BatchExecuteStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchExecuteStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchExecuteStatementInput"} + if s.Statements == nil { + invalidParams.Add(request.NewErrParamRequired("Statements")) + } + if s.Statements != nil && len(s.Statements) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statements", 1)) + } + if s.Statements != nil { + for i, v := range s.Statements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Statements", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *BatchExecuteStatementInput) SetReturnConsumedCapacity(v string) *BatchExecuteStatementInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetStatements sets the Statements field's value. +func (s *BatchExecuteStatementInput) SetStatements(v []*BatchStatementRequest) *BatchExecuteStatementInput { + s.Statements = v + return s +} + +type BatchExecuteStatementOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire operation. The values of the list + // are ordered according to the ordering of the statements. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // The response to each PartiQL statement in the batch. + Responses []*BatchStatementResponse `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchExecuteStatementOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchExecuteStatementOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *BatchExecuteStatementOutput) SetResponses(v []*BatchStatementResponse) *BatchExecuteStatementOutput { + s.Responses = v + return s +} + +// Represents the input of a BatchGetItem operation. +type BatchGetItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a map that describes + // one or more items to retrieve from that table. Each table name can be used + // only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // + // * ConsistentRead - If true, a strongly consistent read is used; if false + // (the default), an eventually consistent read is used. + // + // * ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. 
The following are some use + // cases for using ExpressionAttributeNames: To access an attribute whose + // name conflicts with a DynamoDB reserved word. To create a placeholder + // for repeating occurrences of an attribute name in an expression. To prevent + // special characters in an attribute name from being misinterpreted in an + // expression. Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: Percentile The + // name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could + // specify the following for ExpressionAttributeNames: {"#P":"Percentile"} + // You could then use this substitution in an expression, as in this example: + // #P = :val Tokens that begin with the : character are expression attribute + // values, which are placeholders for the actual value at runtime. For more + // information about expression attribute names, see Accessing Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. For each primary key, you must provide all of the + // key attributes. For example, with a simple primary key, you only need + // to provide the partition key value. For a composite key, you must provide + // both the partition key value and the sort key value. + // + // * ProjectionExpression - A string that identifies one or more attributes + // to retrieve from the table. These attributes can include scalars, sets, + // or elements of a JSON document. The attributes in the expression must + // be separated by commas. If no attribute names are specified, then all + // attributes are returned. If any of the requested attributes are not found, + // they do not appear in the result. For more information, see Accessing + // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression + // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + // + // RequestItems is a required field + RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. 
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"} + if s.RequestItems == nil { + invalidParams.Add(request.NewErrParamRequired("RequestItems")) + } + if s.RequestItems != nil && len(s.RequestItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) + } + if s.RequestItems != nil { + for i, v := range s.RequestItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestItems sets the RequestItems field's value. +func (s *BatchGetItemInput) SetRequestItems(v map[string]*KeysAndAttributes) *BatchGetItemInput { + s.RequestItems = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *BatchGetItemInput) SetReturnConsumedCapacity(v string) *BatchGetItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// Represents the output of a BatchGetItem operation. +type BatchGetItemOutput struct { + _ struct{} `type:"structure"` + + // The read capacity units consumed by the entire BatchGetItem operation. + // + // Each element consists of: + // + // * TableName - The table that consumed the provisioned throughput. + // + // * CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. + // + // * ProjectionExpression - One or more attributes to be retrieved from the + // table or index. By default, all attributes are returned. If a requested + // attribute is not found, it does not appear in the result. + // + // * ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. 
+ // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. + UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput { + s.Responses = v + return s +} + +// SetUnprocessedKeys sets the UnprocessedKeys field's value. +func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput { + s.UnprocessedKeys = v + return s +} + +// An error associated with a statement in a PartiQL batch that was run. +type BatchStatementError struct { + _ struct{} `type:"structure"` + + // The error code associated with the failed PartiQL batch statement. + Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"` + + // The error message associated with the PartiQL batch response. + Message *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *BatchStatementError) SetCode(v string) *BatchStatementError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *BatchStatementError) SetMessage(v string) *BatchStatementError { + s.Message = &v + return s +} + +// A PartiQL batch statement request. +type BatchStatementRequest struct { + _ struct{} `type:"structure"` + + // The read consistency of the PartiQL batch request. + ConsistentRead *bool `type:"boolean"` + + // The parameters associated with a PartiQL statement in the batch request. + Parameters []*AttributeValue `min:"1" type:"list"` + + // A valid PartiQL statement. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s BatchStatementRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchStatementRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchStatementRequest"} + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *BatchStatementRequest) SetConsistentRead(v bool) *BatchStatementRequest { + s.ConsistentRead = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *BatchStatementRequest) SetParameters(v []*AttributeValue) *BatchStatementRequest { + s.Parameters = v + return s +} + +// SetStatement sets the Statement field's value. +func (s *BatchStatementRequest) SetStatement(v string) *BatchStatementRequest { + s.Statement = &v + return s +} + +// A PartiQL batch statement response.. +type BatchStatementResponse struct { + _ struct{} `type:"structure"` + + // The error associated with a failed PartiQL batch statement. + Error *BatchStatementError `type:"structure"` + + // A DynamoDB item associated with a BatchStatementResponse + Item map[string]*AttributeValue `type:"map"` + + // The table name associated with a failed PartiQL batch statement. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementResponse) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *BatchStatementResponse) SetError(v *BatchStatementError) *BatchStatementResponse { + s.Error = v + return s +} + +// SetItem sets the Item field's value. +func (s *BatchStatementResponse) SetItem(v map[string]*AttributeValue) *BatchStatementResponse { + s.Item = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BatchStatementResponse) SetTableName(v string) *BatchStatementResponse { + s.TableName = &v + return s +} + +// Represents the input of a BatchWriteItem operation. +type BatchWriteItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a list of operations + // to be performed (DeleteRequest or PutRequest). 
Each element in the map consists + // of the following: + // + // * DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. + // For each primary key, you must provide all of the key attributes. For + // example, with a simple primary key, you only need to provide a value for + // the partition key. For a composite primary key, you must provide values + // for both the partition key and the sort key. + // + // * PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values are rejected + // with a ValidationException exception. If you specify any attributes that + // are part of an index key, then the data types for those attributes must + // match those of the schema in the table's attribute definition. + // + // RequestItems is a required field + RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchWriteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchWriteItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
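The RequestItems map documented above groups PutRequest and DeleteRequest entries per table. A hedged sketch of building such a request and resubmitting whatever the service returns as UnprocessedItems; the "Music" table and its attributes are assumptions for illustration, and real code would back off between retries.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// One PutRequest and one DeleteRequest against a hypothetical "Music" table.
	requests := map[string][]*dynamodb.WriteRequest{
		"Music": {
			{PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
				"Artist":    {S: aws.String("No One You Know")},
				"SongTitle": {S: aws.String("Call Me Today")},
			}}},
			{DeleteRequest: &dynamodb.DeleteRequest{Key: map[string]*dynamodb.AttributeValue{
				"Artist":    {S: aws.String("Acme Band")},
				"SongTitle": {S: aws.String("Happy Day")},
			}}},
		},
	}

	// Keep resubmitting until no unprocessed items remain.
	for len(requests) > 0 {
		out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{RequestItems: requests})
		if err != nil {
			log.Fatal(err)
		}
		requests = out.UnprocessedItems
	}
}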
+func (s *BatchWriteItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"} + if s.RequestItems == nil { + invalidParams.Add(request.NewErrParamRequired("RequestItems")) + } + if s.RequestItems != nil && len(s.RequestItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestItems sets the RequestItems field's value. +func (s *BatchWriteItemInput) SetRequestItems(v map[string][]*WriteRequest) *BatchWriteItemInput { + s.RequestItems = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *BatchWriteItemInput) SetReturnConsumedCapacity(v string) *BatchWriteItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *BatchWriteItemInput) SetReturnItemCollectionMetrics(v string) *BatchWriteItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// Represents the output of a BatchWriteItem operation. +type BatchWriteItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire BatchWriteItem operation. + // + // Each element consists of: + // + // * TableName - The table that consumed the provisioned throughput. + // + // * CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A list of tables that were processed by BatchWriteItem and, for each table, + // information about any item collections that were affected by individual DeleteItem + // or PutItem operations. + // + // Each entry consists of the following subelements: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item. + // + // * SizeEstimateRangeGB - An estimate of item collection size, expressed + // in GB. This is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on the table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` + + // A map of tables and requests against those tables that were not processed. + // The UnprocessedItems value is in the same form as RequestItems, so you can + // provide this value directly to a subsequent BatchWriteItem operation. For + // more information, see RequestItems in the Request Parameters section. + // + // Each UnprocessedItems entry consists of a table name and, for that table, + // a list of operations to perform (DeleteRequest or PutRequest). + // + // * DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. + // + // * PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. 
Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values will + // be rejected with a ValidationException exception. If you specify any attributes + // that are part of an index key, then the data types for those attributes + // must match those of the schema in the table's attribute definition. + // + // If there are no unprocessed items remaining, the response contains an empty + // UnprocessedItems map. + UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchWriteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchWriteItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchWriteItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchWriteItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *BatchWriteItemOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *BatchWriteItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// SetUnprocessedItems sets the UnprocessedItems field's value. +func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) *BatchWriteItemOutput { + s.UnprocessedItems = v + return s +} + +// Contains the details for the read/write capacity mode. This page talks about +// PROVISIONED and PAY_PER_REQUEST billing modes. For more information about +// these modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html). +// +// You may need to switch to on-demand mode at least once in order to return +// a BillingModeSummary response. +type BillingModeSummary struct { + _ struct{} `type:"structure"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. + // + // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend + // using PROVISIONED for predictable workloads. + // + // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. + // We recommend using PAY_PER_REQUEST for unpredictable workloads. + BillingMode *string `type:"string" enum:"BillingMode"` + + // Represents the time when PAY_PER_REQUEST was last set as the read/write capacity + // mode. + LastUpdateToPayPerRequestDateTime *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s BillingModeSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BillingModeSummary) GoString() string { + return s.String() +} + +// SetBillingMode sets the BillingMode field's value. +func (s *BillingModeSummary) SetBillingMode(v string) *BillingModeSummary { + s.BillingMode = &v + return s +} + +// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value. +func (s *BillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v time.Time) *BillingModeSummary { + s.LastUpdateToPayPerRequestDateTime = &v + return s +} + +// An ordered list of errors for each item in the request which caused the transaction +// to get cancelled. The values of the list are ordered according to the ordering +// of the TransactWriteItems request parameter. If no error occurred for the +// associated item an error with a Null code and Null message will be present. +type CancellationReason struct { + _ struct{} `type:"structure"` + + // Status code for the result of the cancelled transaction. + Code *string `type:"string"` + + // Item in the request which caused the transaction to get cancelled. + Item map[string]*AttributeValue `type:"map"` + + // Cancellation reason message description. + Message *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancellationReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancellationReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *CancellationReason) SetCode(v string) *CancellationReason { + s.Code = &v + return s +} + +// SetItem sets the Item field's value. +func (s *CancellationReason) SetItem(v map[string]*AttributeValue) *CancellationReason { + s.Item = v + return s +} + +// SetMessage sets the Message field's value. +func (s *CancellationReason) SetMessage(v string) *CancellationReason { + s.Message = &v + return s +} + +// Represents the amount of provisioned throughput capacity consumed on a table +// or an index. +type Capacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed on a table or an index. + CapacityUnits *float64 `type:"double"` + + // The total number of read capacity units consumed on a table or an index. + ReadCapacityUnits *float64 `type:"double"` + + // The total number of write capacity units consumed on a table or an index. + WriteCapacityUnits *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Capacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Capacity) GoString() string { + return s.String() +} + +// SetCapacityUnits sets the CapacityUnits field's value. +func (s *Capacity) SetCapacityUnits(v float64) *Capacity { + s.CapacityUnits = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *Capacity) SetReadCapacityUnits(v float64) *Capacity { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity { + s.WriteCapacityUnits = &v + return s +} + +// Represents the selection criteria for a Query or Scan operation: +// +// - For a Query operation, Condition is used for specifying the KeyConditions +// to use when querying a table or an index. For KeyConditions, only the +// following comparison operators are supported: EQ | LE | LT | GE | GT | +// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates +// the query results and returns only the desired values. +// +// - For a Scan operation, Condition is used in a ScanFilter, which evaluates +// the scan results and returns only the desired values. +type Condition struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes. For example, equals, greater than, + // less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // * EQ : Equal. EQ is supported for all data types, including lists and + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. + // + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // * LE : Less than or equal. 
AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). 
If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). A target attribute matches if the target value is greater than, + // or equal to, the first element and less than, or equal to, the second + // element. If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see Legacy + // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator is a required field + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Condition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Condition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Condition"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeValueList sets the AttributeValueList field's value. 
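The Condition type documented above is the legacy way to express KeyConditions and filters with comparison operators. A hedged sketch of a Query using EQ on the partition key and BETWEEN on the sort key; the "Music" table and attribute names are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Legacy KeyConditions on a hypothetical "Music" table.
	out, err := svc.Query(&dynamodb.QueryInput{
		TableName: aws.String("Music"),
		KeyConditions: map[string]*dynamodb.Condition{
			"Artist": {
				ComparisonOperator: aws.String(dynamodb.ComparisonOperatorEq),
				AttributeValueList: []*dynamodb.AttributeValue{{S: aws.String("No One You Know")}},
			},
			"SongTitle": {
				// BETWEEN takes exactly two values of the same type.
				ComparisonOperator: aws.String(dynamodb.ComparisonOperatorBetween),
				AttributeValueList: []*dynamodb.AttributeValue{
					{S: aws.String("A")},
					{S: aws.String("M")},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matched items:", aws.Int64Value(out.Count))
}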
+func (s *Condition) SetAttributeValueList(v []*AttributeValue) *Condition { + s.AttributeValueList = v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *Condition) SetComparisonOperator(v string) *Condition { + s.ComparisonOperator = &v + return s +} + +// Represents a request to perform a check that an item exists or to check the +// condition of specific attributes of the item. +type ConditionCheck struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // ConditionExpression is a required field + ConditionExpression *string `type:"string" required:"true"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be checked. Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure, + // the valid values are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table for the check item request. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionCheck) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConditionCheck) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConditionCheck"} + if s.ConditionExpression == nil { + invalidParams.Add(request.NewErrParamRequired("ConditionExpression")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *ConditionCheck) SetConditionExpression(v string) *ConditionCheck { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *ConditionCheck) SetExpressionAttributeNames(v map[string]*string) *ConditionCheck { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. 
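The ConditionCheck documented above is used as one element of a TransactWriteItems request: the transaction succeeds only if the checked condition holds. A hedged sketch under assumed table and attribute names ("Customers", "Orders"), which are illustrative only.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// The Put on "Orders" only happens if the checked customer item exists.
	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{
			{
				ConditionCheck: &dynamodb.ConditionCheck{
					TableName:           aws.String("Customers"),
					Key:                 map[string]*dynamodb.AttributeValue{"CustomerId": {S: aws.String("c-1")}},
					ConditionExpression: aws.String("attribute_exists(CustomerId)"),
				},
			},
			{
				Put: &dynamodb.Put{
					TableName: aws.String("Orders"),
					Item: map[string]*dynamodb.AttributeValue{
						"OrderId":    {S: aws.String("o-100")},
						"CustomerId": {S: aws.String("c-1")},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}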
+func (s *ConditionCheck) SetExpressionAttributeValues(v map[string]*AttributeValue) *ConditionCheck { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *ConditionCheck) SetKey(v map[string]*AttributeValue) *ConditionCheck { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *ConditionCheck) SetReturnValuesOnConditionCheckFailure(v string) *ConditionCheck { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ConditionCheck) SetTableName(v string) *ConditionCheck { + s.TableName = &v + return s +} + +// A condition specified in the operation could not be evaluated. +type ConditionalCheckFailedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The conditional request failed. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionalCheckFailedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionalCheckFailedException) GoString() string { + return s.String() +} + +func newErrorConditionalCheckFailedException(v protocol.ResponseMetadata) error { + return &ConditionalCheckFailedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConditionalCheckFailedException) Code() string { + return "ConditionalCheckFailedException" +} + +// Message returns the exception's message. +func (s *ConditionalCheckFailedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConditionalCheckFailedException) OrigErr() error { + return nil +} + +func (s *ConditionalCheckFailedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConditionalCheckFailedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConditionalCheckFailedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The capacity units consumed by an operation. The data returned includes the +// total provisioned throughput consumed, along with statistics for the table +// and any indexes involved in the operation. ConsumedCapacity is only returned +// if the request asked for it. For more information, see Provisioned Throughput +// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// in the Amazon DynamoDB Developer Guide. +type ConsumedCapacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed by the operation. + CapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on each global index affected by the operation. 
+ GlobalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on each local index affected by the operation. + LocalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The total number of read capacity units consumed by the operation. + ReadCapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on the table affected by the operation. + Table *Capacity `type:"structure"` + + // The name of the table that was affected by the operation. + TableName *string `min:"3" type:"string"` + + // The total number of write capacity units consumed by the operation. + WriteCapacityUnits *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConsumedCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConsumedCapacity) GoString() string { + return s.String() +} + +// SetCapacityUnits sets the CapacityUnits field's value. +func (s *ConsumedCapacity) SetCapacityUnits(v float64) *ConsumedCapacity { + s.CapacityUnits = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *ConsumedCapacity) SetGlobalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { + s.GlobalSecondaryIndexes = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *ConsumedCapacity) SetLocalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { + s.LocalSecondaryIndexes = v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ConsumedCapacity) SetReadCapacityUnits(v float64) *ConsumedCapacity { + s.ReadCapacityUnits = &v + return s +} + +// SetTable sets the Table field's value. +func (s *ConsumedCapacity) SetTable(v *Capacity) *ConsumedCapacity { + s.Table = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ConsumedCapacity) SetTableName(v string) *ConsumedCapacity { + s.TableName = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ConsumedCapacity) SetWriteCapacityUnits(v float64) *ConsumedCapacity { + s.WriteCapacityUnits = &v + return s +} + +// Represents the continuous backups and point in time recovery settings on +// the table. +type ContinuousBackupsDescription struct { + _ struct{} `type:"structure"` + + // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED + // + // ContinuousBackupsStatus is a required field + ContinuousBackupsStatus *string `type:"string" required:"true" enum:"ContinuousBackupsStatus"` + + // The description of the point in time recovery settings applied to the table. + PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
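// A small sketch of how the ConsumedCapacity structure above reaches callers: it is
// only populated when the request opts in via ReturnConsumedCapacity. The table name
// "Metrics" and key attribute "Id" are placeholders for this example.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName:              aws.String("Metrics"),
		Key:                    map[string]*dynamodb.AttributeValue{"Id": {S: aws.String("abc")}},
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
	})
	if err == nil && out.ConsumedCapacity != nil {
		// CapacityUnits is the aggregate cost of the whole operation.
		fmt.Printf("read cost: %.1f RCUs\n", aws.Float64Value(out.ConsumedCapacity.CapacityUnits))
	}
}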
+func (s ContinuousBackupsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsDescription) GoString() string { + return s.String() +} + +// SetContinuousBackupsStatus sets the ContinuousBackupsStatus field's value. +func (s *ContinuousBackupsDescription) SetContinuousBackupsStatus(v string) *ContinuousBackupsDescription { + s.ContinuousBackupsStatus = &v + return s +} + +// SetPointInTimeRecoveryDescription sets the PointInTimeRecoveryDescription field's value. +func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *PointInTimeRecoveryDescription) *ContinuousBackupsDescription { + s.PointInTimeRecoveryDescription = v + return s +} + +// Backups have not yet been enabled for this table. +type ContinuousBackupsUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsUnavailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsUnavailableException) GoString() string { + return s.String() +} + +func newErrorContinuousBackupsUnavailableException(v protocol.ResponseMetadata) error { + return &ContinuousBackupsUnavailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ContinuousBackupsUnavailableException) Code() string { + return "ContinuousBackupsUnavailableException" +} + +// Message returns the exception's message. +func (s *ContinuousBackupsUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ContinuousBackupsUnavailableException) OrigErr() error { + return nil +} + +func (s *ContinuousBackupsUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ContinuousBackupsUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ContinuousBackupsUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents a Contributor Insights summary entry. +type ContributorInsightsSummary struct { + _ struct{} `type:"structure"` + + // Describes the current status for contributor insights for the given table + // and index, if applicable. + ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` + + // Name of the index associated with the summary, if any. + IndexName *string `min:"3" type:"string"` + + // Name of the table associated with the summary. 
+ TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContributorInsightsSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContributorInsightsSummary) GoString() string { + return s.String() +} + +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. +func (s *ContributorInsightsSummary) SetContributorInsightsStatus(v string) *ContributorInsightsSummary { + s.ContributorInsightsStatus = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *ContributorInsightsSummary) SetIndexName(v string) *ContributorInsightsSummary { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ContributorInsightsSummary) SetTableName(v string) *ContributorInsightsSummary { + s.TableName = &v + return s +} + +type CreateBackupInput struct { + _ struct{} `type:"structure"` + + // Specified name for the backup. + // + // BackupName is a required field + BackupName *string `min:"3" type:"string" required:"true"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"} + if s.BackupName == nil { + invalidParams.Add(request.NewErrParamRequired("BackupName")) + } + if s.BackupName != nil && len(*s.BackupName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BackupName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupName sets the BackupName field's value. +func (s *CreateBackupInput) SetBackupName(v string) *CreateBackupInput { + s.BackupName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateBackupInput) SetTableName(v string) *CreateBackupInput { + s.TableName = &v + return s +} + +type CreateBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the backup created for the table. 
+ BackupDetails *BackupDetails `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDetails sets the BackupDetails field's value. +func (s *CreateBackupOutput) SetBackupDetails(v *BackupDetails) *CreateBackupOutput { + s.BackupDetails = v + return s +} + +// Represents a new global secondary index to be added to an existing table. +type CreateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be created. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The key schema for the global secondary index. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *CreateGlobalSecondaryIndexAction) SetIndexName(v string) *CreateGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) *CreateGlobalSecondaryIndexAction { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *CreateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateGlobalSecondaryIndexAction { + s.ProvisionedThroughput = v + return s +} + +type CreateGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The global table name. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // The Regions where the global table needs to be created. + // + // ReplicationGroup is a required field + ReplicationGroup []*Replica `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
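// An illustrative sketch of a CreateGlobalTable call built from the input type above.
// The table name and Regions are placeholders; a table with the same name and key
// schema is assumed to already exist in each listed Region.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.CreateGlobalTable(&dynamodb.CreateGlobalTableInput{
		GlobalTableName: aws.String("Orders"),
		ReplicationGroup: []*dynamodb.Replica{
			{RegionName: aws.String("us-east-1")},
			{RegionName: aws.String("eu-west-1")},
		},
	})
	if err != nil {
		fmt.Println("create global table failed:", err)
		return
	}
	fmt.Println("status:", aws.StringValue(out.GlobalTableDescription.GlobalTableStatus))
}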
+func (s *CreateGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.ReplicationGroup == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *CreateGlobalTableInput) SetGlobalTableName(v string) *CreateGlobalTableInput { + s.GlobalTableName = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CreateGlobalTableInput) SetReplicationGroup(v []*Replica) *CreateGlobalTableInput { + s.ReplicationGroup = v + return s +} + +type CreateGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *CreateGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +// Represents a replica to be added. +type CreateReplicaAction struct { + _ struct{} `type:"structure"` + + // The Region of the replica to be added. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *CreateReplicaAction) SetRegionName(v string) *CreateReplicaAction { + s.RegionName = &v + return s +} + +// Represents a replica to be created. 
+type CreateReplicationGroupMemberAction struct { + _ struct{} `type:"structure"` + + // Replica-specific global secondary index settings. + GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` + + // The KMS key that should be used for KMS encryption in the new replica. To + // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or + // alias ARN. Note that you should only provide this parameter if the key is + // different from the default DynamoDB KMS key alias/aws/dynamodb. + KMSMasterKeyId *string `type:"string"` + + // Replica-specific provisioned throughput. If not specified, uses the source + // table's provisioned throughput settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` + + // The Region where the new replica will be created. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Replica-specific table class. If not specified, uses the source table's table + // class. + TableClassOverride *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicationGroupMemberAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicationGroupMemberAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupMemberAction"} + if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1)) + } + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *CreateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *CreateReplicationGroupMemberAction { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *CreateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *CreateReplicationGroupMemberAction { + s.KMSMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. 
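// A rough sketch of how CreateReplicationGroupMemberAction is typically used: new
// replicas are requested through UpdateTable's ReplicaUpdates list. The table name
// and Region are placeholders, and the source table is assumed to meet the
// prerequisites for replication (e.g. streams enabled).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"),
		ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{
			{
				Create: &dynamodb.CreateReplicationGroupMemberAction{
					RegionName: aws.String("eu-west-1"),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("add replica failed:", err)
	}
}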
+func (s *CreateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *CreateReplicationGroupMemberAction { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *CreateReplicationGroupMemberAction) SetRegionName(v string) *CreateReplicationGroupMemberAction { + s.RegionName = &v + return s +} + +// SetTableClassOverride sets the TableClassOverride field's value. +func (s *CreateReplicationGroupMemberAction) SetTableClassOverride(v string) *CreateReplicationGroupMemberAction { + s.TableClassOverride = &v + return s +} + +// Represents the input of a CreateTable operation. +type CreateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // + // AttributeDefinitions is a required field + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + BillingMode *string `type:"string" enum:"BillingMode"` + + // One or more global secondary indexes (the maximum is 20) to be created on + // the table. Each global secondary index in the array includes the following: + // + // * IndexName - The name of the global secondary index. Must be unique only + // for this table. + // + // * KeySchema - Specifies the key schema for the global secondary index. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // Specifies the attributes that make up the primary key for a table or an index. + // The attributes in KeySchema must also be defined in the AttributeDefinitions + // array. 
For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) + // in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // + // * AttributeName - The name of this key attribute. + // + // * KeyType - The role that the key attribute will assume: HASH - partition + // key RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from the DynamoDB usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For a simple primary key (partition key), you must provide exactly one element + // with a KeyType of HASH. + // + // For a composite primary key (partition key and sort key), you must provide + // exactly two elements, in this order: The first element must have a KeyType + // of HASH, and the second element must have a KeyType of RANGE. + // + // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + // in the Amazon DynamoDB Developer Guide. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // One or more local secondary indexes (the maximum is 5) to be created on the + // table. Each index is scoped to a given partition key value. There is a 10 + // GB size limit per partition key value; otherwise, the size of a local secondary + // index is unconstrained. + // + // Each local secondary index in the array includes the following: + // + // * IndexName - The name of the local secondary index. Must be unique only + // for this table. + // + // * KeySchema - Specifies the key schema for the local secondary index. + // The key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // If you set BillingMode as PROVISIONED, you must specify this property. 
If + // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the settings used to enable server-side encryption. + SSESpecification *SSESpecification `type:"structure"` + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled + // (true) or disabled (false). + // + // * StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values + // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified + // item are written to the stream. NEW_IMAGE - The entire item, as it appears + // after it was modified, is written to the stream. OLD_IMAGE - The entire + // item, as it appeared before it was modified, is written to the stream. + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + TableClass *string `type:"string" enum:"TableClass"` + + // The name of the table to create. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // A list of key-value pairs to label the table. For more information, see Tagging + // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexes != nil { + for i, v := range s.LocalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + if s.StreamSpecification != nil { + if err := s.StreamSpecification.Validate(); err != nil { + invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput { + s.KeySchema = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. 
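// A minimal sketch wiring together the CreateTableInput fields documented above:
// a composite primary key (HASH + RANGE) with on-demand billing. The table and
// attribute names are placeholders for illustration.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	input := &dynamodb.CreateTableInput{
		TableName:   aws.String("Orders"),
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		// Every attribute referenced by a key schema must be declared here.
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("CustomerId"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
			{AttributeName: aws.String("OrderDate"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
		},
		// HASH (partition) key first, RANGE (sort) key second.
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("CustomerId"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			{AttributeName: aws.String("OrderDate"), KeyType: aws.String(dynamodb.KeyTypeRange)},
		},
	}
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	if _, err := svc.CreateTable(input); err != nil {
		fmt.Println("create table failed:", err)
	}
}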
+func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableClass sets the TableClass field's value. +func (s *CreateTableInput) SetTableClass(v string) *CreateTableInput { + s.TableClass = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateTableInput) SetTableName(v string) *CreateTableInput { + s.TableName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput { + s.Tags = v + return s +} + +// Represents the output of a CreateTable operation. +type CreateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput { + s.TableDescription = v + return s +} + +// Processing options for the CSV file being imported. +type CsvOptions struct { + _ struct{} `type:"structure"` + + // The delimiter used for separating items in the CSV file being imported. + Delimiter *string `min:"1" type:"string"` + + // List of the headers used to specify a common header for all source CSV files + // being imported. If this field is specified then the first line of each CSV + // file is treated as data instead of the header. If this field is not specified + // the the first line of each CSV file is treated as the header. + HeaderList []*string `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CsvOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CsvOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CsvOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CsvOptions"} + if s.Delimiter != nil && len(*s.Delimiter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1)) + } + if s.HeaderList != nil && len(s.HeaderList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HeaderList", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDelimiter sets the Delimiter field's value. +func (s *CsvOptions) SetDelimiter(v string) *CsvOptions { + s.Delimiter = &v + return s +} + +// SetHeaderList sets the HeaderList field's value. +func (s *CsvOptions) SetHeaderList(v []*string) *CsvOptions { + s.HeaderList = v + return s +} + +// Represents a request to perform a DeleteItem operation. +type Delete struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional delete to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be deleted. Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid + // values are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table in which the item to be deleted resides. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Delete) SetConditionExpression(v string) *Delete { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. 
+func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Delete) SetTableName(v string) *Delete { + s.TableName = &v + return s +} + +type DeleteBackupInput struct { + _ struct{} `type:"structure"` + + // The ARN associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput { + s.BackupArn = &v + return s +} + +type DeleteBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. +func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput { + s.BackupDescription = v + return s +} + +// Represents a global secondary index to be deleted from an existing table. 
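// A brief sketch of the UpdateTable call that removes a global secondary index via
// the action type defined below. The table and index names are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"),
		GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{
			{
				Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{
					IndexName: aws.String("ByOrderDate"),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("delete index failed:", err)
	}
}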
+type DeleteGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be deleted. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// Represents the input of a DeleteItem operation. +type DeleteItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional DeleteItem + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. 
+ // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to delete. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. 
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were deleted. For DeleteItem, the valid values are: + // + // * NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // * ALL_OLD - The content of the old item is returned. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // DeleteItem does not recognize any values other than NONE or ALL_OLD. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table from which to delete the item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. 
+func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput { + s.Key = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { + s.TableName = &v + return s +} + +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute names to AttributeValue objects, representing the item + // as it appeared before the DeleteItem operation. This map appears in the response + // only if ReturnValues was specified as ALL_OLD in the request. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the DeleteItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the DeleteItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a replica to be removed. +type DeleteReplicaAction struct { + _ struct{} `type:"structure"` + + // The Region of the replica to be removed. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction { + s.RegionName = &v + return s +} + +// Represents a replica to be deleted. +type DeleteReplicationGroupMemberAction struct { + _ struct{} `type:"structure"` + + // The Region where the replica exists. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicationGroupMemberAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicationGroupMemberAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupMemberAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicationGroupMemberAction) SetRegionName(v string) *DeleteReplicationGroupMemberAction { + s.RegionName = &v + return s +} + +// Represents a request to perform a DeleteItem operation on an item. +type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRequest) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { + s.Key = v + return s +} + +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. 
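+ //
+ // For example, a caller might delete a table and inspect this description
+ // while the deletion completes. A minimal, illustrative sketch (the table
+ // name is hypothetical and svc is assumed to be an initialized
+ // *dynamodb.DynamoDB client):
+ //
+ //    out, err := svc.DeleteTable(&dynamodb.DeleteTableInput{
+ //        TableName: aws.String("Music"),
+ //    })
+ //    if err == nil && out.TableDescription != nil {
+ //        _ = aws.StringValue(out.TableDescription.TableStatus) // typically "DELETING"
+ //    }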
+ TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput { + s.TableDescription = v + return s +} + +type DescribeBackupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput { + s.BackupArn = &v + return s +} + +type DescribeBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. 
+func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput { + s.BackupDescription = v + return s +} + +type DescribeContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Name of the table for which the customer wants to check the continuous backups + // and point in time recovery settings. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput { + s.TableName = &v + return s +} + +type DescribeContinuousBackupsOutput struct { + _ struct{} `type:"structure"` + + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsOutput) GoString() string { + return s.String() +} + +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. +func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput { + s.ContinuousBackupsDescription = v + return s +} + +type DescribeContributorInsightsInput struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to describe, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table to describe. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeContributorInsightsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContributorInsightsInput"} + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *DescribeContributorInsightsInput) SetIndexName(v string) *DescribeContributorInsightsInput { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContributorInsightsInput { + s.TableName = &v + return s +} + +type DescribeContributorInsightsOutput struct { + _ struct{} `type:"structure"` + + // List of names of the associated contributor insights rules. + ContributorInsightsRuleList []*string `type:"list"` + + // Current status of contributor insights. + ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` + + // Returns information about the last failure that was encountered. + // + // The most common exceptions for a FAILED status are: + // + // * LimitExceededException - Per-account Amazon CloudWatch Contributor Insights + // rule limit reached. Please disable Contributor Insights for other tables/indexes + // OR disable Contributor Insights rules before retrying. + // + // * AccessDeniedException - Amazon CloudWatch Contributor Insights rules + // cannot be modified due to insufficient permissions. + // + // * AccessDeniedException - Failed to create service-linked role for Contributor + // Insights due to insufficient permissions. + // + // * InternalServerError - Failed to create Amazon CloudWatch Contributor + // Insights rules. Please retry request. + FailureException *FailureException `type:"structure"` + + // The name of the global secondary index being described. + IndexName *string `min:"3" type:"string"` + + // Timestamp of the last time the status was changed. + LastUpdateDateTime *time.Time `type:"timestamp"` + + // The name of the table being described. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsOutput) GoString() string { + return s.String() +} + +// SetContributorInsightsRuleList sets the ContributorInsightsRuleList field's value. +func (s *DescribeContributorInsightsOutput) SetContributorInsightsRuleList(v []*string) *DescribeContributorInsightsOutput { + s.ContributorInsightsRuleList = v + return s +} + +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. +func (s *DescribeContributorInsightsOutput) SetContributorInsightsStatus(v string) *DescribeContributorInsightsOutput { + s.ContributorInsightsStatus = &v + return s +} + +// SetFailureException sets the FailureException field's value. +func (s *DescribeContributorInsightsOutput) SetFailureException(v *FailureException) *DescribeContributorInsightsOutput { + s.FailureException = v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *DescribeContributorInsightsOutput) SetIndexName(v string) *DescribeContributorInsightsOutput { + s.IndexName = &v + return s +} + +// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. +func (s *DescribeContributorInsightsOutput) SetLastUpdateDateTime(v time.Time) *DescribeContributorInsightsOutput { + s.LastUpdateDateTime = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContributorInsightsOutput) SetTableName(v string) *DescribeContributorInsightsOutput { + s.TableName = &v + return s +} + +type DescribeEndpointsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsInput) GoString() string { + return s.String() +} + +type DescribeEndpointsOutput struct { + _ struct{} `type:"structure"` + + // List of endpoints. + // + // Endpoints is a required field + Endpoints []*Endpoint `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsOutput) GoString() string { + return s.String() +} + +// SetEndpoints sets the Endpoints field's value. 
+func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { + s.Endpoints = v + return s +} + +type DescribeExportInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the export. + // + // ExportArn is a required field + ExportArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"} + if s.ExportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExportArn")) + } + if s.ExportArn != nil && len(*s.ExportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExportArn sets the ExportArn field's value. +func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput { + s.ExportArn = &v + return s +} + +type DescribeExportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput { + s.ExportDescription = v + return s +} + +type DescribeGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput { + s.GlobalTableName = &v + return s +} + +type DescribeGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +type DescribeGlobalTableSettingsInput struct { + _ struct{} `type:"structure"` + + // The name of the global table to describe. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput { + s.GlobalTableName = &v + return s +} + +type DescribeGlobalTableSettingsOutput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsOutput) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput { + s.GlobalTableName = &v + return s +} + +// SetReplicaSettings sets the ReplicaSettings field's value. +func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} + +type DescribeImportInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the table you're importing + // to. + // + // ImportArn is a required field + ImportArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeImportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImportInput"} + if s.ImportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ImportArn")) + } + if s.ImportArn != nil && len(*s.ImportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ImportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImportArn sets the ImportArn field's value. +func (s *DescribeImportInput) SetImportArn(v string) *DescribeImportInput { + s.ImportArn = &v + return s +} + +type DescribeImportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items + // were processed, and how many errors were encountered. + // + // ImportTableDescription is a required field + ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportOutput) GoString() string { + return s.String() +} + +// SetImportTableDescription sets the ImportTableDescription field's value. +func (s *DescribeImportOutput) SetImportTableDescription(v *ImportTableDescription) *DescribeImportOutput { + s.ImportTableDescription = v + return s +} + +type DescribeKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the table being described. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. 
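+//
+// Because every setter returns the receiver, the input can be built fluently.
+// A minimal, illustrative sketch (the table name is hypothetical and svc is
+// assumed to be an initialized *dynamodb.DynamoDB client):
+//
+//    out, err := svc.DescribeKinesisStreamingDestination(
+//        (&dynamodb.DescribeKinesisStreamingDestinationInput{}).SetTableName("Music"))
+//    if err == nil {
+//        for _, d := range out.KinesisDataStreamDestinations {
+//            _ = aws.StringValue(d.StreamArn)
+//            _ = aws.StringValue(d.DestinationStatus)
+//        }
+//    }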
+func (s *DescribeKinesisStreamingDestinationInput) SetTableName(v string) *DescribeKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +type DescribeKinesisStreamingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The list of replica structures for the table being described. + KinesisDataStreamDestinations []*KinesisDataStreamDestination `type:"list"` + + // The name of the table being described. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationOutput) GoString() string { + return s.String() +} + +// SetKinesisDataStreamDestinations sets the KinesisDataStreamDestinations field's value. +func (s *DescribeKinesisStreamingDestinationOutput) SetKinesisDataStreamDestinations(v []*KinesisDataStreamDestination) *DescribeKinesisStreamingDestinationOutput { + s.KinesisDataStreamDestinations = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DescribeKinesisStreamingDestinationOutput) SetTableName(v string) *DescribeKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// Represents the input of a DescribeLimits operation. Has no content. +type DescribeLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeLimits operation. +type DescribeLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum total read capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum total write capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum read capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the read + // capacity units provisioned for its global secondary indexes (GSIs). 
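+ //
+ // As a rough, illustrative sketch, all four limits can be read with a single
+ // DescribeLimits call (svc is assumed to be an initialized *dynamodb.DynamoDB
+ // client):
+ //
+ //    limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
+ //    if err == nil {
+ //        _ = aws.Int64Value(limits.AccountMaxReadCapacityUnits)
+ //        _ = aws.Int64Value(limits.AccountMaxWriteCapacityUnits)
+ //        _ = aws.Int64Value(limits.TableMaxReadCapacityUnits)
+ //        _ = aws.Int64Value(limits.TableMaxWriteCapacityUnits)
+ //    }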
+ TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum write capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the write + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsOutput) GoString() string { + return s.String() +} + +// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxReadCapacityUnits = &v + return s +} + +// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxWriteCapacityUnits = &v + return s +} + +// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxReadCapacityUnits = &v + return s +} + +// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxWriteCapacityUnits = &v + return s +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to describe. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. 
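+//
+// For example, the setter can be used to build and send a request in one
+// expression. Illustrative only; the table name is hypothetical and svc is
+// assumed to be an initialized *dynamodb.DynamoDB client:
+//
+//    out, err := svc.DescribeTable(new(dynamodb.DescribeTableInput).SetTableName("Music"))
+//    if err == nil && out.Table != nil {
+//        _ = aws.StringValue(out.Table.TableStatus)
+//        _ = aws.StringValue(out.Table.TableArn)
+//    }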
+func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + _ struct{} `type:"structure"` + + // The properties of the table. + Table *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableOutput) GoString() string { + return s.String() +} + +// SetTable sets the Table field's value. +func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput { + s.Table = v + return s +} + +type DescribeTableReplicaAutoScalingInput struct { + _ struct{} `type:"structure"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableReplicaAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableReplicaAutoScalingInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTableReplicaAutoScalingInput) SetTableName(v string) *DescribeTableReplicaAutoScalingInput { + s.TableName = &v + return s +} + +type DescribeTableReplicaAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // Represents the auto scaling properties of the table. + TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingOutput) GoString() string { + return s.String() +} + +// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. +func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *DescribeTableReplicaAutoScalingOutput { + s.TableAutoScalingDescription = v + return s +} + +type DescribeTimeToLiveInput struct { + _ struct{} `type:"structure"` + + // The name of the table to be described. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTimeToLiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput { + s.TableName = &v + return s +} + +type DescribeTimeToLiveOutput struct { + _ struct{} `type:"structure"` + + // The description of the Time to Live (TTL) status on the specified table. + TimeToLiveDescription *TimeToLiveDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveOutput) GoString() string { + return s.String() +} + +// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. +func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput { + s.TimeToLiveDescription = v + return s +} + +type DisableKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN for a Kinesis data stream. + // + // StreamArn is a required field + StreamArn *string `min:"37" type:"string" required:"true"` + + // The name of the DynamoDB table. 
+ // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableKinesisStreamingDestinationInput"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamArn sets the StreamArn field's value. +func (s *DisableKinesisStreamingDestinationInput) SetStreamArn(v string) *DisableKinesisStreamingDestinationInput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DisableKinesisStreamingDestinationInput) SetTableName(v string) *DisableKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +type DisableKinesisStreamingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The current status of the replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The ARN for the specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` + + // The name of the table being modified. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationOutput) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *DisableKinesisStreamingDestinationOutput { + s.DestinationStatus = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetStreamArn(v string) *DisableKinesisStreamingDestinationOutput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *DisableKinesisStreamingDestinationOutput) SetTableName(v string) *DisableKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// There was an attempt to insert an item with the same primary key as an item +// that already exists in the DynamoDB table. +type DuplicateItemException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DuplicateItemException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DuplicateItemException) GoString() string { + return s.String() +} + +func newErrorDuplicateItemException(v protocol.ResponseMetadata) error { + return &DuplicateItemException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DuplicateItemException) Code() string { + return "DuplicateItemException" +} + +// Message returns the exception's message. +func (s *DuplicateItemException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DuplicateItemException) OrigErr() error { + return nil +} + +func (s *DuplicateItemException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DuplicateItemException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DuplicateItemException) RequestID() string { + return s.RespMetadata.RequestID +} + +type EnableKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN for a Kinesis data stream. + // + // StreamArn is a required field + StreamArn *string `min:"37" type:"string" required:"true"` + + // The name of the DynamoDB table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
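+//
+// The SDK normally runs this validation before a request is sent, but it can
+// also be called directly. A small, illustrative sketch (the stream ARN and
+// table name are hypothetical):
+//
+//    in := &dynamodb.EnableKinesisStreamingDestinationInput{
+//        StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
+//        TableName: aws.String("Music"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams that lists every missing or
+//        // too-short parameter
+//    }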
+func (s *EnableKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableKinesisStreamingDestinationInput"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamArn sets the StreamArn field's value. +func (s *EnableKinesisStreamingDestinationInput) SetStreamArn(v string) *EnableKinesisStreamingDestinationInput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *EnableKinesisStreamingDestinationInput) SetTableName(v string) *EnableKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +type EnableKinesisStreamingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The current status of the replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The ARN for the specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` + + // The name of the table being modified. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableKinesisStreamingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableKinesisStreamingDestinationOutput) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *EnableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *EnableKinesisStreamingDestinationOutput { + s.DestinationStatus = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *EnableKinesisStreamingDestinationOutput) SetStreamArn(v string) *EnableKinesisStreamingDestinationOutput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *EnableKinesisStreamingDestinationOutput) SetTableName(v string) *EnableKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// An endpoint information details. +type Endpoint struct { + _ struct{} `type:"structure"` + + // IP address of the endpoint. + // + // Address is a required field + Address *string `type:"string" required:"true"` + + // Endpoint cache time to live (TTL) value. + // + // CachePeriodInMinutes is a required field + CachePeriodInMinutes *int64 `type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
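For orientation, a minimal usage sketch of the Kinesis streaming destination shapes above (not part of the vendored sources): it assumes valid AWS credentials, an existing DynamoDB table, and an existing Kinesis data stream, and the table name and stream ARN below are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// A shared credentials/config chain is assumed to be available.
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.EnableKinesisStreamingDestination(
		new(dynamodb.EnableKinesisStreamingDestinationInput).
			SetTableName("Orders").
			SetStreamArn("arn:aws:kinesis:us-east-1:123456789012:stream/orders-changes"))
	if err != nil {
		log.Fatal(err)
	}
	// Immediately after the call the status is typically ENABLING and moves to
	// ACTIVE once replication starts; disabling mirrors this through
	// DisableKinesisStreamingDestination.
	fmt.Println("destination status:", aws.StringValue(out.DestinationStatus))
}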
+func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Endpoint) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *Endpoint) SetAddress(v string) *Endpoint { + s.Address = &v + return s +} + +// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value. +func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint { + s.CachePeriodInMinutes = &v + return s +} + +type ExecuteStatementInput struct { + _ struct{} `type:"structure"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool `type:"boolean"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, along with a key in LastEvaluatedKey to apply in a subsequent + // operation so you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. + Limit *int64 `min:"1" type:"integer"` + + // Set this value to get remaining results, if NextToken was returned in the + // statement response. + NextToken *string `min:"1" type:"string"` + + // The parameters for the PartiQL statement, if any. + Parameters []*AttributeValue `min:"1" type:"list"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The PartiQL statement representing the operation to run. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ExecuteStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExecuteStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecuteStatementInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *ExecuteStatementInput) SetConsistentRead(v bool) *ExecuteStatementInput { + s.ConsistentRead = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ExecuteStatementInput) SetLimit(v int64) *ExecuteStatementInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ExecuteStatementInput) SetNextToken(v string) *ExecuteStatementInput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *ExecuteStatementInput) SetParameters(v []*AttributeValue) *ExecuteStatementInput { + s.Parameters = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *ExecuteStatementInput) SetReturnConsumedCapacity(v string) *ExecuteStatementInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetStatement sets the Statement field's value. +func (s *ExecuteStatementInput) SetStatement(v string) *ExecuteStatementInput { + s.Statement = &v + return s +} + +type ExecuteStatementOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // If a read operation was used, this property will contain the result of the + // read operation; a map of attribute names and their values. For the write + // operations this value will be empty. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. If LastEvaluatedKey is empty, then the "last page" + // of results has been processed and there is no more data to be retrieved. + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. 
+	LastEvaluatedKey map[string]*AttributeValue `type:"map"`
+
+	// If the response of a read request exceeds the response payload limit, DynamoDB
+	// will set this value in the response. If set, you can use this value in the
+	// subsequent request to get the remaining results.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteStatementOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteStatementOutput) GoString() string {
+	return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *ExecuteStatementOutput) SetConsumedCapacity(v *ConsumedCapacity) *ExecuteStatementOutput {
+	s.ConsumedCapacity = v
+	return s
+}
+
+// SetItems sets the Items field's value.
+func (s *ExecuteStatementOutput) SetItems(v []map[string]*AttributeValue) *ExecuteStatementOutput {
+	s.Items = v
+	return s
+}
+
+// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
+func (s *ExecuteStatementOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ExecuteStatementOutput {
+	s.LastEvaluatedKey = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ExecuteStatementOutput) SetNextToken(v string) *ExecuteStatementOutput {
+	s.NextToken = &v
+	return s
+}
+
+type ExecuteTransactionInput struct {
+	_ struct{} `type:"structure"`
+
+	// Providing a ClientRequestToken makes the call to ExecuteTransaction idempotent,
+	// meaning that multiple identical calls have the same effect as one single call.
+	ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
+
+	// Determines the level of detail about either provisioned or on-demand throughput
+	// consumption that is returned in the response. For more information, see TransactGetItems
+	// (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html)
+	// and TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html).
+	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+	// The list of PartiQL statements representing the transaction to run.
+	//
+	// TransactStatements is a required field
+	TransactStatements []*ParameterizedStatement `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteTransactionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteTransactionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
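A sketch of how ExecuteStatementInput is commonly assembled with the fluent setters above and paged through NextToken; it assumes the same imports and a *dynamodb.DynamoDB client (svc) as in the earlier sketch, and the table, attribute, and function names are invented.

// queryShippedOrders runs a parameterized PartiQL SELECT against a
// hypothetical "Orders" table and pages through the results via NextToken.
func queryShippedOrders(svc *dynamodb.DynamoDB) error {
	input := new(dynamodb.ExecuteStatementInput).
		SetStatement(`SELECT * FROM "Orders" WHERE "OrderStatus" = ?`).
		SetParameters([]*dynamodb.AttributeValue{{S: aws.String("SHIPPED")}}).
		SetConsistentRead(true).
		SetLimit(25)

	for {
		out, err := svc.ExecuteStatement(input)
		if err != nil {
			return err
		}
		for _, item := range out.Items {
			fmt.Println(item)
		}
		if out.NextToken == nil {
			return nil // last page reached
		}
		// Re-send the same statement with the token to fetch the next page.
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}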
+func (s *ExecuteTransactionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecuteTransactionInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.TransactStatements == nil { + invalidParams.Add(request.NewErrParamRequired("TransactStatements")) + } + if s.TransactStatements != nil && len(s.TransactStatements) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactStatements", 1)) + } + if s.TransactStatements != nil { + for i, v := range s.TransactStatements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactStatements", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *ExecuteTransactionInput) SetClientRequestToken(v string) *ExecuteTransactionInput { + s.ClientRequestToken = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *ExecuteTransactionInput) SetReturnConsumedCapacity(v string) *ExecuteTransactionInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTransactStatements sets the TransactStatements field's value. +func (s *ExecuteTransactionInput) SetTransactStatements(v []*ParameterizedStatement) *ExecuteTransactionInput { + s.TransactStatements = v + return s +} + +type ExecuteTransactionOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire operation. The values of the list + // are ordered according to the ordering of the statements. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // The response to a PartiQL transaction. + Responses []*ItemResponse `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteTransactionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteTransactionOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *ExecuteTransactionOutput) SetConsumedCapacity(v []*ConsumedCapacity) *ExecuteTransactionOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *ExecuteTransactionOutput) SetResponses(v []*ItemResponse) *ExecuteTransactionOutput { + s.Responses = v + return s +} + +// Represents a condition to be compared with an attribute value. This condition +// can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison +// evaluates to true, the operation succeeds; if not, the operation fails. You +// can use ExpectedAttributeValue in one of two different ways: +// +// - Use AttributeValueList to specify one or more values to compare against +// an attribute. Use ComparisonOperator to specify how you want to perform +// the comparison. If the comparison evaluates to true, then the conditional +// operation succeeds. 
+// +// - Use Value to specify a value that DynamoDB will compare against an attribute. +// If the values match, then ExpectedAttributeValue evaluates to true and +// the conditional operation succeeds. Optionally, you can also set Exists +// to false, indicating that you do not expect to find the attribute value +// in the table. In this case, the conditional operation succeeds only if +// the comparison evaluates to false. +// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Note that if you use both sets of parameters at once, DynamoDB will return +// a ValidationException exception. +type ExpectedAttributeValue struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes in the AttributeValueList. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // * EQ : Equal. EQ is supported for all data types, including lists and + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. + // + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). 
If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. 
NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). A target attribute matches if the target value is greater than, + // or equal to, the first element and less than, or equal to, the second + // element. If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]} + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // * If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionCheckFailedException. + // + // * If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the + // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // * Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // * Exists is false but you also provide a Value. (You cannot expect an + // attribute to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for the expected attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
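A hedged sketch of the legacy conditional form described above, using ExpectedAttributeValue with Exists on a PutItem call; PutItemInput and its Expected parameter are defined elsewhere in this file, all names below are placeholders, and new code would normally prefer ConditionExpression over this legacy form.

// putOrderIfAbsent writes an item only when no item with the same primary key
// exists yet, using the legacy Expected / ExpectedAttributeValue form.
// Assumes svc is a *dynamodb.DynamoDB client as in the earlier sketches.
func putOrderIfAbsent(svc *dynamodb.DynamoDB) error {
	_, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("Orders"),
		Item: map[string]*dynamodb.AttributeValue{
			"OrderId":     {S: aws.String("o-1001")},
			"OrderStatus": {S: aws.String("NEW")},
		},
		Expected: map[string]*dynamodb.ExpectedAttributeValue{
			// Exists=false: the write succeeds only if the "OrderId" attribute
			// (and hence the item) is absent; otherwise the conditional
			// operation fails.
			"OrderId": {Exists: aws.Bool(false)},
		},
	})
	return err
}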
+func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// SetAttributeValueList sets the AttributeValueList field's value. +func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { + s.AttributeValueList = v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { + s.ComparisonOperator = &v + return s +} + +// SetExists sets the Exists field's value. +func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { + s.Exists = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { + s.Value = v + return s +} + +// There was a conflict when writing to the specified S3 bucket. +type ExportConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportConflictException) GoString() string { + return s.String() +} + +func newErrorExportConflictException(v protocol.ResponseMetadata) error { + return &ExportConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExportConflictException) Code() string { + return "ExportConflictException" +} + +// Message returns the exception's message. +func (s *ExportConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportConflictException) OrigErr() error { + return nil +} + +func (s *ExportConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExportConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the properties of the exported table. +type ExportDescription struct { + _ struct{} `type:"structure"` + + // The billable size of the table export. + BilledSizeBytes *int64 `type:"long"` + + // The client token that was provided for the export task. A client token makes + // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple + // identical calls have the same effect as one single call. 
+ ClientToken *string `type:"string"` + + // The time at which the export task completed. + EndTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table export. + ExportArn *string `min:"37" type:"string"` + + // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON + // or ION. + ExportFormat *string `type:"string" enum:"ExportFormat"` + + // The name of the manifest file for the export task. + ExportManifest *string `type:"string"` + + // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + // FAILED. + ExportStatus *string `type:"string" enum:"ExportStatus"` + + // Point in time from which table data was exported. + ExportTime *time.Time `type:"timestamp"` + + // Status code for the result of the failed export. + FailureCode *string `type:"string"` + + // Export failure reason description. + FailureMessage *string `type:"string"` + + // The number of items exported. + ItemCount *int64 `type:"long"` + + // The name of the Amazon S3 bucket containing the export. + S3Bucket *string `type:"string"` + + // The ID of the Amazon Web Services account that owns the bucket containing + // the export. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix used as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data is stored. Valid + // values for S3SseAlgorithm are: + // + // * AES256 - server-side encryption with Amazon S3 managed keys + // + // * KMS - server-side encryption with KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` + + // The ID of the KMS managed key used to encrypt the S3 bucket where export + // data is stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The time at which the export task began. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table that was exported. + TableArn *string `type:"string"` + + // Unique ID of the table that was exported. + TableId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportDescription) GoString() string { + return s.String() +} + +// SetBilledSizeBytes sets the BilledSizeBytes field's value. +func (s *ExportDescription) SetBilledSizeBytes(v int64) *ExportDescription { + s.BilledSizeBytes = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportDescription) SetClientToken(v string) *ExportDescription { + s.ClientToken = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ExportDescription) SetEndTime(v time.Time) *ExportDescription { + s.EndTime = &v + return s +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportDescription) SetExportArn(v string) *ExportDescription { + s.ExportArn = &v + return s +} + +// SetExportFormat sets the ExportFormat field's value. 
+func (s *ExportDescription) SetExportFormat(v string) *ExportDescription { + s.ExportFormat = &v + return s +} + +// SetExportManifest sets the ExportManifest field's value. +func (s *ExportDescription) SetExportManifest(v string) *ExportDescription { + s.ExportManifest = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportDescription) SetExportStatus(v string) *ExportDescription { + s.ExportStatus = &v + return s +} + +// SetExportTime sets the ExportTime field's value. +func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { + s.ExportTime = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { + s.FailureMessage = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { + s.ItemCount = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportDescription) SetS3Bucket(v string) *ExportDescription { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *ExportDescription) SetS3BucketOwner(v string) *ExportDescription { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportDescription) SetS3Prefix(v string) *ExportDescription { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportDescription) SetS3SseAlgorithm(v string) *ExportDescription { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportDescription) SetS3SseKmsKeyId(v string) *ExportDescription { + s.S3SseKmsKeyId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ExportDescription) SetStartTime(v time.Time) *ExportDescription { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportDescription) SetTableArn(v string) *ExportDescription { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *ExportDescription) SetTableId(v string) *ExportDescription { + s.TableId = &v + return s +} + +// The specified export was not found. +type ExportNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotFoundException) GoString() string { + return s.String() +} + +func newErrorExportNotFoundException(v protocol.ResponseMetadata) error { + return &ExportNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *ExportNotFoundException) Code() string { + return "ExportNotFoundException" +} + +// Message returns the exception's message. +func (s *ExportNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportNotFoundException) OrigErr() error { + return nil +} + +func (s *ExportNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExportNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Summary information about an export task. +type ExportSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the export. + ExportArn *string `min:"37" type:"string"` + + // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + // FAILED. + ExportStatus *string `type:"string" enum:"ExportStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportSummary) GoString() string { + return s.String() +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportSummary) SetExportArn(v string) *ExportSummary { + s.ExportArn = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportSummary) SetExportStatus(v string) *ExportSummary { + s.ExportStatus = &v + return s +} + +type ExportTableToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientToken makes the call to ExportTableToPointInTimeInput idempotent, + // meaning that multiple identical calls have the same effect as one single + // call. + // + // A client token is valid for 8 hours after the first request that uses it + // is completed. After 8 hours, any request with the same client token is treated + // as a new request. Do not resubmit the same request with the same client token + // for more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an ImportConflictException. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON + // or ION. + ExportFormat *string `type:"string" enum:"ExportFormat"` + + // Time in the past from which to export table data, counted in seconds from + // the start of the Unix epoch. The table export will be a snapshot of the table's + // state at this point in time. + ExportTime *time.Time `type:"timestamp"` + + // The name of the Amazon S3 bucket to export the snapshot to. 
+ // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The ID of the Amazon Web Services account that owns the bucket the export + // will be stored in. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix to use as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data will be stored. Valid + // values for S3SseAlgorithm are: + // + // * AES256 - server-side encryption with Amazon S3 managed keys + // + // * KMS - server-side encryption with KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` + + // The ID of the KMS managed key used to encrypt the S3 bucket where export + // data will be stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) associated with the table to export. + // + // TableArn is a required field + TableArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTableToPointInTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTableToPointInTimeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTableToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTableToPointInTimeInput"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + if s.S3SseKmsKeyId != nil && len(*s.S3SseKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3SseKmsKeyId", 1)) + } + if s.TableArn == nil { + invalidParams.Add(request.NewErrParamRequired("TableArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportTableToPointInTimeInput) SetClientToken(v string) *ExportTableToPointInTimeInput { + s.ClientToken = &v + return s +} + +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportTableToPointInTimeInput) SetExportFormat(v string) *ExportTableToPointInTimeInput { + s.ExportFormat = &v + return s +} + +// SetExportTime sets the ExportTime field's value. +func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableToPointInTimeInput { + s.ExportTime = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *ExportTableToPointInTimeInput) SetS3BucketOwner(v string) *ExportTableToPointInTimeInput { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTableToPointInTimeInput) SetS3Prefix(v string) *ExportTableToPointInTimeInput { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. 
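A sketch of starting an export with the input type above, again assuming a *dynamodb.DynamoDB client named svc and the same imports as earlier; the table ARN, bucket, prefix, and function name are placeholders.

// exportOrdersTable starts an asynchronous point-in-time export to S3 and
// returns the export ARN, which can later be passed to DescribeExport.
func exportOrdersTable(svc *dynamodb.DynamoDB) (string, error) {
	out, err := svc.ExportTableToPointInTime(
		new(dynamodb.ExportTableToPointInTimeInput).
			SetTableArn("arn:aws:dynamodb:us-east-1:123456789012:table/Orders").
			SetS3Bucket("example-export-bucket").
			SetS3Prefix("orders/2022-12").
			SetExportFormat("DYNAMODB_JSON"))
	if err != nil {
		return "", err
	}
	// ExportStatus starts as IN_PROGRESS; poll until it becomes COMPLETED or FAILED.
	return aws.StringValue(out.ExportDescription.ExportArn), nil
}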
+func (s *ExportTableToPointInTimeInput) SetS3SseAlgorithm(v string) *ExportTableToPointInTimeInput {
+	s.S3SseAlgorithm = &v
+	return s
+}
+
+// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value.
+func (s *ExportTableToPointInTimeInput) SetS3SseKmsKeyId(v string) *ExportTableToPointInTimeInput {
+	s.S3SseKmsKeyId = &v
+	return s
+}
+
+// SetTableArn sets the TableArn field's value.
+func (s *ExportTableToPointInTimeInput) SetTableArn(v string) *ExportTableToPointInTimeInput {
+	s.TableArn = &v
+	return s
+}
+
+type ExportTableToPointInTimeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Contains a description of the table export.
+	ExportDescription *ExportDescription `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportTableToPointInTimeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportTableToPointInTimeOutput) GoString() string {
+	return s.String()
+}
+
+// SetExportDescription sets the ExportDescription field's value.
+func (s *ExportTableToPointInTimeOutput) SetExportDescription(v *ExportDescription) *ExportTableToPointInTimeOutput {
+	s.ExportDescription = v
+	return s
+}
+
+// Represents a failure of a contributor insights operation.
+type FailureException struct {
+	_ struct{} `type:"structure"`
+
+	// Description of the failure.
+	ExceptionDescription *string `type:"string"`
+
+	// Exception name.
+	ExceptionName *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FailureException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FailureException) GoString() string {
+	return s.String()
+}
+
+// SetExceptionDescription sets the ExceptionDescription field's value.
+func (s *FailureException) SetExceptionDescription(v string) *FailureException {
+	s.ExceptionDescription = &v
+	return s
+}
+
+// SetExceptionName sets the ExceptionName field's value.
+func (s *FailureException) SetExceptionName(v string) *FailureException {
+	s.ExceptionName = &v
+	return s
+}
+
+// Specifies an item and related attribute values to retrieve in a TransactGetItem
+// object.
+type Get struct {
+	_ struct{} `type:"structure"`
+
+	// One or more substitution tokens for attribute names in the ProjectionExpression
+	// parameter.
+	ExpressionAttributeNames map[string]*string `type:"map"`
+
+	// A map of attribute names to AttributeValue objects that specifies the primary
+	// key of the item to retrieve.
+ // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes of the specified item to + // retrieve from the table. The attributes in the expression must be separated + // by commas. If no attribute names are specified, then all attributes of the + // specified item are returned. If any of the requested attributes are not found, + // they do not appear in the result. + ProjectionExpression *string `type:"string"` + + // The name of the table from which to retrieve the specified item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Get) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Get) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Get) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Get"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Get) SetExpressionAttributeNames(v map[string]*string) *Get { + s.ExpressionAttributeNames = v + return s +} + +// SetKey sets the Key field's value. +func (s *Get) SetKey(v map[string]*AttributeValue) *Get { + s.Key = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *Get) SetProjectionExpression(v string) *Get { + s.ProjectionExpression = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Get) SetTableName(v string) *Get { + s.TableName = &v + return s +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. 
+ // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes are returned. If + // any of the requested attributes are not found, they do not appear in the + // result. + // + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The name of the table containing the requested item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetItemInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *GetItemInput) SetAttributesToGet(v []*string) *GetItemInput { + s.AttributesToGet = v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *GetItemInput) SetConsistentRead(v bool) *GetItemInput { + s.ConsistentRead = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *GetItemInput) SetExpressionAttributeNames(v map[string]*string) *GetItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetKey sets the Key field's value. +func (s *GetItemInput) SetKey(v map[string]*AttributeValue) *GetItemInput { + s.Key = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *GetItemInput) SetProjectionExpression(v string) *GetItemInput { + s.ProjectionExpression = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *GetItemInput) SetReturnConsumedCapacity(v string) *GetItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GetItemInput) SetTableName(v string) *GetItemInput { + s.TableName = &v + return s +} + +// Represents the output of a GetItem operation. +type GetItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the GetItem operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression. + Item map[string]*AttributeValue `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
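A sketch of a GetItem call that uses the #P / Percentile substitution from the ExpressionAttributeNames documentation above, assuming a *dynamodb.DynamoDB client named svc and the same imports as earlier; the table, key, and function names are placeholders.

// getLatencyPercentile reads a single attribute whose name ("Percentile") is a
// reserved word, dereferencing it through the #P substitution token.
func getLatencyPercentile(svc *dynamodb.DynamoDB) (*dynamodb.AttributeValue, error) {
	out, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName:      aws.String("Stats"),
		ConsistentRead: aws.Bool(true),
		Key: map[string]*dynamodb.AttributeValue{
			"StatId": {S: aws.String("latency-p99")},
		},
		ProjectionExpression:     aws.String("#P"),
		ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
	})
	if err != nil {
		return nil, err
	}
	// out.Item is empty when no item with the given key exists.
	return out.Item["Percentile"], nil
}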
+func (s GetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *GetItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *GetItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItem sets the Item field's value. +func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput { + s.Item = v + return s +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
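+// The sketch below is illustrative only (index, attribute, and capacity values
+// are hypothetical); it shows the shape of a GlobalSecondaryIndex that passes
+// Validate: a named index, a HASH/RANGE key schema, and a projection.
+//
+//    gsi := &GlobalSecondaryIndex{
+//        IndexName: aws.String("GameTitleIndex"),
+//        KeySchema: []*KeySchemaElement{
+//            {AttributeName: aws.String("GameTitle"), KeyType: aws.String("HASH")},
+//            {AttributeName: aws.String("TopScore"), KeyType: aws.String("RANGE")},
+//        },
+//        Projection: &Projection{ProjectionType: aws.String("KEYS_ONLY")},
+//        ProvisionedThroughput: &ProvisionedThroughput{
+//            ReadCapacityUnits:  aws.Int64(5),
+//            WriteCapacityUnits: aws.Int64(5),
+//        },
+//    }
+//    if err := gsi.Validate(); err != nil {
+//        // fix the parameters before using the index in a CreateTable call
+//    }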
+func (s *GlobalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndex) SetIndexName(v string) *GlobalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *GlobalSecondaryIndex) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndex { + s.ProvisionedThroughput = v + return s +} + +// Represents the auto scaling settings of a global secondary index for a global +// table that will be modified. +type GlobalSecondaryIndexAutoScalingUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexAutoScalingUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexAutoScalingUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GlobalSecondaryIndexAutoScalingUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexAutoScalingUpdate"} + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedWriteCapacityAutoScalingUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *GlobalSecondaryIndexAutoScalingUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value. +func (s *GlobalSecondaryIndexAutoScalingUpdate) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *GlobalSecondaryIndexAutoScalingUpdate { + s.ProvisionedWriteCapacityAutoScalingUpdate = v + return s +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether the index is currently backfilling. Backfilling is the + // process of reading items from the table and determining whether they can + // be added to the index. (Not all items will qualify: For example, a partition + // key cannot have any duplicate values.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // You can delete an index that is being created during the Backfilling phase + // when IndexStatus is set to CREATING and Backfilling is true. You can't delete + // the index that is being created when IndexStatus is set to CREATING and Backfilling + // is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The current state of the global secondary index: + // + // * CREATING - The index is being created. + // + // * UPDATING - The index is being updated. + // + // * DELETING - The index is being deleted. + // + // * ACTIVE - The index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// SetBackfilling sets the Backfilling field's value. +func (s *GlobalSecondaryIndexDescription) SetBackfilling(v bool) *GlobalSecondaryIndexDescription { + s.Backfilling = &v + return s +} + +// SetIndexArn sets the IndexArn field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexArn(v string) *GlobalSecondaryIndexDescription { + s.IndexArn = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexName(v string) *GlobalSecondaryIndexDescription { + s.IndexName = &v + return s +} + +// SetIndexSizeBytes sets the IndexSizeBytes field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *GlobalSecondaryIndexDescription { + s.IndexSizeBytes = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexStatus(v string) *GlobalSecondaryIndexDescription { + s.IndexStatus = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *GlobalSecondaryIndexDescription) SetItemCount(v int64) *GlobalSecondaryIndexDescription { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexDescription { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. 
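+// Illustrative check of the backfilling behavior documented on
+// GlobalSecondaryIndexDescription above. The desc variable is hypothetical and
+// would typically be the Table field of a DescribeTable response.
+//
+//    for _, gsi := range desc.GlobalSecondaryIndexes {
+//        if aws.StringValue(gsi.IndexStatus) == "CREATING" && aws.BoolValue(gsi.Backfilling) {
+//            // the index is still backfilling and may be deleted if no longer wanted
+//        }
+//    }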
+func (s *GlobalSecondaryIndexDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *GlobalSecondaryIndexDescription { + s.ProvisionedThroughput = v + return s +} + +// Represents the properties of a global secondary index for the table when +// the backup was created. +type GlobalSecondaryIndexInfo struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexInfo) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndexInfo) SetIndexName(v string) *GlobalSecondaryIndexInfo { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexInfo { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndexInfo { + s.ProvisionedThroughput = v + return s +} + +// Represents one of the following: +// +// - A new global secondary index to be added to an existing table. +// +// - New provisioned throughput parameters for an existing global secondary +// index. +// +// - An existing global secondary index to be removed from an existing table. 
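+//
+// As an illustrative sketch of the second case (the index name and capacity
+// values are hypothetical), an update that raises the provisioned throughput
+// of an existing index looks like:
+//
+//    update := &GlobalSecondaryIndexUpdate{
+//        Update: &UpdateGlobalSecondaryIndexAction{
+//            IndexName: aws.String("GameTitleIndex"),
+//            ProvisionedThroughput: &ProvisionedThroughput{
+//                ReadCapacityUnits:  aws.Int64(10),
+//                WriteCapacityUnits: aws.Int64(10),
+//            },
+//        },
+//    }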
+type GlobalSecondaryIndexUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a global secondary index on an existing + // table: + // + // * IndexName + // + // * KeySchema + // + // * AttributeDefinitions + // + // * Projection + // + // * ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlobalSecondaryIndexUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *GlobalSecondaryIndexUpdate) SetCreate(v *CreateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *GlobalSecondaryIndexUpdate) SetDelete(v *DeleteGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Delete = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *GlobalSecondaryIndexUpdate) SetUpdate(v *UpdateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Update = v + return s +} + +// Represents the properties of a global table. +type GlobalTable struct { + _ struct{} `type:"structure"` + + // The global table name. + GlobalTableName *string `min:"3" type:"string"` + + // The Regions where the global table has replicas. + ReplicationGroup []*Replica `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTable) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *GlobalTable) SetGlobalTableName(v string) *GlobalTable { + s.GlobalTableName = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable { + s.ReplicationGroup = v + return s +} + +// The specified global table already exists. +type GlobalTableAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorGlobalTableAlreadyExistsException(v protocol.ResponseMetadata) error { + return &GlobalTableAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *GlobalTableAlreadyExistsException) Code() string { + return "GlobalTableAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *GlobalTableAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *GlobalTableAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *GlobalTableAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *GlobalTableAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *GlobalTableAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Contains details about the global table. +type GlobalTableDescription struct { + _ struct{} `type:"structure"` + + // The creation time of the global table. + CreationDateTime *time.Time `type:"timestamp"` + + // The unique identifier of the global table. + GlobalTableArn *string `type:"string"` + + // The global table name. + GlobalTableName *string `min:"3" type:"string"` + + // The current state of the global table: + // + // * CREATING - The global table is being created. + // + // * UPDATING - The global table is being updated. + // + // * DELETING - The global table is being deleted. + // + // * ACTIVE - The global table is ready for use. + GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"` + + // The Regions where the global table has replicas. + ReplicationGroup []*ReplicaDescription `type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableDescription) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *GlobalTableDescription) SetCreationDateTime(v time.Time) *GlobalTableDescription { + s.CreationDateTime = &v + return s +} + +// SetGlobalTableArn sets the GlobalTableArn field's value. +func (s *GlobalTableDescription) SetGlobalTableArn(v string) *GlobalTableDescription { + s.GlobalTableArn = &v + return s +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *GlobalTableDescription) SetGlobalTableName(v string) *GlobalTableDescription { + s.GlobalTableName = &v + return s +} + +// SetGlobalTableStatus sets the GlobalTableStatus field's value. +func (s *GlobalTableDescription) SetGlobalTableStatus(v string) *GlobalTableDescription { + s.GlobalTableStatus = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *GlobalTableDescription) SetReplicationGroup(v []*ReplicaDescription) *GlobalTableDescription { + s.ReplicationGroup = v + return s +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Auto scaling settings for managing a global secondary index's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1)) + } + if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettingsUpdate sets the ProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedWriteCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityUnits(v int64) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedWriteCapacityUnits = &v + return s +} + +// The specified global table does not exist. +type GlobalTableNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableNotFoundException) GoString() string { + return s.String() +} + +func newErrorGlobalTableNotFoundException(v protocol.ResponseMetadata) error { + return &GlobalTableNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *GlobalTableNotFoundException) Code() string { + return "GlobalTableNotFoundException" +} + +// Message returns the exception's message. +func (s *GlobalTableNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *GlobalTableNotFoundException) OrigErr() error { + return nil +} + +func (s *GlobalTableNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *GlobalTableNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *GlobalTableNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. +type IdempotentParameterMismatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IdempotentParameterMismatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IdempotentParameterMismatchException) GoString() string { + return s.String() +} + +func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { + return &IdempotentParameterMismatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IdempotentParameterMismatchException) Code() string { + return "IdempotentParameterMismatchException" +} + +// Message returns the exception's message. +func (s *IdempotentParameterMismatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IdempotentParameterMismatchException) OrigErr() error { + return nil +} + +func (s *IdempotentParameterMismatchException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID +} + +// There was a conflict when importing from the specified S3 source. This can +// occur when the current import conflicts with a previous import request that +// had the same client token. +type ImportConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ImportConflictException) GoString() string { + return s.String() +} + +func newErrorImportConflictException(v protocol.ResponseMetadata) error { + return &ImportConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImportConflictException) Code() string { + return "ImportConflictException" +} + +// Message returns the exception's message. +func (s *ImportConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImportConflictException) OrigErr() error { + return nil +} + +func (s *ImportConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ImportConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ImportConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified import was not found. +type ImportNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportNotFoundException) GoString() string { + return s.String() +} + +func newErrorImportNotFoundException(v protocol.ResponseMetadata) error { + return &ImportNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImportNotFoundException) Code() string { + return "ImportNotFoundException" +} + +// Message returns the exception's message. +func (s *ImportNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImportNotFoundException) OrigErr() error { + return nil +} + +func (s *ImportNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ImportNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ImportNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Summary information about the source file for the import. +type ImportSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with + // this import task. + CloudWatchLogGroupArn *string `min:"1" type:"string"` + + // The time at which this import task ended. (Does this include the successful + // complete creation of the table it was imported to?) 
+ EndTime *time.Time `type:"timestamp"` + + // The Amazon Resource Number (ARN) corresponding to the import request. + ImportArn *string `min:"37" type:"string"` + + // The status of the import operation. + ImportStatus *string `type:"string" enum:"ImportStatus"` + + // The format of the source data. Valid values are CSV, DYNAMODB_JSON or ION. + InputFormat *string `type:"string" enum:"InputFormat"` + + // The path and S3 bucket of the source file that is being imported. This includes + // the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner (optional + // if the bucket is owned by the requester). + S3BucketSource *S3BucketSource `type:"structure"` + + // The time at which this import task began. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Number (ARN) of the table being imported into. + TableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportSummary) GoString() string { + return s.String() +} + +// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. +func (s *ImportSummary) SetCloudWatchLogGroupArn(v string) *ImportSummary { + s.CloudWatchLogGroupArn = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ImportSummary) SetEndTime(v time.Time) *ImportSummary { + s.EndTime = &v + return s +} + +// SetImportArn sets the ImportArn field's value. +func (s *ImportSummary) SetImportArn(v string) *ImportSummary { + s.ImportArn = &v + return s +} + +// SetImportStatus sets the ImportStatus field's value. +func (s *ImportSummary) SetImportStatus(v string) *ImportSummary { + s.ImportStatus = &v + return s +} + +// SetInputFormat sets the InputFormat field's value. +func (s *ImportSummary) SetInputFormat(v string) *ImportSummary { + s.InputFormat = &v + return s +} + +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportSummary) SetS3BucketSource(v *S3BucketSource) *ImportSummary { + s.S3BucketSource = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ImportSummary) SetStartTime(v time.Time) *ImportSummary { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ImportSummary) SetTableArn(v string) *ImportSummary { + s.TableArn = &v + return s +} + +// Represents the properties of the table being imported into. +type ImportTableDescription struct { + _ struct{} `type:"structure"` + + // The client token that was provided for the import task. Reusing the client + // token on retry makes a call to ImportTable idempotent. + ClientToken *string `type:"string"` + + // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with + // the target table. + CloudWatchLogGroupArn *string `min:"1" type:"string"` + + // The time at which the creation of the table associated with this import task + // completed. + EndTime *time.Time `type:"timestamp"` + + // The number of errors occurred on importing the source file into the target + // table. 
+ ErrorCount *int64 `type:"long"` + + // The error code corresponding to the failure that the import job ran into + // during execution. + FailureCode *string `type:"string"` + + // The error message corresponding to the failure that the import job ran into + // during execution. + FailureMessage *string `type:"string"` + + // The Amazon Resource Number (ARN) corresponding to the import request. + ImportArn *string `min:"37" type:"string"` + + // The status of the import. + ImportStatus *string `type:"string" enum:"ImportStatus"` + + // The number of items successfully imported into the new table. + ImportedItemCount *int64 `type:"long"` + + // The compression options for the data that has been imported into the target + // table. The values are NONE, GZIP, or ZSTD. + InputCompressionType *string `type:"string" enum:"InputCompressionType"` + + // The format of the source data going into the target table. + InputFormat *string `type:"string" enum:"InputFormat"` + + // The format options for the data that was imported into the target table. + // There is one value, CsvOption. + InputFormatOptions *InputFormatOptions `type:"structure"` + + // The total number of items processed from the source file. + ProcessedItemCount *int64 `type:"long"` + + // The total size of data processed from the source file, in Bytes. + ProcessedSizeBytes *int64 `type:"long"` + + // Values for the S3 bucket the source file is imported from. Includes bucket + // name (required), key prefix (optional) and bucket account owner ID (optional). + S3BucketSource *S3BucketSource `type:"structure"` + + // The time when this import task started. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Number (ARN) of the table being imported into. + TableArn *string `type:"string"` + + // The parameters for the new table that is being imported into. + TableCreationParameters *TableCreationParameters `type:"structure"` + + // The table id corresponding to the table created by import table process. + TableId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableDescription) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *ImportTableDescription) SetClientToken(v string) *ImportTableDescription { + s.ClientToken = &v + return s +} + +// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. +func (s *ImportTableDescription) SetCloudWatchLogGroupArn(v string) *ImportTableDescription { + s.CloudWatchLogGroupArn = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ImportTableDescription) SetEndTime(v time.Time) *ImportTableDescription { + s.EndTime = &v + return s +} + +// SetErrorCount sets the ErrorCount field's value. +func (s *ImportTableDescription) SetErrorCount(v int64) *ImportTableDescription { + s.ErrorCount = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. 
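+// Illustrative check of an import's outcome using the fields above. The desc
+// variable is hypothetical and would typically be the ImportTableDescription
+// from a DescribeImport response.
+//
+//    if aws.StringValue(desc.ImportStatus) == "FAILED" {
+//        // inspect desc.FailureCode and desc.FailureMessage for the cause;
+//        // desc.ProcessedItemCount and desc.ErrorCount show how far it got
+//    }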
+func (s *ImportTableDescription) SetFailureCode(v string) *ImportTableDescription { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *ImportTableDescription) SetFailureMessage(v string) *ImportTableDescription { + s.FailureMessage = &v + return s +} + +// SetImportArn sets the ImportArn field's value. +func (s *ImportTableDescription) SetImportArn(v string) *ImportTableDescription { + s.ImportArn = &v + return s +} + +// SetImportStatus sets the ImportStatus field's value. +func (s *ImportTableDescription) SetImportStatus(v string) *ImportTableDescription { + s.ImportStatus = &v + return s +} + +// SetImportedItemCount sets the ImportedItemCount field's value. +func (s *ImportTableDescription) SetImportedItemCount(v int64) *ImportTableDescription { + s.ImportedItemCount = &v + return s +} + +// SetInputCompressionType sets the InputCompressionType field's value. +func (s *ImportTableDescription) SetInputCompressionType(v string) *ImportTableDescription { + s.InputCompressionType = &v + return s +} + +// SetInputFormat sets the InputFormat field's value. +func (s *ImportTableDescription) SetInputFormat(v string) *ImportTableDescription { + s.InputFormat = &v + return s +} + +// SetInputFormatOptions sets the InputFormatOptions field's value. +func (s *ImportTableDescription) SetInputFormatOptions(v *InputFormatOptions) *ImportTableDescription { + s.InputFormatOptions = v + return s +} + +// SetProcessedItemCount sets the ProcessedItemCount field's value. +func (s *ImportTableDescription) SetProcessedItemCount(v int64) *ImportTableDescription { + s.ProcessedItemCount = &v + return s +} + +// SetProcessedSizeBytes sets the ProcessedSizeBytes field's value. +func (s *ImportTableDescription) SetProcessedSizeBytes(v int64) *ImportTableDescription { + s.ProcessedSizeBytes = &v + return s +} + +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportTableDescription) SetS3BucketSource(v *S3BucketSource) *ImportTableDescription { + s.S3BucketSource = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ImportTableDescription) SetStartTime(v time.Time) *ImportTableDescription { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ImportTableDescription) SetTableArn(v string) *ImportTableDescription { + s.TableArn = &v + return s +} + +// SetTableCreationParameters sets the TableCreationParameters field's value. +func (s *ImportTableDescription) SetTableCreationParameters(v *TableCreationParameters) *ImportTableDescription { + s.TableCreationParameters = v + return s +} + +// SetTableId sets the TableId field's value. +func (s *ImportTableDescription) SetTableId(v string) *ImportTableDescription { + s.TableId = &v + return s +} + +type ImportTableInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientToken makes the call to ImportTableInput idempotent, meaning + // that multiple identical calls have the same effect as one single call. + // + // A client token is valid for 8 hours after the first request that uses it + // is completed. After 8 hours, any request with the same client token is treated + // as a new request. Do not resubmit the same request with the same client token + // for more than 8 hours, or the result might not be idempotent. 
+ // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch + // exception. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Type of compression to be used on the input coming from the imported table. + InputCompressionType *string `type:"string" enum:"InputCompressionType"` + + // The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON + // or ION. + // + // InputFormat is a required field + InputFormat *string `type:"string" required:"true" enum:"InputFormat"` + + // Additional properties that specify how the input is formatted, + InputFormatOptions *InputFormatOptions `type:"structure"` + + // The S3 bucket that provides the source for the import. + // + // S3BucketSource is a required field + S3BucketSource *S3BucketSource `type:"structure" required:"true"` + + // Parameters for the table to import the data into. + // + // TableCreationParameters is a required field + TableCreationParameters *TableCreationParameters `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportTableInput"} + if s.InputFormat == nil { + invalidParams.Add(request.NewErrParamRequired("InputFormat")) + } + if s.S3BucketSource == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketSource")) + } + if s.TableCreationParameters == nil { + invalidParams.Add(request.NewErrParamRequired("TableCreationParameters")) + } + if s.InputFormatOptions != nil { + if err := s.InputFormatOptions.Validate(); err != nil { + invalidParams.AddNested("InputFormatOptions", err.(request.ErrInvalidParams)) + } + } + if s.S3BucketSource != nil { + if err := s.S3BucketSource.Validate(); err != nil { + invalidParams.AddNested("S3BucketSource", err.(request.ErrInvalidParams)) + } + } + if s.TableCreationParameters != nil { + if err := s.TableCreationParameters.Validate(); err != nil { + invalidParams.AddNested("TableCreationParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ImportTableInput) SetClientToken(v string) *ImportTableInput { + s.ClientToken = &v + return s +} + +// SetInputCompressionType sets the InputCompressionType field's value. +func (s *ImportTableInput) SetInputCompressionType(v string) *ImportTableInput { + s.InputCompressionType = &v + return s +} + +// SetInputFormat sets the InputFormat field's value. +func (s *ImportTableInput) SetInputFormat(v string) *ImportTableInput { + s.InputFormat = &v + return s +} + +// SetInputFormatOptions sets the InputFormatOptions field's value. 
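+// Illustrative sketch of a minimal ImportTableInput (bucket, prefix, and table
+// parameters are hypothetical). ClientToken is normally left empty: the
+// idempotencyToken tag on the field means the SDK fills in a token
+// automatically, so retries of the same request stay idempotent.
+//
+//    input := &ImportTableInput{
+//        InputFormat: aws.String("DYNAMODB_JSON"),
+//        S3BucketSource: &S3BucketSource{
+//            S3Bucket:    aws.String("my-import-bucket"),
+//            S3KeyPrefix: aws.String("exports/2022/"),
+//        },
+//        TableCreationParameters: &TableCreationParameters{
+//            TableName:   aws.String("ImportedTable"),
+//            BillingMode: aws.String("PAY_PER_REQUEST"),
+//            AttributeDefinitions: []*AttributeDefinition{
+//                {AttributeName: aws.String("pk"), AttributeType: aws.String("S")},
+//            },
+//            KeySchema: []*KeySchemaElement{
+//                {AttributeName: aws.String("pk"), KeyType: aws.String("HASH")},
+//            },
+//        },
+//    }
+//    if err := input.Validate(); err != nil {
+//        // fix the parameters before calling ImportTable
+//    }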
+func (s *ImportTableInput) SetInputFormatOptions(v *InputFormatOptions) *ImportTableInput { + s.InputFormatOptions = v + return s +} + +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportTableInput) SetS3BucketSource(v *S3BucketSource) *ImportTableInput { + s.S3BucketSource = v + return s +} + +// SetTableCreationParameters sets the TableCreationParameters field's value. +func (s *ImportTableInput) SetTableCreationParameters(v *TableCreationParameters) *ImportTableInput { + s.TableCreationParameters = v + return s +} + +type ImportTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items + // were processed, and how many errors were encountered. + // + // ImportTableDescription is a required field + ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableOutput) GoString() string { + return s.String() +} + +// SetImportTableDescription sets the ImportTableDescription field's value. +func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) *ImportTableOutput { + s.ImportTableDescription = v + return s +} + +// The operation tried to access a nonexistent index. +type IndexNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexNotFoundException) GoString() string { + return s.String() +} + +func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error { + return &IndexNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IndexNotFoundException) Code() string { + return "IndexNotFoundException" +} + +// Message returns the exception's message. +func (s *IndexNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IndexNotFoundException) OrigErr() error { + return nil +} + +func (s *IndexNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *IndexNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IndexNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The format options for the data that was imported into the target table. +// There is one value, CsvOption. +type InputFormatOptions struct { + _ struct{} `type:"structure"` + + // The options for imported source files in CSV format. The values are Delimiter + // and HeaderList. + Csv *CsvOptions `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputFormatOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputFormatOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputFormatOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputFormatOptions"} + if s.Csv != nil { + if err := s.Csv.Validate(); err != nil { + invalidParams.AddNested("Csv", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCsv sets the Csv field's value. +func (s *InputFormatOptions) SetCsv(v *CsvOptions) *InputFormatOptions { + s.Csv = v + return s +} + +// An error occurred on the server side. +type InternalServerError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The server encountered an internal error trying to fulfill the request. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerError) GoString() string { + return s.String() +} + +func newErrorInternalServerError(v protocol.ResponseMetadata) error { + return &InternalServerError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerError) Code() string { + return "InternalServerError" +} + +// Message returns the exception's message. +func (s *InternalServerError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerError) OrigErr() error { + return nil +} + +func (s *InternalServerError) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
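+// Illustrative handling of the modeled error types in this package (the client
+// and input variables are hypothetical). Each exception type implements the
+// error interface, so errors.As can be used to branch on the concrete type:
+//
+//    _, err := client.GetItem(input)
+//    var ise *InternalServerError
+//    if errors.As(err, &ise) {
+//        // server-side failure; these are generally retried by the SDK's
+//        // default retryer, but can also be handled explicitly here
+//    }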
+func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified ExportTime is outside of the point in time recovery window. +type InvalidExportTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidExportTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidExportTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { + return &InvalidExportTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExportTimeException) Code() string { + return "InvalidExportTimeException" +} + +// Message returns the exception's message. +func (s *InvalidExportTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidExportTimeException) OrigErr() error { + return nil +} + +func (s *InvalidExportTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidExportTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidExportTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime +// and LatestRestorableDateTime. +type InvalidRestoreTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRestoreTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRestoreTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error { + return &InvalidRestoreTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidRestoreTimeException) Code() string { + return "InvalidRestoreTimeException" +} + +// Message returns the exception's message. +func (s *InvalidRestoreTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRestoreTimeException) OrigErr() error { + return nil +} + +func (s *InvalidRestoreTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRestoreTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRestoreTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. If the +// table does not have any local secondary indexes, this information is not +// returned in the response. +type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The partition key value of the item collection. This value is the same as + // the partition key value of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// SetItemCollectionKey sets the ItemCollectionKey field's value. +func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics { + s.ItemCollectionKey = v + return s +} + +// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value. +func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics { + s.SizeEstimateRangeGB = v + return s +} + +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +type ItemCollectionSizeLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The total size of an item collection has exceeded the maximum limit of 10 + // gigabytes. 
+ Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionSizeLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionSizeLimitExceededException) GoString() string { + return s.String() +} + +func newErrorItemCollectionSizeLimitExceededException(v protocol.ResponseMetadata) error { + return &ItemCollectionSizeLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ItemCollectionSizeLimitExceededException) Code() string { + return "ItemCollectionSizeLimitExceededException" +} + +// Message returns the exception's message. +func (s *ItemCollectionSizeLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ItemCollectionSizeLimitExceededException) OrigErr() error { + return nil +} + +func (s *ItemCollectionSizeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ItemCollectionSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ItemCollectionSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Details for the requested item. +type ItemResponse struct { + _ struct{} `type:"structure"` + + // Map of attribute data consisting of the data type and attribute value. + Item map[string]*AttributeValue `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemResponse) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. +func (s *ItemResponse) SetItem(v map[string]*AttributeValue) *ItemResponse { + s.Item = v + return s +} + +// Represents a single element of a key schema. A key schema specifies the attributes +// that make up the primary key of a table, or the key attributes of an index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. For +// example, a simple primary key would be represented by one KeySchemaElement +// (for the partition key). A composite primary key would require one KeySchemaElement +// for the partition key, and another KeySchemaElement for the sort key. +// +// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). 
+// The data type must be one of String, Number, or Binary. The attribute cannot +// be nested within a List or a Map. +type KeySchemaElement struct { + _ struct{} `type:"structure"` + + // The name of a key attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // The role that this key attribute will assume: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // KeyType is a required field + KeyType *string `type:"string" required:"true" enum:"KeyType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeySchemaElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeySchemaElement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KeySchemaElement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.KeyType == nil { + invalidParams.Add(request.NewErrParamRequired("KeyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *KeySchemaElement) SetAttributeName(v string) *KeySchemaElement { + s.AttributeName = &v + return s +} + +// SetKeyType sets the KeyType field's value. +func (s *KeySchemaElement) SetKeyType(v string) *KeySchemaElement { + s.KeyType = &v + return s +} + +// Represents a set of primary keys and, for each key, the attributes to retrieve +// from the table. +// +// For each primary key, you must provide all of the key attributes. For example, +// with a simple primary key, you only need to provide the partition key. For +// a composite primary key, you must provide both the partition key and the +// sort key. +type KeysAndAttributes struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. 
+ ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // The primary key attribute values that define the items and the attributes + // associated with the items. + // + // Keys is a required field + Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeysAndAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeysAndAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *KeysAndAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Keys == nil { + invalidParams.Add(request.NewErrParamRequired("Keys")) + } + if s.Keys != nil && len(s.Keys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Keys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *KeysAndAttributes) SetAttributesToGet(v []*string) *KeysAndAttributes { + s.AttributesToGet = v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *KeysAndAttributes) SetConsistentRead(v bool) *KeysAndAttributes { + s.ConsistentRead = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *KeysAndAttributes) SetExpressionAttributeNames(v map[string]*string) *KeysAndAttributes { + s.ExpressionAttributeNames = v + return s +} + +// SetKeys sets the Keys field's value. +func (s *KeysAndAttributes) SetKeys(v []map[string]*AttributeValue) *KeysAndAttributes { + s.Keys = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes { + s.ProjectionExpression = &v + return s +} + +// Describes a Kinesis data stream destination. +type KinesisDataStreamDestination struct { + _ struct{} `type:"structure"` + + // The current status of replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The human-readable string that corresponds to the replica status. + DestinationStatusDescription *string `type:"string"` + + // The ARN for a specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KinesisDataStreamDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KinesisDataStreamDestination) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *KinesisDataStreamDestination) SetDestinationStatus(v string) *KinesisDataStreamDestination { + s.DestinationStatus = &v + return s +} + +// SetDestinationStatusDescription sets the DestinationStatusDescription field's value. +func (s *KinesisDataStreamDestination) SetDestinationStatusDescription(v string) *KinesisDataStreamDestination { + s.DestinationStatusDescription = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStreamDestination { + s.StreamArn = &v + return s +} + +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Too many operations for a given subscriber. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LimitExceededException) GoString() string { + return s.String() +} + +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" +} + +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListBackupsInput struct { + _ struct{} `type:"structure"` + + // The backups from the table specified by BackupType are listed. + // + // Where BackupType can be: + // + // * USER - On-demand backup created by you. (The default setting if no other + // backup types are specified.) + // + // * SYSTEM - On-demand backup automatically created by DynamoDB. + // + // * ALL - All types of on-demand backups (USER and SYSTEM). + BackupType *string `type:"string" enum:"BackupTypeFilter"` + + // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last + // evaluated when the current page of results was returned, inclusive of the + // current page of results. This value may be specified as the ExclusiveStartBackupArn + // of a new ListBackups operation in order to fetch the next page of results. + ExclusiveStartBackupArn *string `min:"37" type:"string"` + + // Maximum number of backups to return at once. + Limit *int64 `min:"1" type:"integer"` + + // The backups from the table specified by TableName are listed. 
+ TableName *string `min:"3" type:"string"` + + // Only backups created after this time are listed. TimeRangeLowerBound is inclusive. + TimeRangeLowerBound *time.Time `type:"timestamp"` + + // Only backups created before this time are listed. TimeRangeUpperBound is + // exclusive. + TimeRangeUpperBound *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBackupsInput"} + if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartBackupArn", 37)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupType sets the BackupType field's value. +func (s *ListBackupsInput) SetBackupType(v string) *ListBackupsInput { + s.BackupType = &v + return s +} + +// SetExclusiveStartBackupArn sets the ExclusiveStartBackupArn field's value. +func (s *ListBackupsInput) SetExclusiveStartBackupArn(v string) *ListBackupsInput { + s.ExclusiveStartBackupArn = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListBackupsInput) SetLimit(v int64) *ListBackupsInput { + s.Limit = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ListBackupsInput) SetTableName(v string) *ListBackupsInput { + s.TableName = &v + return s +} + +// SetTimeRangeLowerBound sets the TimeRangeLowerBound field's value. +func (s *ListBackupsInput) SetTimeRangeLowerBound(v time.Time) *ListBackupsInput { + s.TimeRangeLowerBound = &v + return s +} + +// SetTimeRangeUpperBound sets the TimeRangeUpperBound field's value. +func (s *ListBackupsInput) SetTimeRangeUpperBound(v time.Time) *ListBackupsInput { + s.TimeRangeUpperBound = &v + return s +} + +type ListBackupsOutput struct { + _ struct{} `type:"structure"` + + // List of BackupSummary objects. + BackupSummaries []*BackupSummary `type:"list"` + + // The ARN of the backup last evaluated when the current page of results was + // returned, inclusive of the current page of results. This value may be specified + // as the ExclusiveStartBackupArn of a new ListBackups operation in order to + // fetch the next page of results. + // + // If LastEvaluatedBackupArn is empty, then the last page of results has been + // processed and there are no more results to be retrieved. + // + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that + // there is more data to be returned. All results are guaranteed to have been + // returned if and only if no value for LastEvaluatedBackupArn is returned. 
+ LastEvaluatedBackupArn *string `min:"37" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBackupsOutput) GoString() string { + return s.String() +} + +// SetBackupSummaries sets the BackupSummaries field's value. +func (s *ListBackupsOutput) SetBackupSummaries(v []*BackupSummary) *ListBackupsOutput { + s.BackupSummaries = v + return s +} + +// SetLastEvaluatedBackupArn sets the LastEvaluatedBackupArn field's value. +func (s *ListBackupsOutput) SetLastEvaluatedBackupArn(v string) *ListBackupsOutput { + s.LastEvaluatedBackupArn = &v + return s +} + +type ListContributorInsightsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return per page. + MaxResults *int64 `type:"integer"` + + // A token to for the desired page, if there is one. + NextToken *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListContributorInsightsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListContributorInsightsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListContributorInsightsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListContributorInsightsInput"} + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListContributorInsightsInput) SetMaxResults(v int64) *ListContributorInsightsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListContributorInsightsInput) SetNextToken(v string) *ListContributorInsightsInput { + s.NextToken = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ListContributorInsightsInput) SetTableName(v string) *ListContributorInsightsInput { + s.TableName = &v + return s +} + +type ListContributorInsightsOutput struct { + _ struct{} `type:"structure"` + + // A list of ContributorInsightsSummary. + ContributorInsightsSummaries []*ContributorInsightsSummary `type:"list"` + + // A token to go to the next page if there is one. + NextToken *string `type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListContributorInsightsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListContributorInsightsOutput) GoString() string { + return s.String() +} + +// SetContributorInsightsSummaries sets the ContributorInsightsSummaries field's value. +func (s *ListContributorInsightsOutput) SetContributorInsightsSummaries(v []*ContributorInsightsSummary) *ListContributorInsightsOutput { + s.ContributorInsightsSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListContributorInsightsOutput) SetNextToken(v string) *ListContributorInsightsOutput { + s.NextToken = &v + return s +} + +type ListExportsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return per page. + MaxResults *int64 `min:"1" type:"integer"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListExports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `type:"string"` + + // The Amazon Resource Name (ARN) associated with the exported table. + TableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListExportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListExportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListExportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListExportsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListExportsInput) SetMaxResults(v int64) *ListExportsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListExportsInput) SetNextToken(v string) *ListExportsInput { + s.NextToken = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ListExportsInput) SetTableArn(v string) *ListExportsInput { + s.TableArn = &v + return s +} + +type ListExportsOutput struct { + _ struct{} `type:"structure"` + + // A list of ExportSummary objects. + ExportSummaries []*ExportSummary `type:"list"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListExports again, with NextToken set to this value. + NextToken *string `type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListExportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListExportsOutput) GoString() string { + return s.String() +} + +// SetExportSummaries sets the ExportSummaries field's value. +func (s *ListExportsOutput) SetExportSummaries(v []*ExportSummary) *ListExportsOutput { + s.ExportSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListExportsOutput) SetNextToken(v string) *ListExportsOutput { + s.NextToken = &v + return s +} + +type ListGlobalTablesInput struct { + _ struct{} `type:"structure"` + + // The first global table name that this operation will evaluate. + ExclusiveStartGlobalTableName *string `min:"3" type:"string"` + + // The maximum number of table names to return, if the parameter is not specified + // DynamoDB defaults to 100. + // + // If the number of global tables DynamoDB finds reaches this limit, it stops + // the operation and returns the table names collected up to that point, with + // a table name in the LastEvaluatedGlobalTableName to apply in a subsequent + // operation to the ExclusiveStartGlobalTableName parameter. + Limit *int64 `min:"1" type:"integer"` + + // Lists the global tables in a specific Region. + RegionName *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGlobalTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGlobalTablesInput"} + if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value. +func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput { + s.ExclusiveStartGlobalTableName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput { + s.Limit = &v + return s +} + +// SetRegionName sets the RegionName field's value. 
+func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput { + s.RegionName = &v + return s +} + +type ListGlobalTablesOutput struct { + _ struct{} `type:"structure"` + + // List of global table names. + GlobalTables []*GlobalTable `type:"list"` + + // Last evaluated global table name. + LastEvaluatedGlobalTableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesOutput) GoString() string { + return s.String() +} + +// SetGlobalTables sets the GlobalTables field's value. +func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput { + s.GlobalTables = v + return s +} + +// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value. +func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput { + s.LastEvaluatedGlobalTableName = &v + return s +} + +type ListImportsInput struct { + _ struct{} `type:"structure"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListImports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `min:"112" type:"string"` + + // The number of ImportSummary objects returned in a single page. + PageSize *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) associated with the table that was imported + // to. + TableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListImportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListImportsInput"} + if s.NextToken != nil && len(*s.NextToken) < 112 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 112)) + } + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImportsInput) SetNextToken(v string) *ListImportsInput { + s.NextToken = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *ListImportsInput) SetPageSize(v int64) *ListImportsInput { + s.PageSize = &v + return s +} + +// SetTableArn sets the TableArn field's value. 
+func (s *ListImportsInput) SetTableArn(v string) *ListImportsInput { + s.TableArn = &v + return s +} + +type ListImportsOutput struct { + _ struct{} `type:"structure"` + + // A list of ImportSummary objects. + ImportSummaryList []*ImportSummary `type:"list"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListImports again, with NextToken set to this value. + NextToken *string `min:"112" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsOutput) GoString() string { + return s.String() +} + +// SetImportSummaryList sets the ImportSummaryList field's value. +func (s *ListImportsOutput) SetImportSummaryList(v []*ImportSummary) *ListImportsOutput { + s.ImportSummaryList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput { + s.NextToken = &v + return s +} + +// Represents the input of a ListTables operation. +type ListTablesInput struct { + _ struct{} `type:"structure"` + + // The first table name that this operation will evaluate. Use the value that + // was returned for LastEvaluatedTableName in a previous operation, so that + // you can obtain the next page of results. + ExclusiveStartTableName *string `min:"3" type:"string"` + + // A maximum number of table names to return. If this parameter is not specified, + // the limit is 100. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"} + if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExclusiveStartTableName sets the ExclusiveStartTableName field's value. +func (s *ListTablesInput) SetExclusiveStartTableName(v string) *ListTablesInput { + s.ExclusiveStartTableName = &v + return s +} + +// SetLimit sets the Limit field's value. 
+func (s *ListTablesInput) SetLimit(v int64) *ListTablesInput { + s.Limit = &v + return s +} + +// Represents the output of a ListTables operation. +type ListTablesOutput struct { + _ struct{} `type:"structure"` + + // The name of the last table in the current page of results. Use this value + // as the ExclusiveStartTableName in a new request to obtain the next page of + // results, until all the table names are returned. + // + // If you do not receive a LastEvaluatedTableName value in the response, this + // means that there are no more table names to be retrieved. + LastEvaluatedTableName *string `min:"3" type:"string"` + + // The names of the tables associated with the current account at the current + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value + // as the ExclusiveStartTableName parameter in a subsequent ListTables request + // and obtain the next page of results. + TableNames []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTablesOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedTableName sets the LastEvaluatedTableName field's value. +func (s *ListTablesOutput) SetLastEvaluatedTableName(v string) *ListTablesOutput { + s.LastEvaluatedTableName = &v + return s +} + +// SetTableNames sets the TableNames field's value. +func (s *ListTablesOutput) SetTableNames(v []*string) *ListTablesOutput { + s.TableNames = v + return s +} + +type ListTagsOfResourceInput struct { + _ struct{} `type:"structure"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListTagOfResource. When provided in this manner, this API + // fetches the next page of results. + NextToken *string `type:"string"` + + // The Amazon DynamoDB resource with tags to be listed. This value is an Amazon + // Resource Name (ARN). + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsOfResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsOfResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsOfResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsOfResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsOfResourceInput) SetNextToken(v string) *ListTagsOfResourceInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsOfResourceInput) SetResourceArn(v string) *ListTagsOfResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsOfResourceOutput struct { + _ struct{} `type:"structure"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListTagsOfResource again, with NextToken set to this + // value. + NextToken *string `type:"string"` + + // The tags currently associated with the Amazon DynamoDB resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsOfResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsOfResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsOfResourceOutput) SetNextToken(v string) *ListTagsOfResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsOfResourceOutput) SetTags(v []*Tag) *ListTagsOfResourceOutput { + s.Tags = v + return s +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the local secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into the + // local secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. 
+ // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LocalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *LocalSecondaryIndex) SetIndexName(v string) *LocalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *LocalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *LocalSecondaryIndex) SetProjection(v *Projection) *LocalSecondaryIndex { + s.Projection = v + return s +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // Represents the name of the local secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// SetIndexArn sets the IndexArn field's value. +func (s *LocalSecondaryIndexDescription) SetIndexArn(v string) *LocalSecondaryIndexDescription { + s.IndexArn = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *LocalSecondaryIndexDescription) SetIndexName(v string) *LocalSecondaryIndexDescription { + s.IndexName = &v + return s +} + +// SetIndexSizeBytes sets the IndexSizeBytes field's value. +func (s *LocalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *LocalSecondaryIndexDescription { + s.IndexSizeBytes = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *LocalSecondaryIndexDescription) SetItemCount(v int64) *LocalSecondaryIndexDescription { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *LocalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexDescription { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *LocalSecondaryIndexDescription) SetProjection(v *Projection) *LocalSecondaryIndexDescription { + s.Projection = v + return s +} + +// Represents the properties of a local secondary index for the table when the +// backup was created. +type LocalSecondaryIndexInfo struct { + _ struct{} `type:"structure"` + + // Represents the name of the local secondary index. + IndexName *string `min:"3" type:"string"` + + // The complete key schema for a local secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. 
The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndexInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndexInfo) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *LocalSecondaryIndexInfo) SetIndexName(v string) *LocalSecondaryIndexInfo { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *LocalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexInfo { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIndexInfo { + s.Projection = v + return s +} + +// Represents a PartiQL statment that uses parameters. +type ParameterizedStatement struct { + _ struct{} `type:"structure"` + + // The parameter values. + Parameters []*AttributeValue `min:"1" type:"list"` + + // A PartiQL statment that uses parameters. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParameterizedStatement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParameterizedStatement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParameterizedStatement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterizedStatement"} + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParameters sets the Parameters field's value. 
+func (s *ParameterizedStatement) SetParameters(v []*AttributeValue) *ParameterizedStatement { + s.Parameters = v + return s +} + +// SetStatement sets the Statement field's value. +func (s *ParameterizedStatement) SetStatement(v string) *ParameterizedStatement { + s.Statement = &v + return s +} + +// The description of the point in time settings applied to the table. +type PointInTimeRecoveryDescription struct { + _ struct{} `type:"structure"` + + // Specifies the earliest point in time you can restore your table to. You can + // restore your table to any point in time during the last 35 days. + EarliestRestorableDateTime *time.Time `type:"timestamp"` + + // LatestRestorableDateTime is typically 5 minutes before the current time. + LatestRestorableDateTime *time.Time `type:"timestamp"` + + // The current state of point in time recovery: + // + // * ENABLED - Point in time recovery is enabled. + // + // * DISABLED - Point in time recovery is disabled. + PointInTimeRecoveryStatus *string `type:"string" enum:"PointInTimeRecoveryStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoveryDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoveryDescription) GoString() string { + return s.String() +} + +// SetEarliestRestorableDateTime sets the EarliestRestorableDateTime field's value. +func (s *PointInTimeRecoveryDescription) SetEarliestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription { + s.EarliestRestorableDateTime = &v + return s +} + +// SetLatestRestorableDateTime sets the LatestRestorableDateTime field's value. +func (s *PointInTimeRecoveryDescription) SetLatestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription { + s.LatestRestorableDateTime = &v + return s +} + +// SetPointInTimeRecoveryStatus sets the PointInTimeRecoveryStatus field's value. +func (s *PointInTimeRecoveryDescription) SetPointInTimeRecoveryStatus(v string) *PointInTimeRecoveryDescription { + s.PointInTimeRecoveryStatus = &v + return s +} + +// Represents the settings used to enable point in time recovery. +type PointInTimeRecoverySpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether point in time recovery is enabled (true) or disabled (false) + // on the table. + // + // PointInTimeRecoveryEnabled is a required field + PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoverySpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
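// A sketch of enabling point in time recovery on a hypothetical table through
// UpdateContinuousBackups, which takes the PointInTimeRecoverySpecification above.
// Assumes "github.com/aws/aws-sdk-go/aws/session" in addition to the imports noted
// earlier.
func enablePointInTimeRecoverySketch() error {
	svc := dynamodb.New(session.Must(session.NewSession()))
	_, err := svc.UpdateContinuousBackups(&dynamodb.UpdateContinuousBackupsInput{
		TableName: aws.String("Music"), // hypothetical table name
		PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
			PointInTimeRecoveryEnabled: aws.Bool(true),
		},
	})
	return err
}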
+func (s PointInTimeRecoverySpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PointInTimeRecoverySpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"} + if s.PointInTimeRecoveryEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoveryEnabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPointInTimeRecoveryEnabled sets the PointInTimeRecoveryEnabled field's value. +func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) *PointInTimeRecoverySpecification { + s.PointInTimeRecoveryEnabled = &v + return s +} + +// Point in time recovery has not yet been enabled for this source table. +type PointInTimeRecoveryUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoveryUnavailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoveryUnavailableException) GoString() string { + return s.String() +} + +func newErrorPointInTimeRecoveryUnavailableException(v protocol.ResponseMetadata) error { + return &PointInTimeRecoveryUnavailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *PointInTimeRecoveryUnavailableException) Code() string { + return "PointInTimeRecoveryUnavailableException" +} + +// Message returns the exception's message. +func (s *PointInTimeRecoveryUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *PointInTimeRecoveryUnavailableException) OrigErr() error { + return nil +} + +func (s *PointInTimeRecoveryUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *PointInTimeRecoveryUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *PointInTimeRecoveryUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents attributes that are copied (projected) from the table into an +// index. These are in addition to the primary key attributes and index key +// attributes, which are automatically projected. +type Projection struct { + _ struct{} `type:"structure"` + + // Represents the non-key attribute names which will be projected into the index. + // + // For local secondary indexes, the total count of NonKeyAttributes summed across + // all of the local secondary indexes, must not exceed 100. 
If you project the + // same attribute into two different indexes, this counts as two distinct attributes + // when determining the total. + NonKeyAttributes []*string `min:"1" type:"list"` + + // The set of attributes that are projected into the index: + // + // * KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // * INCLUDE - In addition to the attributes described in KEYS_ONLY, the + // secondary index will include other non-key attributes that you specify. + // + // * ALL - All of the table attributes are projected into the index. + ProjectionType *string `type:"string" enum:"ProjectionType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Projection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Projection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Projection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Projection"} + if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNonKeyAttributes sets the NonKeyAttributes field's value. +func (s *Projection) SetNonKeyAttributes(v []*string) *Projection { + s.NonKeyAttributes = v + return s +} + +// SetProjectionType sets the ProjectionType field's value. +func (s *Projection) SetProjectionType(v string) *Projection { + s.ProjectionType = &v + return s +} + +// Represents the provisioned throughput settings for a specified table or index. +// The settings can be modified using the UpdateTable operation. +// +// For current minimum and maximum provisioned throughput values, see Service, +// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // ReadCapacityUnits is a required field + ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. 
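// A sketch of a LocalSecondaryIndex definition that uses the Projection type above
// with an INCLUDE projection. Index, key, and non-key attribute names are
// hypothetical; the KeyType and ProjectionType strings come from the enum constants
// defined in this package.
func localSecondaryIndexSketch() *dynamodb.LocalSecondaryIndex {
	return &dynamodb.LocalSecondaryIndex{
		IndexName: aws.String("AlbumIndex"),
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			{AttributeName: aws.String("AlbumTitle"), KeyType: aws.String(dynamodb.KeyTypeRange)},
		},
		Projection: &dynamodb.Projection{
			ProjectionType: aws.String(dynamodb.ProjectionTypeInclude),
			// Projected non-key attributes count toward the 100-attribute limit
			// shared by all local secondary indexes on the table.
			NonKeyAttributes: []*string{aws.String("Genre"), aws.String("Year")},
		},
	}
}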
+ // + // WriteCapacityUnits is a required field + WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionedThroughput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"} + if s.ReadCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits")) + } + if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) + } + if s.WriteCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits")) + } + if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughput) SetReadCapacityUnits(v int64) *ProvisionedThroughput { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ProvisionedThroughput) SetWriteCapacityUnits(v int64) *ProvisionedThroughput { + s.WriteCapacityUnits = &v + return s +} + +// Represents the provisioned throughput settings for the table, consisting +// of read and write capacity units, along with data about increases and decreases. +type ProvisionedThroughputDescription struct { + _ struct{} `type:"structure"` + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time `type:"timestamp"` + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time `type:"timestamp"` + + // The number of provisioned throughput decreases for this table during this + // UTC calendar day. For current maximums on provisioned throughput decreases, + // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + NumberOfDecreasesToday *int64 `min:"1" type:"long"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 `type:"long"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + WriteCapacityUnits *int64 `type:"long"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputDescription) GoString() string { + return s.String() +} + +// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value. +func (s *ProvisionedThroughputDescription) SetLastDecreaseDateTime(v time.Time) *ProvisionedThroughputDescription { + s.LastDecreaseDateTime = &v + return s +} + +// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value. +func (s *ProvisionedThroughputDescription) SetLastIncreaseDateTime(v time.Time) *ProvisionedThroughputDescription { + s.LastIncreaseDateTime = &v + return s +} + +// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value. +func (s *ProvisionedThroughputDescription) SetNumberOfDecreasesToday(v int64) *ProvisionedThroughputDescription { + s.NumberOfDecreasesToday = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughputDescription) SetReadCapacityUnits(v int64) *ProvisionedThroughputDescription { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *ProvisionedThroughputDescription { + s.WriteCapacityUnits = &v + return s +} + +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughputExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // You exceeded your maximum allowed provisioned throughput. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputExceededException) GoString() string { + return s.String() +} + +func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { + return &ProvisionedThroughputExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
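// A sketch of recognizing this exception on a returned error so a caller can back
// off and retry; the SDK's default retryer already retries this error class, so an
// explicit check like this is only needed for custom handling. Assumes
// "github.com/aws/aws-sdk-go/aws/awserr".
func isThroughputExceeded(err error) bool {
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException
	}
	return false
}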
+func (s *ProvisionedThroughputExceededException) Code() string { + return "ProvisionedThroughputExceededException" +} + +// Message returns the exception's message. +func (s *ProvisionedThroughputExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ProvisionedThroughputExceededException) OrigErr() error { + return nil +} + +func (s *ProvisionedThroughputExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ProvisionedThroughputExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ProvisionedThroughputExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Replica-specific provisioned throughput settings. If not specified, uses +// the source table's provisioned throughput settings. +type ProvisionedThroughputOverride struct { + _ struct{} `type:"structure"` + + // Replica-specific read capacity units. If not specified, uses the source table's + // read capacity settings. + ReadCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputOverride) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionedThroughputOverride) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughputOverride"} + if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughputOverride) SetReadCapacityUnits(v int64) *ProvisionedThroughputOverride { + s.ReadCapacityUnits = &v + return s +} + +// Represents a request to perform a PutItem operation. +type Put struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to be written by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. 
If any attributes are present in the item that are part of an index + // key schema for the table, their types must match the index key schema. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Put condition fails. For ReturnValuesOnConditionCheckFailure, the valid values + // are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table in which to write the item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Put) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Put) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Put) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Put"} + if s.Item == nil { + invalidParams.Add(request.NewErrParamRequired("Item")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Put) SetConditionExpression(v string) *Put { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Put) SetExpressionAttributeNames(v map[string]*string) *Put { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Put) SetExpressionAttributeValues(v map[string]*AttributeValue) *Put { + s.ExpressionAttributeValues = v + return s +} + +// SetItem sets the Item field's value. +func (s *Put) SetItem(v map[string]*AttributeValue) *Put { + s.Item = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Put) SetReturnValuesOnConditionCheckFailure(v string) *Put { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Put) SetTableName(v string) *Put { + s.TableName = &v + return s +} + +// Represents the input of a PutItem operation. +type PutItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional PutItem operation + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. 
+ // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute name/value pairs, one for each attribute. Only the primary + // key attributes are required; you can optionally provide other attribute name-value + // pairs for the item. + // + // You must provide all of the attributes for the primary key. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide both values for both the + // partition key and the sort key. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // + // Empty String and Binary attribute values are allowed. Attribute values of + // type String and Binary must have a length greater than zero if the attribute + // is used as a key attribute for a table or index. + // + // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey) + // in the Amazon DynamoDB Developer Guide. + // + // Each element in the Item map is an AttributeValue object. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were updated with the PutItem request. For PutItem, the valid + // values are: + // + // * NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. 
(This setting is the default for ReturnValues.) + // + // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the + // content of the old item is returned. + // + // The values returned are strongly consistent. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // PutItem does not recognize any values other than NONE or ALL_OLD. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table to contain the item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutItemInput"} + if s.Item == nil { + invalidParams.Add(request.NewErrParamRequired("Item")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *PutItemInput) SetConditionExpression(v string) *PutItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *PutItemInput) SetConditionalOperator(v string) *PutItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *PutItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *PutItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *PutItemInput) SetExpressionAttributeNames(v map[string]*string) *PutItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *PutItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *PutItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetItem sets the Item field's value. +func (s *PutItemInput) SetItem(v map[string]*AttributeValue) *PutItemInput { + s.Item = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *PutItemInput) SetReturnConsumedCapacity(v string) *PutItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. 
+func (s *PutItemInput) SetReturnItemCollectionMetrics(v string) *PutItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *PutItemInput) SetReturnValues(v string) *PutItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *PutItemInput) SetTableName(v string) *PutItemInput { + s.TableName = &v + return s +} + +// Represents the output of a PutItem operation. +type PutItemOutput struct { + _ struct{} `type:"structure"` + + // The attribute values as they appeared before the PutItem operation, but only + // if ReturnValues is specified as ALL_OLD in the request. Each element consists + // of an attribute name and an attribute value. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the PutItem operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the PutItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *PutItemOutput) SetAttributes(v map[string]*AttributeValue) *PutItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
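// A sketch of a conditional PutItem against a hypothetical "Thread" table that ties
// together ConditionExpression, ExpressionAttributeNames/Values and
// ReturnValues=ALL_OLD from the PutItemInput above. The write succeeds only when the
// existing item's Status attribute is present and not "CLOSED".
func conditionalPutSketch(svc *dynamodb.DynamoDB) (map[string]*dynamodb.AttributeValue, error) {
	out, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("Thread"),
		Item: map[string]*dynamodb.AttributeValue{
			"ForumName": {S: aws.String("Amazon DynamoDB")},
			"Subject":   {S: aws.String("New discussion thread")},
			"Views":     {N: aws.String("0")},
		},
		// #S works around Status potentially clashing with a reserved word.
		ConditionExpression:      aws.String("#S <> :closed"),
		ExpressionAttributeNames: map[string]*string{"#S": aws.String("Status")},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":closed": {S: aws.String("CLOSED")},
		},
		ReturnValues: aws.String(dynamodb.ReturnValueAllOld),
	})
	if err != nil {
		return nil, err
	}
	// With ALL_OLD, Attributes holds the overwritten item, if there was one.
	return out.Attributes, nil
}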
+func (s *PutItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *PutItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *PutItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *PutItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a request to perform a PutItem operation on an item. +type PutRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of an item to be processed by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item that are part of an index + // key schema for the table, their types must match the index key schema. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRequest) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. +func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest { + s.Item = v + return s +} + +// Represents the input of a Query operation. +type QueryInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you query a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number, or Binary. No + // set data types are allowed. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. 
The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Query operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression does not allow key attributes. You cannot define a filter + // expression based on a partition key or a sort key. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // in the Amazon DynamoDB Developer Guide. + FilterExpression *string `type:"string"` + + // The name of an index to query. This index can be any local secondary index + // or global secondary index on the table. Note that if you use the IndexName + // parameter, you must also provide TableName. 
+ IndexName *string `min:"3" type:"string"` + + // The condition that specifies the key values for items to be retrieved by + // the Query action. + // + // The condition must perform an equality test on a single partition key value. + // + // The condition can optionally perform one of several comparison tests on a + // single sort key value. This allows Query to retrieve one item with a given + // partition key value and sort key value, or several items that have the same + // partition key value but different sort key values. + // + // The partition key equality test is required, and must be specified in the + // following format: + // + // partitionKeyName = :partitionkeyval + // + // If you also want to provide a condition for the sort key, it must be combined + // using AND with the condition for the sort key. Following is an example, using + // the = comparison operator for the sort key: + // + // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval + // + // Valid comparisons for the sort key condition are as follows: + // + // * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval. + // + // * sortKeyName < :sortkeyval - true if the sort key value is less than + // :sortkeyval. + // + // * sortKeyName <= :sortkeyval - true if the sort key value is less than + // or equal to :sortkeyval. + // + // * sortKeyName > :sortkeyval - true if the sort key value is greater than + // :sortkeyval. + // + // * sortKeyName >= :sortkeyval - true if the sort key value is greater than + // or equal to :sortkeyval. + // + // * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort + // key value is greater than or equal to :sortkeyval1, and less than or equal + // to :sortkeyval2. + // + // * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value + // begins with a particular operand. (You cannot use this function with a + // sort key that is of type Number.) Note that the function name begins_with + // is case-sensitive. + // + // Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval + // and :sortval with actual values at runtime. + // + // You can optionally use the ExpressionAttributeNames parameter to replace + // the names of the partition key and sort key with placeholder tokens. This + // option might be necessary if an attribute name conflicts with a DynamoDB + // reserved word. For example, the following KeyConditionExpression parameter + // causes an error because Size is a reserved word: + // + // * Size = :myval + // + // To work around this, define a placeholder (such a #S) to represent the attribute + // name Size. KeyConditionExpression then is as follows: + // + // * #S = :myval + // + // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide. + // + // For more information on ExpressionAttributeNames and ExpressionAttributeValues, + // see Using Placeholders for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html) + // in the Amazon DynamoDB Developer Guide. + KeyConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use KeyConditionExpression instead. For more + // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html) + // in the Amazon DynamoDB Developer Guide. 
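// A sketch of the key condition format described above: an equality test on the
// partition key combined with a begins_with test on the sort key, plus a
// FilterExpression applied after the key condition. Table and attribute names are
// hypothetical; #A shows the ExpressionAttributeNames substitution.
func keyConditionQuerySketch(svc *dynamodb.DynamoDB) (*dynamodb.QueryOutput, error) {
	return svc.Query(&dynamodb.QueryInput{
		TableName:                aws.String("Music"),
		KeyConditionExpression:   aws.String("#A = :artist AND begins_with(SongTitle, :prefix)"),
		ExpressionAttributeNames: map[string]*string{"#A": aws.String("Artist")},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":artist": {S: aws.String("Acme Band")},
			":prefix": {S: aws.String("Happy")},
			":genre":  {S: aws.String("Rock")},
		},
		// Filtering happens after the read, so it does not reduce consumed capacity.
		FilterExpression: aws.String("Genre = :genre"),
	})
}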
+ KeyConditions map[string]*Condition `type:"map"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html) + // in the Amazon DynamoDB Developer Guide. + QueryFilter map[string]*Condition `type:"map"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Specifies the order for index traversal: If true (default), the traversal + // is performed in ascending order; if false, the traversal is performed in + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort + // key. If the sort key data type is Number, the results are stored in numeric + // order. For type String, the results are stored in order of UTF-8 bytes. For + // type Binary, DynamoDB treats each byte of the binary data as unsigned. + // + // If ScanIndexForward is true, DynamoDB returns the results in the order in + // which they are stored (by sort key value). This is the default behavior. + // If ScanIndexForward is false, DynamoDB reads the results in reverse order + // by sort key value, and then returns the results to the client. 
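// A sketch of reading the newest items first: ScanIndexForward=false reverses the
// sort-key order and Limit caps how many items are evaluated per request. Table and
// key names are hypothetical.
func latestItemsSketch(svc *dynamodb.DynamoDB) (*dynamodb.QueryOutput, error) {
	return svc.Query(&dynamodb.QueryInput{
		TableName:              aws.String("Reply"),
		KeyConditionExpression: aws.String("Id = :id"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":id": {S: aws.String("Amazon DynamoDB#New discussion thread")},
		},
		ScanIndexForward: aws.Bool(false), // newest (largest sort key value) first
		Limit:            aws.Int64(5),
	})
}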
+ ScanIndexForward *bool `type:"boolean"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. + // + // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent + // to specifying ALL_ATTRIBUTES. + // + // * COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. + // This return value is equivalent to specifying ProjectionExpression without + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation will read only the index and not the table. If any of the + // requested attributes are not projected into the local secondary index, + // DynamoDB fetches each of these attributes from the parent table. This + // extra fetching incurs additional throughput cost and latency. If you query + // or scan a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying ProjectionExpression without any + // value for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
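// A sketch of counting matching items without returning them, using Select=COUNT as
// described above. Count and ScannedCount only differ when a FilterExpression (or
// legacy QueryFilter) drops some of the evaluated items.
func countItemsSketch(svc *dynamodb.DynamoDB) (int64, error) {
	out, err := svc.Query(&dynamodb.QueryInput{
		TableName:              aws.String("Music"), // hypothetical
		KeyConditionExpression: aws.String("Artist = :artist"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":artist": {S: aws.String("Acme Band")},
		},
		Select: aws.String(dynamodb.SelectCount),
	})
	if err != nil {
		return 0, err
	}
	return aws.Int64Value(out.Count), nil
}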
+func (s *QueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.KeyConditions != nil { + for i, v := range s.KeyConditions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueryFilter != nil { + for i, v := range s.QueryFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *QueryInput) SetAttributesToGet(v []*string) *QueryInput { + s.AttributesToGet = v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *QueryInput) SetConditionalOperator(v string) *QueryInput { + s.ConditionalOperator = &v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *QueryInput) SetConsistentRead(v bool) *QueryInput { + s.ConsistentRead = &v + return s +} + +// SetExclusiveStartKey sets the ExclusiveStartKey field's value. +func (s *QueryInput) SetExclusiveStartKey(v map[string]*AttributeValue) *QueryInput { + s.ExclusiveStartKey = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *QueryInput) SetExpressionAttributeNames(v map[string]*string) *QueryInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *QueryInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *QueryInput { + s.ExpressionAttributeValues = v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *QueryInput) SetFilterExpression(v string) *QueryInput { + s.FilterExpression = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *QueryInput) SetIndexName(v string) *QueryInput { + s.IndexName = &v + return s +} + +// SetKeyConditionExpression sets the KeyConditionExpression field's value. +func (s *QueryInput) SetKeyConditionExpression(v string) *QueryInput { + s.KeyConditionExpression = &v + return s +} + +// SetKeyConditions sets the KeyConditions field's value. +func (s *QueryInput) SetKeyConditions(v map[string]*Condition) *QueryInput { + s.KeyConditions = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *QueryInput) SetLimit(v int64) *QueryInput { + s.Limit = &v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *QueryInput) SetProjectionExpression(v string) *QueryInput { + s.ProjectionExpression = &v + return s +} + +// SetQueryFilter sets the QueryFilter field's value. 
+func (s *QueryInput) SetQueryFilter(v map[string]*Condition) *QueryInput { + s.QueryFilter = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *QueryInput) SetReturnConsumedCapacity(v string) *QueryInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetScanIndexForward sets the ScanIndexForward field's value. +func (s *QueryInput) SetScanIndexForward(v bool) *QueryInput { + s.ScanIndexForward = &v + return s +} + +// SetSelect sets the Select field's value. +func (s *QueryInput) SetSelect(v string) *QueryInput { + s.Select = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *QueryInput) SetTableName(v string) *QueryInput { + s.TableName = &v + return s +} + +// Represents the output of a Query operation. +type QueryOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the Query operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount are + // the same. + Count *int64 `type:"integer"` + + // An array of item attributes that match the query criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any QueryFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient + // Query operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
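// A minimal usage sketch (not part of the vendored SDK source or of this
// patch): it drains a paginated Query by feeding LastEvaluatedKey back in as
// ExclusiveStartKey, as the QueryInput/QueryOutput documentation above
// describes. The client wiring, the table name ("example-table") and the key
// attribute name ("pk") are assumptions made for illustration only.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// queryAll returns every item for one partition key, in descending sort-key order.
func queryAll(svc *dynamodb.DynamoDB, pk string) ([]map[string]*dynamodb.AttributeValue, error) {
	input := &dynamodb.QueryInput{
		TableName:              aws.String("example-table"), // assumed table name
		KeyConditionExpression: aws.String("pk = :pk"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":pk": {S: aws.String(pk)},
		},
		Limit:            aws.Int64(100),  // page size, not a total cap
		ScanIndexForward: aws.Bool(false), // descending sort-key order
	}
	if err := input.Validate(); err != nil {
		return nil, err
	}
	var items []map[string]*dynamodb.AttributeValue
	for {
		out, err := svc.Query(input)
		if err != nil {
			return nil, err
		}
		items = append(items, out.Items...)
		if len(out.LastEvaluatedKey) == 0 {
			// An empty LastEvaluatedKey is the only reliable end-of-results signal.
			return items, nil
		}
		input.ExclusiveStartKey = out.LastEvaluatedKey
	}
}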
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *QueryOutput) SetConsumedCapacity(v *ConsumedCapacity) *QueryOutput { + s.ConsumedCapacity = v + return s +} + +// SetCount sets the Count field's value. +func (s *QueryOutput) SetCount(v int64) *QueryOutput { + s.Count = &v + return s +} + +// SetItems sets the Items field's value. +func (s *QueryOutput) SetItems(v []map[string]*AttributeValue) *QueryOutput { + s.Items = v + return s +} + +// SetLastEvaluatedKey sets the LastEvaluatedKey field's value. +func (s *QueryOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *QueryOutput { + s.LastEvaluatedKey = v + return s +} + +// SetScannedCount sets the ScannedCount field's value. +func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput { + s.ScannedCount = &v + return s +} + +// Represents the properties of a replica. +type Replica struct { + _ struct{} `type:"structure"` + + // The Region where the replica needs to be created. + RegionName *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Replica) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Replica) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *Replica) SetRegionName(v string) *Replica { + s.RegionName = &v + return s +} + +// The specified replica is already part of the global table. +type ReplicaAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorReplicaAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ReplicaAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReplicaAlreadyExistsException) Code() string { + return "ReplicaAlreadyExistsException" +} + +// Message returns the exception's message. 
+func (s *ReplicaAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReplicaAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *ReplicaAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ReplicaAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReplicaAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the auto scaling settings of the replica. +type ReplicaAutoScalingDescription struct { + _ struct{} `type:"structure"` + + // Replica-specific global secondary index auto scaling settings. + GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexAutoScalingDescription `type:"list"` + + // The Region where the replica exists. + RegionName *string `type:"string"` + + // Represents the auto scaling settings for a global table or global secondary + // index. + ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // Represents the auto scaling settings for a global table or global secondary + // index. + ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The current state of the replica: + // + // * CREATING - The replica is being created. + // + // * UPDATING - The replica is being updated. + // + // * DELETING - The replica is being deleted. + // + // * ACTIVE - The replica is ready for use. + ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAutoScalingDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAutoScalingDescription) GoString() string { + return s.String() +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *ReplicaAutoScalingDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexAutoScalingDescription) *ReplicaAutoScalingDescription { + s.GlobalSecondaryIndexes = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaAutoScalingDescription) SetRegionName(v string) *ReplicaAutoScalingDescription { + s.RegionName = &v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription { + s.ReplicaProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. 
+func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription { + s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaAutoScalingDescription) SetReplicaStatus(v string) *ReplicaAutoScalingDescription { + s.ReplicaStatus = &v + return s +} + +// Represents the auto scaling settings of a replica that will be modified. +type ReplicaAutoScalingUpdate struct { + _ struct{} `type:"structure"` + + // The Region where the replica exists. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Represents the auto scaling settings of global secondary indexes that will + // be modified. + ReplicaGlobalSecondaryIndexUpdates []*ReplicaGlobalSecondaryIndexAutoScalingUpdate `type:"list"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ReplicaProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAutoScalingUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAutoScalingUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaAutoScalingUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaAutoScalingUpdate"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.ReplicaGlobalSecondaryIndexUpdates != nil { + for i, v := range s.ReplicaGlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil { + if err := s.ReplicaProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaAutoScalingUpdate) SetRegionName(v string) *ReplicaAutoScalingUpdate { + s.RegionName = &v + return s +} + +// SetReplicaGlobalSecondaryIndexUpdates sets the ReplicaGlobalSecondaryIndexUpdates field's value. +func (s *ReplicaAutoScalingUpdate) SetReplicaGlobalSecondaryIndexUpdates(v []*ReplicaGlobalSecondaryIndexAutoScalingUpdate) *ReplicaAutoScalingUpdate { + s.ReplicaGlobalSecondaryIndexUpdates = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingUpdate sets the ReplicaProvisionedReadCapacityAutoScalingUpdate field's value. 
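// A minimal sketch (not part of the vendored SDK or this patch) of building a
// ReplicaAutoScalingUpdate and checking it with the client-side Validate shown
// above; RegionName is the only required field. The MinimumUnits/MaximumUnits
// setters on AutoScalingSettingsUpdate are not shown in this excerpt and are
// assumed from the SDK; the Region and capacity numbers are placeholders.
package example

import "github.com/aws/aws-sdk-go/service/dynamodb"

func newReplicaAutoScalingUpdate() (*dynamodb.ReplicaAutoScalingUpdate, error) {
	update := (&dynamodb.ReplicaAutoScalingUpdate{}).
		SetRegionName("us-west-2").
		SetReplicaProvisionedReadCapacityAutoScalingUpdate(
			(&dynamodb.AutoScalingSettingsUpdate{}).
				SetMinimumUnits(5).
				SetMaximumUnits(500),
		)
	// Validate only enforces the client-side constraints documented above
	// (required RegionName, nested updates); the service applies its own checks.
	if err := update.Validate(); err != nil {
		return nil, err
	}
	return update, nil
}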
+func (s *ReplicaAutoScalingUpdate) SetReplicaProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaAutoScalingUpdate { + s.ReplicaProvisionedReadCapacityAutoScalingUpdate = v + return s +} + +// Contains the details of the replica. +type ReplicaDescription struct { + _ struct{} `type:"structure"` + + // Replica-specific global secondary index settings. + GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexDescription `type:"list"` + + // The KMS key of the replica that will be used for KMS encryption. + KMSMasterKeyId *string `type:"string"` + + // Replica-specific provisioned throughput. If not described, uses the source + // table's provisioned throughput settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` + + // The name of the Region. + RegionName *string `type:"string"` + + // The time at which the replica was first detected as inaccessible. To determine + // cause of inaccessibility check the ReplicaStatus property. + ReplicaInaccessibleDateTime *time.Time `type:"timestamp"` + + // The current state of the replica: + // + // * CREATING - The replica is being created. + // + // * UPDATING - The replica is being updated. + // + // * DELETING - The replica is being deleted. + // + // * ACTIVE - The replica is ready for use. + // + // * REGION_DISABLED - The replica is inaccessible because the Amazon Web + // Services Region has been disabled. If the Amazon Web Services Region remains + // inaccessible for more than 20 hours, DynamoDB will remove this replica + // from the replication group. The replica will not be deleted and replication + // will stop from and to this region. + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the + // table is inaccessible. If the KMS key remains inaccessible for more than + // 20 hours, DynamoDB will remove this replica from the replication group. + // The replica will not be deleted and replication will stop from and to + // this region. + ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` + + // Detailed information about the replica status. + ReplicaStatusDescription *string `type:"string"` + + // Specifies the progress of a Create, Update, or Delete action on the replica + // as a percentage. + ReplicaStatusPercentProgress *string `type:"string"` + + // Contains details of the table class. + ReplicaTableClassSummary *TableClassSummary `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaDescription) GoString() string { + return s.String() +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *ReplicaDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexDescription) *ReplicaDescription { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. 
+func (s *ReplicaDescription) SetKMSMasterKeyId(v string) *ReplicaDescription { + s.KMSMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *ReplicaDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaDescription { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription { + s.RegionName = &v + return s +} + +// SetReplicaInaccessibleDateTime sets the ReplicaInaccessibleDateTime field's value. +func (s *ReplicaDescription) SetReplicaInaccessibleDateTime(v time.Time) *ReplicaDescription { + s.ReplicaInaccessibleDateTime = &v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaDescription) SetReplicaStatus(v string) *ReplicaDescription { + s.ReplicaStatus = &v + return s +} + +// SetReplicaStatusDescription sets the ReplicaStatusDescription field's value. +func (s *ReplicaDescription) SetReplicaStatusDescription(v string) *ReplicaDescription { + s.ReplicaStatusDescription = &v + return s +} + +// SetReplicaStatusPercentProgress sets the ReplicaStatusPercentProgress field's value. +func (s *ReplicaDescription) SetReplicaStatusPercentProgress(v string) *ReplicaDescription { + s.ReplicaStatusPercentProgress = &v + return s +} + +// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value. +func (s *ReplicaDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaDescription { + s.ReplicaTableClassSummary = v + return s +} + +// Represents the properties of a replica global secondary index. +type ReplicaGlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Replica table GSI-specific provisioned throughput. If not specified, uses + // the source table GSI's read capacity settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicaGlobalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndex) SetIndexName(v string) *ReplicaGlobalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndex { + s.ProvisionedThroughputOverride = v + return s +} + +// Represents the auto scaling configuration for a replica global secondary +// index. +type ReplicaGlobalSecondaryIndexAutoScalingDescription struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The current state of the replica global secondary index: + // + // * CREATING - The index is being created. + // + // * UPDATING - The table/index configuration is being updated. The table/index + // remains available for data operations when UPDATING + // + // * DELETING - The index is being deleted. + // + // * ACTIVE - The index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // Represents the auto scaling settings for a global table or global secondary + // index. + ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // Represents the auto scaling settings for a global table or global secondary + // index. + ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription { + s.IndexName = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription { + s.IndexStatus = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value. 
+func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription { + s.ProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription { + s.ProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// Represents the auto scaling settings of a global secondary index for a replica +// that will be modified. +type ReplicaGlobalSecondaryIndexAutoScalingUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdate"} + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedReadCapacityAutoScalingUpdate != nil { + if err := s.ProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingUpdate sets the ProvisionedReadCapacityAutoScalingUpdate field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexAutoScalingUpdate { + s.ProvisionedReadCapacityAutoScalingUpdate = v + return s +} + +// Represents the properties of a replica global secondary index. +type ReplicaGlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // If not described, uses the source table GSI's read capacity settings. 
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexDescription { + s.IndexName = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndexDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { + s.ProvisionedThroughputOverride = v + return s +} + +// Represents the properties of a global secondary index. +type ReplicaGlobalSecondaryIndexSettingsDescription struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The current status of the global secondary index: + // + // * CREATING - The global secondary index is being created. + // + // * UPDATING - The global secondary index is being updated. + // + // * DELETING - The global secondary index is being deleted. + // + // * ACTIVE - The global secondary index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // Auto scaling settings for a global secondary index replica's read capacity + // units. + ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. + ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` + + // Auto scaling settings for a global secondary index replica's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.IndexName = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.IndexStatus = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedReadCapacityUnits = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedWriteCapacityUnits = &v + return s +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type ReplicaGlobalSecondaryIndexSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Auto scaling settings for managing a global secondary index replica's read + // capacity units. + ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. + ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1)) + } + if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingSettingsUpdate sets the ProvisionedReadCapacityAutoScalingSettingsUpdate field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedReadCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedReadCapacityUnits = &v + return s +} + +// The specified replica is no longer part of the global table. +type ReplicaNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaNotFoundException) GoString() string { + return s.String() +} + +func newErrorReplicaNotFoundException(v protocol.ResponseMetadata) error { + return &ReplicaNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReplicaNotFoundException) Code() string { + return "ReplicaNotFoundException" +} + +// Message returns the exception's message. +func (s *ReplicaNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReplicaNotFoundException) OrigErr() error { + return nil +} + +func (s *ReplicaNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ReplicaNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReplicaNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the properties of a replica. +type ReplicaSettingsDescription struct { + _ struct{} `type:"structure"` + + // The Region name of the replica. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // The read/write capacity mode of the replica. + ReplicaBillingModeSummary *BillingModeSummary `type:"structure"` + + // Replica global secondary index settings for the global table. + ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"` + + // Auto scaling settings for a global table replica's read capacity units. + ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedReadCapacityUnits *int64 `type:"long"` + + // Auto scaling settings for a global table replica's write capacity units. + ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"` + + // The current state of the Region: + // + // * CREATING - The Region is being created. + // + // * UPDATING - The Region is being updated. + // + // * DELETING - The Region is being deleted. + // + // * ACTIVE - The Region is ready for use. + ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` + + // Contains details of the table class. + ReplicaTableClassSummary *TableClassSummary `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsDescription) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaSettingsDescription) SetRegionName(v string) *ReplicaSettingsDescription { + s.RegionName = &v + return s +} + +// SetReplicaBillingModeSummary sets the ReplicaBillingModeSummary field's value. 
+func (s *ReplicaSettingsDescription) SetReplicaBillingModeSummary(v *BillingModeSummary) *ReplicaSettingsDescription { + s.ReplicaBillingModeSummary = v + return s +} + +// SetReplicaGlobalSecondaryIndexSettings sets the ReplicaGlobalSecondaryIndexSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaGlobalSecondaryIndexSettings(v []*ReplicaGlobalSecondaryIndexSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaGlobalSecondaryIndexSettings = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsDescription { + s.ReplicaProvisionedReadCapacityUnits = &v + return s +} + +// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedWriteCapacityUnits sets the ReplicaProvisionedWriteCapacityUnits field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityUnits(v int64) *ReplicaSettingsDescription { + s.ReplicaProvisionedWriteCapacityUnits = &v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettingsDescription { + s.ReplicaStatus = &v + return s +} + +// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value. +func (s *ReplicaSettingsDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaSettingsDescription { + s.ReplicaTableClassSummary = v + return s +} + +// Represents the settings for a global table in a Region that will be modified. +type ReplicaSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The Region of the replica to be added. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Represents the settings of a global secondary index for a global table that + // will be modified. + ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` + + // Auto scaling settings for managing a global table replica's read capacity + // units. + ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` + + // Replica-specific table class. If not specified, uses the source table's table + // class. 
+ ReplicaTableClass *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaSettingsUpdate"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1)) + } + if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1)) + } + if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil { + for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaSettingsUpdate) SetRegionName(v string) *ReplicaSettingsUpdate { + s.RegionName = &v + return s +} + +// SetReplicaGlobalSecondaryIndexSettingsUpdate sets the ReplicaGlobalSecondaryIndexSettingsUpdate field's value. +func (s *ReplicaSettingsUpdate) SetReplicaGlobalSecondaryIndexSettingsUpdate(v []*ReplicaGlobalSecondaryIndexSettingsUpdate) *ReplicaSettingsUpdate { + s.ReplicaGlobalSecondaryIndexSettingsUpdate = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate sets the ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate field's value. +func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaSettingsUpdate { + s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. +func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsUpdate { + s.ReplicaProvisionedReadCapacityUnits = &v + return s +} + +// SetReplicaTableClass sets the ReplicaTableClass field's value. 
+func (s *ReplicaSettingsUpdate) SetReplicaTableClass(v string) *ReplicaSettingsUpdate { + s.ReplicaTableClass = &v + return s +} + +// Represents one of the following: +// +// - A new replica to be added to an existing global table. +// +// - New parameters for an existing replica. +// +// - An existing replica to be removed from an existing global table. +type ReplicaUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a replica on an existing global table. + Create *CreateReplicaAction `type:"structure"` + + // The name of the existing replica to be removed. + Delete *DeleteReplicaAction `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *ReplicaUpdate) SetCreate(v *CreateReplicaAction) *ReplicaUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate { + s.Delete = v + return s +} + +// Represents one of the following: +// +// - A new replica to be added to an existing regional table or global table. +// This request invokes the CreateTableReplica action in the destination +// Region. +// +// - New parameters for an existing replica. This request invokes the UpdateTable +// action in the destination Region. +// +// - An existing replica to be deleted. The request invokes the DeleteTableReplica +// action in the destination Region, deleting the replica and all if its +// items in the destination Region. +// +// When you manually remove a table or global table replica, you do not automatically +// remove any associated scalable targets, scaling policies, or CloudWatch alarms. +type ReplicationGroupUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a replica for the table. + Create *CreateReplicationGroupMemberAction `type:"structure"` + + // The parameters required for deleting a replica for the table. + Delete *DeleteReplicationGroupMemberAction `type:"structure"` + + // The parameters required for updating a replica for the table. + Update *UpdateReplicationGroupMemberAction `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationGroupUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationGroupUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationGroupUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationGroupUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *ReplicationGroupUpdate) SetCreate(v *CreateReplicationGroupMemberAction) *ReplicationGroupUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *ReplicationGroupUpdate) SetDelete(v *DeleteReplicationGroupMemberAction) *ReplicationGroupUpdate { + s.Delete = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction) *ReplicationGroupUpdate { + s.Update = v + return s +} + +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +type RequestLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestLimitExceeded) GoString() string { + return s.String() +} + +func newErrorRequestLimitExceeded(v protocol.ResponseMetadata) error { + return &RequestLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *RequestLimitExceeded) Code() string { + return "RequestLimitExceeded" +} + +// Message returns the exception's message. +func (s *RequestLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
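// A minimal sketch (not part of the vendored SDK or this patch) of the
// ReplicationGroupUpdate "Create" path described above: adding one replica to
// an existing table through UpdateTable. CreateReplicationGroupMemberAction
// and UpdateTableInput.ReplicaUpdates are not shown in this excerpt and are
// assumed from the SDK; the table name and Region are placeholders.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func addReplica(svc *dynamodb.DynamoDB) error {
	update := &dynamodb.ReplicationGroupUpdate{
		Create: &dynamodb.CreateReplicationGroupMemberAction{
			RegionName: aws.String("us-east-2"), // placeholder destination Region
		},
	}
	if err := update.Validate(); err != nil {
		return err
	}
	// Exactly one of Create, Update or Delete is set per ReplicationGroupUpdate.
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName:      aws.String("example-table"),
		ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{update},
	})
	return err
}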
+func (s *RequestLimitExceeded) OrigErr() error { + return nil +} + +func (s *RequestLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *RequestLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *RequestLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +type ResourceInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The resource which is being attempted to be changed is in use. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceInUseException) GoString() string { + return s.String() +} + +func newErrorResourceInUseException(v protocol.ResponseMetadata) error { + return &ResourceInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceInUseException) Code() string { + return "ResourceInUseException" +} + +// Message returns the exception's message. +func (s *ResourceInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceInUseException) OrigErr() error { + return nil +} + +func (s *ResourceInUseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The resource which is being requested does not exist. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Contains details for the restore. +type RestoreSummary struct { + _ struct{} `type:"structure"` + + // Point in time or source backup time. + // + // RestoreDateTime is a required field + RestoreDateTime *time.Time `type:"timestamp" required:"true"` + + // Indicates if a restore is in progress or not. + // + // RestoreInProgress is a required field + RestoreInProgress *bool `type:"boolean" required:"true"` + + // The Amazon Resource Name (ARN) of the backup from which the table was restored. + SourceBackupArn *string `min:"37" type:"string"` + + // The ARN of the source table of the backup that is being restored. + SourceTableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreSummary) GoString() string { + return s.String() +} + +// SetRestoreDateTime sets the RestoreDateTime field's value. +func (s *RestoreSummary) SetRestoreDateTime(v time.Time) *RestoreSummary { + s.RestoreDateTime = &v + return s +} + +// SetRestoreInProgress sets the RestoreInProgress field's value. +func (s *RestoreSummary) SetRestoreInProgress(v bool) *RestoreSummary { + s.RestoreInProgress = &v + return s +} + +// SetSourceBackupArn sets the SourceBackupArn field's value. +func (s *RestoreSummary) SetSourceBackupArn(v string) *RestoreSummary { + s.SourceBackupArn = &v + return s +} + +// SetSourceTableArn sets the SourceTableArn field's value. +func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary { + s.SourceTableArn = &v + return s +} + +type RestoreTableFromBackupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the backup. 
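+ // For example, a backup ARN generally has the form
+ // arn:aws:dynamodb:us-west-2:111122223333:table/Music/backup/01489602797149-a1b2c3d4
+ // (an illustrative, non-existent resource; region, account, table, and backup
+ // ID are placeholders).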
+ // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` + + // The billing mode of the restored table. + BillingModeOverride *string `type:"string" enum:"BillingMode"` + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + + // Provisioned throughput settings for the restored table. + ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` + + // The new server-side encryption settings for the restored table. + SSESpecificationOverride *SSESpecification `type:"structure"` + + // The name of the new table to which the backup must be restored. + // + // TargetTableName is a required field + TargetTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreTableFromBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + if s.TargetTableName == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTableName")) + } + if s.TargetTableName != nil && len(*s.TargetTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3)) + } + if s.GlobalSecondaryIndexOverride != nil { + for i, v := range s.GlobalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexOverride != nil { + for i, v := range s.LocalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. 
+func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBackupInput { + s.BackupArn = &v + return s +} + +// SetBillingModeOverride sets the BillingModeOverride field's value. +func (s *RestoreTableFromBackupInput) SetBillingModeOverride(v string) *RestoreTableFromBackupInput { + s.BillingModeOverride = &v + return s +} + +// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value. +func (s *RestoreTableFromBackupInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableFromBackupInput { + s.GlobalSecondaryIndexOverride = v + return s +} + +// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. +func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableFromBackupInput { + s.LocalSecondaryIndexOverride = v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput { + s.ProvisionedThroughputOverride = v + return s +} + +// SetSSESpecificationOverride sets the SSESpecificationOverride field's value. +func (s *RestoreTableFromBackupInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableFromBackupInput { + s.SSESpecificationOverride = v + return s +} + +// SetTargetTableName sets the TargetTableName field's value. +func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput { + s.TargetTableName = &v + return s +} + +type RestoreTableFromBackupOutput struct { + _ struct{} `type:"structure"` + + // The description of the table created from an existing backup. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) *RestoreTableFromBackupOutput { + s.TableDescription = v + return s +} + +type RestoreTableToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // The billing mode of the restored table. + BillingModeOverride *string `type:"string" enum:"BillingMode"` + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + + // Provisioned throughput settings for the restored table. 
+ ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"`
+
+ // Time in the past to restore the table to.
+ RestoreDateTime *time.Time `type:"timestamp"`
+
+ // The new server-side encryption settings for the restored table.
+ SSESpecificationOverride *SSESpecification `type:"structure"`
+
+ // The DynamoDB table that will be restored. This value is an Amazon Resource
+ // Name (ARN).
+ SourceTableArn *string `type:"string"`
+
+ // Name of the source table that is being restored.
+ SourceTableName *string `min:"3" type:"string"`
+
+ // The name of the new table to which it must be restored.
+ //
+ // TargetTableName is a required field
+ TargetTableName *string `min:"3" type:"string" required:"true"`
+
+ // Restore the table to the latest possible time. LatestRestorableDateTime is
+ // typically 5 minutes before the current time.
+ UseLatestRestorableTime *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreTableToPointInTimeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreTableToPointInTimeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableToPointInTimeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"}
+ if s.SourceTableName != nil && len(*s.SourceTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3))
+ }
+ if s.TargetTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
+ }
+ if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
+ }
+ if s.GlobalSecondaryIndexOverride != nil {
+ for i, v := range s.GlobalSecondaryIndexOverride {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.LocalSecondaryIndexOverride != nil {
+ for i, v := range s.LocalSecondaryIndexOverride {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ProvisionedThroughputOverride != nil {
+ if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBillingModeOverride sets the BillingModeOverride field's value.
+func (s *RestoreTableToPointInTimeInput) SetBillingModeOverride(v string) *RestoreTableToPointInTimeInput {
+ s.BillingModeOverride = &v
+ return s
+}
+
+// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value.
+func (s *RestoreTableToPointInTimeInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableToPointInTimeInput { + s.GlobalSecondaryIndexOverride = v + return s +} + +// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableToPointInTimeInput { + s.LocalSecondaryIndexOverride = v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRestoreDateTime sets the RestoreDateTime field's value. +func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput { + s.RestoreDateTime = &v + return s +} + +// SetSSESpecificationOverride sets the SSESpecificationOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableToPointInTimeInput { + s.SSESpecificationOverride = v + return s +} + +// SetSourceTableArn sets the SourceTableArn field's value. +func (s *RestoreTableToPointInTimeInput) SetSourceTableArn(v string) *RestoreTableToPointInTimeInput { + s.SourceTableArn = &v + return s +} + +// SetSourceTableName sets the SourceTableName field's value. +func (s *RestoreTableToPointInTimeInput) SetSourceTableName(v string) *RestoreTableToPointInTimeInput { + s.SourceTableName = &v + return s +} + +// SetTargetTableName sets the TargetTableName field's value. +func (s *RestoreTableToPointInTimeInput) SetTargetTableName(v string) *RestoreTableToPointInTimeInput { + s.TargetTableName = &v + return s +} + +// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value. +func (s *RestoreTableToPointInTimeInput) SetUseLatestRestorableTime(v bool) *RestoreTableToPointInTimeInput { + s.UseLatestRestorableTime = &v + return s +} + +type RestoreTableToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableToPointInTimeOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescription) *RestoreTableToPointInTimeOutput { + s.TableDescription = v + return s +} + +// The S3 bucket that is being imported from. +type S3BucketSource struct { + _ struct{} `type:"structure"` + + // The S3 bucket that is being imported from. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The account number of the S3 bucket that is being imported from. If the bucket + // is owned by the requester this is optional. 
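+ // For example, a 12-digit account ID such as 111122223333 (an illustrative
+ // placeholder value).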
+ S3BucketOwner *string `type:"string"` + + // The key prefix shared by all S3 Objects that are being imported. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3BucketSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3BucketSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3BucketSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3BucketSource"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *S3BucketSource) SetS3Bucket(v string) *S3BucketSource { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *S3BucketSource) SetS3BucketOwner(v string) *S3BucketSource { + s.S3BucketOwner = &v + return s +} + +// SetS3KeyPrefix sets the S3KeyPrefix field's value. +func (s *S3BucketSource) SetS3KeyPrefix(v string) *S3BucketSource { + s.S3KeyPrefix = &v + return s +} + +// The description of the server-side encryption status on the specified table. +type SSEDescription struct { + _ struct{} `type:"structure"` + + // Indicates the time, in UNIX epoch date format, when DynamoDB detected that + // the table's KMS key was inaccessible. This attribute will automatically be + // cleared when DynamoDB detects that the table's KMS key is accessible again. + // DynamoDB will initiate the table archival process when table's KMS key remains + // inaccessible for more than seven days from this date. + InaccessibleEncryptionDateTime *time.Time `type:"timestamp"` + + // The KMS key ARN used for the KMS encryption. + KMSMasterKeyArn *string `type:"string"` + + // Server-side encryption type. The only supported value is: + // + // * KMS - Server-side encryption that uses Key Management Service. The key + // is stored in your account and is managed by KMS (KMS charges apply). + SSEType *string `type:"string" enum:"SSEType"` + + // Represents the current state of server-side encryption. The only supported + // values are: + // + // * ENABLED - Server-side encryption is enabled. + // + // * UPDATING - Server-side encryption is being updated. + Status *string `type:"string" enum:"SSEStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSEDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s SSEDescription) GoString() string { + return s.String() +} + +// SetInaccessibleEncryptionDateTime sets the InaccessibleEncryptionDateTime field's value. +func (s *SSEDescription) SetInaccessibleEncryptionDateTime(v time.Time) *SSEDescription { + s.InaccessibleEncryptionDateTime = &v + return s +} + +// SetKMSMasterKeyArn sets the KMSMasterKeyArn field's value. +func (s *SSEDescription) SetKMSMasterKeyArn(v string) *SSEDescription { + s.KMSMasterKeyArn = &v + return s +} + +// SetSSEType sets the SSEType field's value. +func (s *SSEDescription) SetSSEType(v string) *SSEDescription { + s.SSEType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SSEDescription) SetStatus(v string) *SSEDescription { + s.Status = &v + return s +} + +// Represents the settings used to enable server-side encryption. +type SSESpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether server-side encryption is done using an Amazon Web Services + // managed key or an Amazon Web Services owned key. If enabled (true), server-side + // encryption type is set to KMS and an Amazon Web Services managed key is used + // (KMS charges apply). If disabled (false) or not specified, server-side encryption + // is set to Amazon Web Services owned key. + Enabled *bool `type:"boolean"` + + // The KMS key that should be used for the KMS encryption. To specify a key, + // use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note + // that you should only provide this parameter if the key is different from + // the default DynamoDB key alias/aws/dynamodb. + KMSMasterKeyId *string `type:"string"` + + // Server-side encryption type. The only supported value is: + // + // * KMS - Server-side encryption that uses Key Management Service. The key + // is stored in your account and is managed by KMS (KMS charges apply). + SSEType *string `type:"string" enum:"SSEType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSESpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSESpecification) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *SSESpecification) SetEnabled(v bool) *SSESpecification { + s.Enabled = &v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *SSESpecification) SetKMSMasterKeyId(v string) *SSESpecification { + s.KMSMasterKeyId = &v + return s +} + +// SetSSEType sets the SSEType field's value. +func (s *SSESpecification) SetSSEType(v string) *SSESpecification { + s.SSEType = &v + return s +} + +// Represents the input of a Scan operation. +type ScanInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter. Use FilterExpression instead. 
For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // A Boolean value that determines the read consistency model during the scan: + // + // * If ConsistentRead is false, then the data returned from Scan might not + // contain the results from other recently completed write operations (PutItem, + // UpdateItem, or DeleteItem). + // + // * If ConsistentRead is true, then all of the write operations that completed + // before the Scan began are guaranteed to be contained in the Scan response. + // + // The default setting for ConsistentRead is false. + // + // The ConsistentRead parameter is not supported on global secondary indexes. + // If you scan a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify + // the same segment whose previous Scan returned the corresponding value of + // LastEvaluatedKey. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Scan operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // in the Amazon DynamoDB Developer Guide. + FilterExpression *string `type:"string"` + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of + // a JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. 
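+ //
+ // For example (an illustrative, hand-written sketch; the table and attribute
+ // names are placeholders, and "#P" relies on the ExpressionAttributeNames
+ // substitution described above):
+ //
+ //	input := &dynamodb.ScanInput{
+ //	    TableName:                aws.String("Music"),
+ //	    ProjectionExpression:     aws.String("Title, Price, #P"),
+ //	    ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
+ //	}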
+ ProjectionExpression *string `type:"string"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html) + // in the Amazon DynamoDB Developer Guide. + ScanFilter map[string]*Condition `type:"map"` + + // For a parallel Scan request, Segment identifies an individual segment to + // be scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. For example, + // if you want to use four application threads to scan a table or an index, + // then the first thread specifies a Segment value of 0, the second thread specifies + // 1, and so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must + // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than the + // value provided for TotalSegments. + // + // If you provide Segment, you must also provide TotalSegments. + Segment *int64 `type:"integer"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. + // + // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent + // to specifying ALL_ATTRIBUTES. + // + // * COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. + // This return value is equivalent to specifying ProjectionExpression without + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation reads only the index and not the table. If any of the requested + // attributes are not projected into the local secondary index, DynamoDB + // fetches each of these attributes from the parent table. 
This extra fetching + // incurs additional throughput cost and latency. If you query or scan a + // global secondary index, you can only request attributes that are projected + // into the index. Global secondary index queries cannot fetch attributes + // from the parent table. + // + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying ProjectionExpression without any + // value for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items; or, if you provide + // IndexName, the name of the table to which that index belongs. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. The value of TotalSegments + // corresponds to the number of application workers that will perform the parallel + // scan. For example, if you want to use four application threads to scan a + // table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less + // than or equal to 1000000. If you specify a TotalSegments value of 1, the + // Scan operation will be sequential rather than parallel. + // + // If you specify TotalSegments, you must also specify Segment. + TotalSegments *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
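+// It only enforces the client-side constraints visible below (required fields
+// and minimum lengths/values); expression syntax is validated by the service
+// itself.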
+func (s *ScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScanInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TotalSegments != nil && *s.TotalSegments < 1 { + invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) + } + if s.ScanFilter != nil { + for i, v := range s.ScanFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *ScanInput) SetAttributesToGet(v []*string) *ScanInput { + s.AttributesToGet = v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *ScanInput) SetConditionalOperator(v string) *ScanInput { + s.ConditionalOperator = &v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *ScanInput) SetConsistentRead(v bool) *ScanInput { + s.ConsistentRead = &v + return s +} + +// SetExclusiveStartKey sets the ExclusiveStartKey field's value. +func (s *ScanInput) SetExclusiveStartKey(v map[string]*AttributeValue) *ScanInput { + s.ExclusiveStartKey = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *ScanInput) SetExpressionAttributeNames(v map[string]*string) *ScanInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *ScanInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *ScanInput { + s.ExpressionAttributeValues = v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *ScanInput) SetFilterExpression(v string) *ScanInput { + s.FilterExpression = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *ScanInput) SetIndexName(v string) *ScanInput { + s.IndexName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ScanInput) SetLimit(v int64) *ScanInput { + s.Limit = &v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *ScanInput) SetProjectionExpression(v string) *ScanInput { + s.ProjectionExpression = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *ScanInput) SetReturnConsumedCapacity(v string) *ScanInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetScanFilter sets the ScanFilter field's value. +func (s *ScanInput) SetScanFilter(v map[string]*Condition) *ScanInput { + s.ScanFilter = v + return s +} + +// SetSegment sets the Segment field's value. +func (s *ScanInput) SetSegment(v int64) *ScanInput { + s.Segment = &v + return s +} + +// SetSelect sets the Select field's value. 
+func (s *ScanInput) SetSelect(v string) *ScanInput { + s.Select = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ScanInput) SetTableName(v string) *ScanInput { + s.TableName = &v + return s +} + +// SetTotalSegments sets the TotalSegments field's value. +func (s *ScanInput) SetTotalSegments(v int64) *ScanInput { + s.TotalSegments = &v + return s +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the Scan operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items returned + // after the filter was applied, and ScannedCount is the number of matching + // items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as ScannedCount. + Count *int64 `type:"integer"` + + // An array of item attributes that match the scan criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount + // value with few, or no, Count results indicates an inefficient Scan operation. + // For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
+func (s *ScanOutput) SetConsumedCapacity(v *ConsumedCapacity) *ScanOutput { + s.ConsumedCapacity = v + return s +} + +// SetCount sets the Count field's value. +func (s *ScanOutput) SetCount(v int64) *ScanOutput { + s.Count = &v + return s +} + +// SetItems sets the Items field's value. +func (s *ScanOutput) SetItems(v []map[string]*AttributeValue) *ScanOutput { + s.Items = v + return s +} + +// SetLastEvaluatedKey sets the LastEvaluatedKey field's value. +func (s *ScanOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ScanOutput { + s.LastEvaluatedKey = v + return s +} + +// SetScannedCount sets the ScannedCount field's value. +func (s *ScanOutput) SetScannedCount(v int64) *ScanOutput { + s.ScannedCount = &v + return s +} + +// Contains the details of the table when the backup was created. +type SourceTableDetails struct { + _ struct{} `type:"structure"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. + // + // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend + // using PROVISIONED for predictable workloads. + // + // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. + // We recommend using PAY_PER_REQUEST for unpredictable workloads. + BillingMode *string `type:"string" enum:"BillingMode"` + + // Number of items in the table. Note that this is an approximate value. + ItemCount *int64 `type:"long"` + + // Schema of the table. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Read IOPs and Write IOPS on the table when the backup was created. + // + // ProvisionedThroughput is a required field + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` + + // ARN of the table for which backup was created. + TableArn *string `type:"string"` + + // Time when the source table was created. + // + // TableCreationDateTime is a required field + TableCreationDateTime *time.Time `type:"timestamp" required:"true"` + + // Unique identifier for the table for which the backup was created. + // + // TableId is a required field + TableId *string `type:"string" required:"true"` + + // The name of the table for which the backup was created. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // Size of the table in bytes. Note that this is an approximate value. + TableSizeBytes *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceTableDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceTableDetails) GoString() string { + return s.String() +} + +// SetBillingMode sets the BillingMode field's value. +func (s *SourceTableDetails) SetBillingMode(v string) *SourceTableDetails { + s.BillingMode = &v + return s +} + +// SetItemCount sets the ItemCount field's value. 
+func (s *SourceTableDetails) SetItemCount(v int64) *SourceTableDetails { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDetails { + s.KeySchema = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails { + s.ProvisionedThroughput = v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *SourceTableDetails) SetTableArn(v string) *SourceTableDetails { + s.TableArn = &v + return s +} + +// SetTableCreationDateTime sets the TableCreationDateTime field's value. +func (s *SourceTableDetails) SetTableCreationDateTime(v time.Time) *SourceTableDetails { + s.TableCreationDateTime = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *SourceTableDetails) SetTableId(v string) *SourceTableDetails { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *SourceTableDetails) SetTableName(v string) *SourceTableDetails { + s.TableName = &v + return s +} + +// SetTableSizeBytes sets the TableSizeBytes field's value. +func (s *SourceTableDetails) SetTableSizeBytes(v int64) *SourceTableDetails { + s.TableSizeBytes = &v + return s +} + +// Contains the details of the features enabled on the table when the backup +// was created. For example, LSIs, GSIs, streams, TTL. +type SourceTableFeatureDetails struct { + _ struct{} `type:"structure"` + + // Represents the GSI properties for the table when the backup was created. + // It includes the IndexName, KeySchema, Projection, and ProvisionedThroughput + // for the GSIs on the table at the time of backup. + GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"` + + // Represents the LSI properties for the table when the backup was created. + // It includes the IndexName, KeySchema and Projection for the LSIs on the table + // at the time of backup. + LocalSecondaryIndexes []*LocalSecondaryIndexInfo `type:"list"` + + // The description of the server-side encryption status on the table when the + // backup was created. + SSEDescription *SSEDescription `type:"structure"` + + // Stream settings on the table when the backup was created. + StreamDescription *StreamSpecification `type:"structure"` + + // Time to Live settings on the table when the backup was created. + TimeToLiveDescription *TimeToLiveDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceTableFeatureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceTableFeatureDetails) GoString() string { + return s.String() +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *SourceTableFeatureDetails) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexInfo) *SourceTableFeatureDetails { + s.GlobalSecondaryIndexes = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. 
+func (s *SourceTableFeatureDetails) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexInfo) *SourceTableFeatureDetails { + s.LocalSecondaryIndexes = v + return s +} + +// SetSSEDescription sets the SSEDescription field's value. +func (s *SourceTableFeatureDetails) SetSSEDescription(v *SSEDescription) *SourceTableFeatureDetails { + s.SSEDescription = v + return s +} + +// SetStreamDescription sets the StreamDescription field's value. +func (s *SourceTableFeatureDetails) SetStreamDescription(v *StreamSpecification) *SourceTableFeatureDetails { + s.StreamDescription = v + return s +} + +// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. +func (s *SourceTableFeatureDetails) SetTimeToLiveDescription(v *TimeToLiveDescription) *SourceTableFeatureDetails { + s.TimeToLiveDescription = v + return s +} + +// Represents the DynamoDB Streams configuration for a table in DynamoDB. +type StreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) + // on the table. + // + // StreamEnabled is a required field + StreamEnabled *bool `type:"boolean" required:"true"` + + // When an item in the table is modified, StreamViewType determines what information + // is written to the stream for this table. Valid values for StreamViewType + // are: + // + // * KEYS_ONLY - Only the key attributes of the modified item are written + // to the stream. + // + // * NEW_IMAGE - The entire item, as it appears after it was modified, is + // written to the stream. + // + // * OLD_IMAGE - The entire item, as it appeared before it was modified, + // is written to the stream. + // + // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StreamSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamSpecification"} + if s.StreamEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("StreamEnabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamEnabled sets the StreamEnabled field's value. +func (s *StreamSpecification) SetStreamEnabled(v bool) *StreamSpecification { + s.StreamEnabled = &v + return s +} + +// SetStreamViewType sets the StreamViewType field's value. +func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification { + s.StreamViewType = &v + return s +} + +// A target table with the specified name already exists. 
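+// This error is typically returned by restore operations such as RestoreTableFromBackup
+// and RestoreTableToPointInTime when TargetTableName refers to a table that already
+// exists. An illustrative, hand-written way to check for it with this SDK:
+//
+//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodeTableAlreadyExistsException {
+//	    // choose a different TargetTableName and retry
+//	}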
+type TableAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorTableAlreadyExistsException(v protocol.ResponseMetadata) error { + return &TableAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TableAlreadyExistsException) Code() string { + return "TableAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *TableAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TableAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *TableAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TableAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TableAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the auto scaling configuration for a global table. +type TableAutoScalingDescription struct { + _ struct{} `type:"structure"` + + // Represents replicas of the global table. + Replicas []*ReplicaAutoScalingDescription `type:"list"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The current state of the table: + // + // * CREATING - The table is being created. + // + // * UPDATING - The table is being updated. + // + // * DELETING - The table is being deleted. + // + // * ACTIVE - The table is ready for use. + TableStatus *string `type:"string" enum:"TableStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableAutoScalingDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableAutoScalingDescription) GoString() string { + return s.String() +} + +// SetReplicas sets the Replicas field's value. +func (s *TableAutoScalingDescription) SetReplicas(v []*ReplicaAutoScalingDescription) *TableAutoScalingDescription { + s.Replicas = v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *TableAutoScalingDescription) SetTableName(v string) *TableAutoScalingDescription { + s.TableName = &v + return s +} + +// SetTableStatus sets the TableStatus field's value. +func (s *TableAutoScalingDescription) SetTableStatus(v string) *TableAutoScalingDescription { + s.TableStatus = &v + return s +} + +// Contains details of the table class. +type TableClassSummary struct { + _ struct{} `type:"structure"` + + // The date and time at which the table class was last updated. + LastUpdateDateTime *time.Time `type:"timestamp"` + + // The table class of the specified table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + TableClass *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableClassSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableClassSummary) GoString() string { + return s.String() +} + +// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. +func (s *TableClassSummary) SetLastUpdateDateTime(v time.Time) *TableClassSummary { + s.LastUpdateDateTime = &v + return s +} + +// SetTableClass sets the TableClass field's value. +func (s *TableClassSummary) SetTableClass(v string) *TableClassSummary { + s.TableClass = &v + return s +} + +// The parameters for the table created as part of the import operation. +type TableCreationParameters struct { + _ struct{} `type:"structure"` + + // The attributes of the table created as part of the import operation. + // + // AttributeDefinitions is a required field + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // The billing mode for provisioning the table created as part of the import + // operation. + BillingMode *string `type:"string" enum:"BillingMode"` + + // The Global Secondary Indexes (GSI) of the table to be created as part of + // the import operation. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // The primary key and option sort key of the table created as part of the import + // operation. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the settings used to enable server-side encryption. + SSESpecification *SSESpecification `type:"structure"` + + // The name of the table created as part of the import operation. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableCreationParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableCreationParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TableCreationParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableCreationParameters"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *TableCreationParameters) SetAttributeDefinitions(v []*AttributeDefinition) *TableCreationParameters { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *TableCreationParameters) SetBillingMode(v string) *TableCreationParameters { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *TableCreationParameters) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *TableCreationParameters { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCreationParameters { + s.KeySchema = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. 
+func (s *TableCreationParameters) SetSSESpecification(v *SSESpecification) *TableCreationParameters { + s.SSESpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *TableCreationParameters) SetTableName(v string) *TableCreationParameters { + s.TableName = &v + return s +} + +// Represents the properties of a table. +type TableDescription struct { + _ struct{} `type:"structure"` + + // Contains information about the table archive. + ArchivalSummary *ArchivalSummary `type:"structure"` + + // An array of AttributeDefinition objects. Each of these objects describes + // one attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // + // * AttributeName - The name of the attribute. + // + // * AttributeType - The data type for the attribute. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // Contains the details for the read/write capacity mode. + BillingModeSummary *BillingModeSummary `type:"structure"` + + // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) + // format. + CreationDateTime *time.Time `type:"timestamp"` + + // The global secondary indexes, if any, on the table. Each index is scoped + // to a given partition key value. Each element is composed of: + // + // * Backfilling - If true, then the index is currently in the backfilling + // phase. Backfilling occurs only when a new global secondary index is added + // to the table. It is the process by which DynamoDB populates the new index + // with data from the table. (This attribute does not appear for indexes + // that were created during a CreateTable operation.) You can delete an index + // that is being created during the Backfilling phase when IndexStatus is + // set to CREATING and Backfilling is true. You can't delete the index that + // is being created when IndexStatus is set to CREATING and Backfilling is + // false. (This attribute does not appear for indexes that were created during + // a CreateTable operation.) + // + // * IndexName - The name of the global secondary index. + // + // * IndexSizeBytes - The total size of the global secondary index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. + // + // * IndexStatus - The current status of the global secondary index: CREATING + // - The index is being created. UPDATING - The index is being updated. DELETING + // - The index is being deleted. ACTIVE - The index is ready for use. + // + // * ItemCount - The number of items in the global secondary index. DynamoDB + // updates this value approximately every six hours. Recent changes might + // not be reflected in this value. + // + // * KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The + // key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. 
INCLUDE + // - In addition to the attributes described in KEYS_ONLY, the secondary + // index will include other non-key attributes that you specify. ALL - All + // of the table attributes are projected into the index. NonKeyAttributes + // - A list of one or more non-key attribute names that are projected into + // the secondary index. The total count of attributes provided in NonKeyAttributes, + // summed across all of the secondary indexes, must not exceed 100. If you + // project the same attribute into two different indexes, this counts as + // two distinct attributes when determining the total. + // + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along + // with data about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` + + // Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) + // in use, if the table is replicated across Amazon Web Services Regions. + GlobalTableVersion *string `type:"string"` + + // The number of items in the specified table. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // * AttributeName - The name of the attribute. + // + // * KeyType - The role of the attribute: HASH - partition key RANGE - sort + // key The partition key of an item is also known as its hash attribute. + // The term "hash attribute" derives from DynamoDB's usage of an internal + // hash function to evenly distribute data items across partitions, based + // on their partition key values. The sort key of an item is also known as + // its range attribute. The term "range attribute" derives from the way DynamoDB + // stores items with the same partition key physically close together, in + // sorted order by the sort key value. + // + // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream + // for this table. + LatestStreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // * Amazon Web Services customer ID + // + // * Table name + // + // * StreamLabel + LatestStreamLabel *string `type:"string"` + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given partition key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of + // data within a given item collection cannot exceed 10 GB. Each element is + // composed of: + // + // * IndexName - The name of the local secondary index. + // + // * KeySchema - Specifies the complete index key schema. 
The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The + // key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB + // updates this value approximately every six hours. Recent changes might + // not be reflected in this value. + // + // * ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be + // reflected in this value. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + + // The provisioned throughput settings for the table, consisting of read and + // write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` + + // Represents replicas of the table. + Replicas []*ReplicaDescription `type:"list"` + + // Contains details for the restore. + RestoreSummary *RestoreSummary `type:"structure"` + + // The description of the server-side encryption status on the specified table. + SSEDescription *SSEDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *StreamSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the table. + TableArn *string `type:"string"` + + // Contains details of the table class. + TableClassSummary *TableClassSummary `type:"structure"` + + // Unique identifier for the table for which the backup was created. + TableId *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The total size of the specified table, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + TableSizeBytes *int64 `type:"long"` + + // The current state of the table: + // + // * CREATING - The table is being created. + // + // * UPDATING - The table/index configuration is being updated. The table/index + // remains available for data operations when UPDATING. + // + // * DELETING - The table is being deleted. + // + // * ACTIVE - The table is ready for use. + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the + // table in inaccessible. Table operations may fail due to failure to use + // the KMS key. 
DynamoDB will initiate the table archival process when a + // table's KMS key remains inaccessible for more than seven days. + // + // * ARCHIVING - The table is being archived. Operations are not allowed + // until archival is complete. + // + // * ARCHIVED - The table has been archived. See the ArchivalReason for more + // information. + TableStatus *string `type:"string" enum:"TableStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableDescription) GoString() string { + return s.String() +} + +// SetArchivalSummary sets the ArchivalSummary field's value. +func (s *TableDescription) SetArchivalSummary(v *ArchivalSummary) *TableDescription { + s.ArchivalSummary = v + return s +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *TableDescription) SetAttributeDefinitions(v []*AttributeDefinition) *TableDescription { + s.AttributeDefinitions = v + return s +} + +// SetBillingModeSummary sets the BillingModeSummary field's value. +func (s *TableDescription) SetBillingModeSummary(v *BillingModeSummary) *TableDescription { + s.BillingModeSummary = v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription { + s.CreationDateTime = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription { + s.GlobalSecondaryIndexes = v + return s +} + +// SetGlobalTableVersion sets the GlobalTableVersion field's value. +func (s *TableDescription) SetGlobalTableVersion(v string) *TableDescription { + s.GlobalTableVersion = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *TableDescription) SetItemCount(v int64) *TableDescription { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *TableDescription) SetKeySchema(v []*KeySchemaElement) *TableDescription { + s.KeySchema = v + return s +} + +// SetLatestStreamArn sets the LatestStreamArn field's value. +func (s *TableDescription) SetLatestStreamArn(v string) *TableDescription { + s.LatestStreamArn = &v + return s +} + +// SetLatestStreamLabel sets the LatestStreamLabel field's value. +func (s *TableDescription) SetLatestStreamLabel(v string) *TableDescription { + s.LatestStreamLabel = &v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDescription) *TableDescription { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription { + s.ProvisionedThroughput = v + return s +} + +// SetReplicas sets the Replicas field's value. 
+func (s *TableDescription) SetReplicas(v []*ReplicaDescription) *TableDescription { + s.Replicas = v + return s +} + +// SetRestoreSummary sets the RestoreSummary field's value. +func (s *TableDescription) SetRestoreSummary(v *RestoreSummary) *TableDescription { + s.RestoreSummary = v + return s +} + +// SetSSEDescription sets the SSEDescription field's value. +func (s *TableDescription) SetSSEDescription(v *SSEDescription) *TableDescription { + s.SSEDescription = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *TableDescription) SetStreamSpecification(v *StreamSpecification) *TableDescription { + s.StreamSpecification = v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *TableDescription) SetTableArn(v string) *TableDescription { + s.TableArn = &v + return s +} + +// SetTableClassSummary sets the TableClassSummary field's value. +func (s *TableDescription) SetTableClassSummary(v *TableClassSummary) *TableDescription { + s.TableClassSummary = v + return s +} + +// SetTableId sets the TableId field's value. +func (s *TableDescription) SetTableId(v string) *TableDescription { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *TableDescription) SetTableName(v string) *TableDescription { + s.TableName = &v + return s +} + +// SetTableSizeBytes sets the TableSizeBytes field's value. +func (s *TableDescription) SetTableSizeBytes(v int64) *TableDescription { + s.TableSizeBytes = &v + return s +} + +// SetTableStatus sets the TableStatus field's value. +func (s *TableDescription) SetTableStatus(v string) *TableDescription { + s.TableStatus = &v + return s +} + +// A target table with the specified name is either being created or deleted. +type TableInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableInUseException) GoString() string { + return s.String() +} + +func newErrorTableInUseException(v protocol.ResponseMetadata) error { + return &TableInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TableInUseException) Code() string { + return "TableInUseException" +} + +// Message returns the exception's message. +func (s *TableInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TableInUseException) OrigErr() error { + return nil +} + +func (s *TableInUseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TableInUseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *TableInUseException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +type TableNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableNotFoundException) GoString() string { + return s.String() +} + +func newErrorTableNotFoundException(v protocol.ResponseMetadata) error { + return &TableNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TableNotFoundException) Code() string { + return "TableNotFoundException" +} + +// Message returns the exception's message. +func (s *TableNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TableNotFoundException) OrigErr() error { + return nil +} + +func (s *TableNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TableNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TableNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to +// a single DynamoDB table. +// +// Amazon Web Services-assigned tag names and values are automatically assigned +// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned +// tag names do not count towards the tag limit of 50. User-assigned tag names +// have the prefix user: in the Cost Allocation Report. You cannot backdate +// the application of a tag. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can + // only have up to one tag with the same key. If you try to add an existing + // tag (same key), the existing tag value will be updated to the new value. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. Tag values are case-sensitive and can be null. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // Identifies the Amazon DynamoDB resource to which tags should be added. This + // value is an Amazon Resource Name (ARN). + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // The tags to be assigned to the Amazon DynamoDB resource. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The description of the Time to Live (TTL) status on the specified table. +type TimeToLiveDescription struct { + _ struct{} `type:"structure"` + + // The name of the TTL attribute for items in the table. + AttributeName *string `min:"1" type:"string"` + + // The TTL status for the table. + TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveDescription) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *TimeToLiveDescription) SetAttributeName(v string) *TimeToLiveDescription { + s.AttributeName = &v + return s +} + +// SetTimeToLiveStatus sets the TimeToLiveStatus field's value. +func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescription { + s.TimeToLiveStatus = &v + return s +} + +// Represents the settings used to enable or disable Time to Live (TTL) for +// the specified table. +type TimeToLiveSpecification struct { + _ struct{} `type:"structure"` + + // The name of the TTL attribute used to store the expiration time for items + // in the table. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // Indicates whether TTL is to be enabled (true) or disabled (false) on the + // table. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TimeToLiveSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimeToLiveSpecification"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *TimeToLiveSpecification) SetAttributeName(v string) *TimeToLiveSpecification { + s.AttributeName = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *TimeToLiveSpecification) SetEnabled(v bool) *TimeToLiveSpecification { + s.Enabled = &v + return s +} + +// Specifies an item to be retrieved as part of the transaction. +type TransactGetItem struct { + _ struct{} `type:"structure"` + + // Contains the primary key that identifies the item to get, together with the + // name of the table that contains the item, and optionally the specific attributes + // of the item to retrieve. + // + // Get is a required field + Get *Get `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactGetItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactGetItem"} + if s.Get == nil { + invalidParams.Add(request.NewErrParamRequired("Get")) + } + if s.Get != nil { + if err := s.Get.Validate(); err != nil { + invalidParams.AddNested("Get", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGet sets the Get field's value. +func (s *TransactGetItem) SetGet(v *Get) *TransactGetItem { + s.Get = v + return s +} + +type TransactGetItemsInput struct { + _ struct{} `type:"structure"` + + // A value of TOTAL causes consumed capacity information to be returned, and + // a value of NONE prevents that information from being returned. No other value + // is valid. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // An ordered array of up to 100 TransactGetItem objects, each of which contains + // a Get structure. + // + // TransactItems is a required field + TransactItems []*TransactGetItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactGetItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactGetItemsInput"} + if s.TransactItems == nil { + invalidParams.Add(request.NewErrParamRequired("TransactItems")) + } + if s.TransactItems != nil && len(s.TransactItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) + } + if s.TransactItems != nil { + for i, v := range s.TransactItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *TransactGetItemsInput) SetReturnConsumedCapacity(v string) *TransactGetItemsInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTransactItems sets the TransactItems field's value. +func (s *TransactGetItemsInput) SetTransactItems(v []*TransactGetItem) *TransactGetItemsInput { + s.TransactItems = v + return s +} + +type TransactGetItemsOutput struct { + _ struct{} `type:"structure"` + + // If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity + // objects, one for each table addressed by TransactGetItem objects in the TransactItems + // parameter. These ConsumedCapacity objects report the read-capacity units + // consumed by the TransactGetItems call in that table. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // An ordered array of up to 100 ItemResponse objects, each of which corresponds + // to the TransactGetItem object in the same position in the TransactItems array. + // Each ItemResponse object contains a Map of the name-value pairs that are + // the projected attributes of the requested item. + // + // If a requested item could not be retrieved, the corresponding ItemResponse + // object is Null, or if the requested item has no projected attributes, the + // corresponding ItemResponse object is an empty Map. + Responses []*ItemResponse `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *TransactGetItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactGetItemsOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. 
+func (s *TransactGetItemsOutput) SetResponses(v []*ItemResponse) *TransactGetItemsOutput { + s.Responses = v + return s +} + +// A list of requests that can perform update, put, delete, or check operations +// on multiple items in one or more tables atomically. +type TransactWriteItem struct { + _ struct{} `type:"structure"` + + // A request to perform a check item operation. + ConditionCheck *ConditionCheck `type:"structure"` + + // A request to perform a DeleteItem operation. + Delete *Delete `type:"structure"` + + // A request to perform a PutItem operation. + Put *Put `type:"structure"` + + // A request to perform an UpdateItem operation. + Update *Update `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactWriteItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactWriteItem"} + if s.ConditionCheck != nil { + if err := s.ConditionCheck.Validate(); err != nil { + invalidParams.AddNested("ConditionCheck", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Put != nil { + if err := s.Put.Validate(); err != nil { + invalidParams.AddNested("Put", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionCheck sets the ConditionCheck field's value. +func (s *TransactWriteItem) SetConditionCheck(v *ConditionCheck) *TransactWriteItem { + s.ConditionCheck = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *TransactWriteItem) SetDelete(v *Delete) *TransactWriteItem { + s.Delete = v + return s +} + +// SetPut sets the Put field's value. +func (s *TransactWriteItem) SetPut(v *Put) *TransactWriteItem { + s.Put = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *TransactWriteItem) SetUpdate(v *Update) *TransactWriteItem { + s.Update = v + return s +} + +type TransactWriteItemsInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, + // meaning that multiple identical calls have the same effect as one single + // call. + // + // Although multiple identical calls using the same client request token produce + // the same result on the server (no side effects), the responses to the calls + // might not be the same. If the ReturnConsumedCapacity> parameter is set, then + // the initial TransactWriteItems call returns the amount of write capacity + // units consumed in making the changes. 
Subsequent TransactWriteItems calls + // with the same client token return the number of read capacity units consumed + // in reading the item. + // + // A client request token is valid for 10 minutes after the first request that + // uses it is completed. After 10 minutes, any request with the same client + // token is treated as a new request. Do not resubmit the same request with + // the same client token for more than 10 minutes, or the result might not be + // idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch + // exception. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections (if any), that were + // modified during the operation and are returned in the response. If set to + // NONE (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // An ordered array of up to 100 TransactWriteItem objects, each of which contains + // a ConditionCheck, Put, Update, or Delete object. These can operate on items + // in different tables, but the tables must reside in the same Amazon Web Services + // account and Region, and no two of them can operate on the same item. + // + // TransactItems is a required field + TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TransactWriteItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactWriteItemsInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.TransactItems == nil { + invalidParams.Add(request.NewErrParamRequired("TransactItems")) + } + if s.TransactItems != nil && len(s.TransactItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) + } + if s.TransactItems != nil { + for i, v := range s.TransactItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *TransactWriteItemsInput) SetClientRequestToken(v string) *TransactWriteItemsInput { + s.ClientRequestToken = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *TransactWriteItemsInput) SetReturnConsumedCapacity(v string) *TransactWriteItemsInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *TransactWriteItemsInput) SetReturnItemCollectionMetrics(v string) *TransactWriteItemsInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetTransactItems sets the TransactItems field's value. +func (s *TransactWriteItemsInput) SetTransactItems(v []*TransactWriteItem) *TransactWriteItemsInput { + s.TransactItems = v + return s +} + +type TransactWriteItemsOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire TransactWriteItems operation. The + // values of the list are ordered according to the ordering of the TransactItems + // request parameter. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A list of tables that were processed by TransactWriteItems and, for each + // table, information about any item collections that were affected by individual + // UpdateItem, PutItem, or DeleteItem operations. + ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItemsOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *TransactWriteItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactWriteItemsOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *TransactWriteItemsOutput { + s.ItemCollectionMetrics = v + return s +} + +// The entire transaction request was canceled. 
+// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// +// - A condition in one of the condition expressions is not met. +// +// - A table in the TransactWriteItems request is in a different account +// or region. +// +// - More than one action in the TransactWriteItems operation targets the +// same item. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. +// +// - There is a user error, such as an invalid data format. +// +// DynamoDB cancels a TransactGetItems request under the following circumstances: +// +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// +// - A table in the TransactGetItems request is in a different account or +// region. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - There is a user error, such as an invalid data format. +// +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items, if an item has no error +// it will have None code and Null message. +// +// Cancellation reason codes and possible error messages: +// +// - No Errors: Code: None Message: null +// +// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The +// conditional request failed. +// +// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded +// Message: Collection size exceeded. +// +// - Transaction Conflict: Code: TransactionConflict Message: Transaction +// is ongoing for the item. +// +// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded +// Messages: The level of configured provisioned throughput for the table +// was exceeded. Consider increasing your provisioning level with the UpdateTable +// API. This Message is received when provisioned throughput is exceeded +// is on a provisioned DynamoDB table. The level of configured provisioned +// throughput for one or more global secondary indexes of the table was exceeded. +// Consider increasing your provisioning level for the under-provisioned +// global secondary indexes with the UpdateTable API. This message is returned +// when provisioned throughput is exceeded is on a provisioned GSI. +// +// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds +// the current capacity of your table or index. DynamoDB is automatically +// scaling your table or index so please try again shortly. If exceptions +// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. +// This message is returned when writes get throttled on an On-Demand table +// as DynamoDB is automatically scaling the table. Throughput exceeds the +// current capacity for one or more global secondary indexes. DynamoDB is +// automatically scaling your index so please try again shortly. This message +// is returned when when writes get throttled on an On-Demand GSI as DynamoDB +// is automatically scaling the GSI. 
+// +// - Validation Error: Code: ValidationError Messages: One or more parameter +// values were invalid. The update expression attempted to update the secondary +// index key beyond allowed size limits. The update expression attempted +// to update the secondary index key to unsupported type. An operand in the +// update expression has an incorrect data type. Item size to update has +// exceeded the maximum allowed size. Number overflow. Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +type TransactionCanceledException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A list of cancellation reasons. + CancellationReasons []*CancellationReason `min:"1" type:"list"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionCanceledException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionCanceledException) GoString() string { + return s.String() +} + +func newErrorTransactionCanceledException(v protocol.ResponseMetadata) error { + return &TransactionCanceledException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TransactionCanceledException) Code() string { + return "TransactionCanceledException" +} + +// Message returns the exception's message. +func (s *TransactionCanceledException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TransactionCanceledException) OrigErr() error { + return nil +} + +func (s *TransactionCanceledException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TransactionCanceledException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TransactionCanceledException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Operation was rejected because there is an ongoing transaction for the item. +type TransactionConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionConflictException) GoString() string { + return s.String() +} + +func newErrorTransactionConflictException(v protocol.ResponseMetadata) error { + return &TransactionConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TransactionConflictException) Code() string { + return "TransactionConflictException" +} + +// Message returns the exception's message. +func (s *TransactionConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TransactionConflictException) OrigErr() error { + return nil +} + +func (s *TransactionConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TransactionConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TransactionConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The transaction with the given request token is already in progress. +type TransactionInProgressException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionInProgressException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionInProgressException) GoString() string { + return s.String() +} + +func newErrorTransactionInProgressException(v protocol.ResponseMetadata) error { + return &TransactionInProgressException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TransactionInProgressException) Code() string { + return "TransactionInProgressException" +} + +// Message returns the exception's message. +func (s *TransactionInProgressException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TransactionInProgressException) OrigErr() error { + return nil +} + +func (s *TransactionInProgressException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TransactionInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TransactionInProgressException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The DynamoDB resource that the tags will be removed from. 
This value is an + // Amazon Resource Name (ARN). + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // A list of tag keys. Existing tags of the resource whose keys are members + // of this list will be removed from the DynamoDB resource. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// Represents a request to perform an UpdateItem operation. +type Update struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. 
+ // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid + // values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table for the UpdateItem request. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new value(s) for them. + // + // UpdateExpression is a required field + UpdateExpression *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Update) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Update) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Update) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Update"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.UpdateExpression == nil { + invalidParams.Add(request.NewErrParamRequired("UpdateExpression")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Update) SetConditionExpression(v string) *Update { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Update) SetExpressionAttributeNames(v map[string]*string) *Update { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Update) SetExpressionAttributeValues(v map[string]*AttributeValue) *Update { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Update) SetKey(v map[string]*AttributeValue) *Update { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Update) SetReturnValuesOnConditionCheckFailure(v string) *Update { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Update) SetTableName(v string) *Update { + s.TableName = &v + return s +} + +// SetUpdateExpression sets the UpdateExpression field's value. +func (s *Update) SetUpdateExpression(v string) *Update { + s.UpdateExpression = &v + return s +} + +type UpdateContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Represents the settings used to enable point in time recovery. 
+ // + // PointInTimeRecoverySpecification is a required field + PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"} + if s.PointInTimeRecoverySpecification == nil { + invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoverySpecification")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.PointInTimeRecoverySpecification != nil { + if err := s.PointInTimeRecoverySpecification.Validate(); err != nil { + invalidParams.AddNested("PointInTimeRecoverySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPointInTimeRecoverySpecification sets the PointInTimeRecoverySpecification field's value. +func (s *UpdateContinuousBackupsInput) SetPointInTimeRecoverySpecification(v *PointInTimeRecoverySpecification) *UpdateContinuousBackupsInput { + s.PointInTimeRecoverySpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateContinuousBackupsInput) SetTableName(v string) *UpdateContinuousBackupsInput { + s.TableName = &v + return s +} + +type UpdateContinuousBackupsOutput struct { + _ struct{} `type:"structure"` + + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsOutput) GoString() string { + return s.String() +} + +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. 
+func (s *UpdateContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *UpdateContinuousBackupsOutput { + s.ContinuousBackupsDescription = v + return s +} + +type UpdateContributorInsightsInput struct { + _ struct{} `type:"structure"` + + // Represents the contributor insights action. + // + // ContributorInsightsAction is a required field + ContributorInsightsAction *string `type:"string" required:"true" enum:"ContributorInsightsAction"` + + // The global secondary index name, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContributorInsightsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContributorInsightsInput"} + if s.ContributorInsightsAction == nil { + invalidParams.Add(request.NewErrParamRequired("ContributorInsightsAction")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContributorInsightsAction sets the ContributorInsightsAction field's value. +func (s *UpdateContributorInsightsInput) SetContributorInsightsAction(v string) *UpdateContributorInsightsInput { + s.ContributorInsightsAction = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *UpdateContributorInsightsInput) SetIndexName(v string) *UpdateContributorInsightsInput { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateContributorInsightsInput) SetTableName(v string) *UpdateContributorInsightsInput { + s.TableName = &v + return s +} + +type UpdateContributorInsightsOutput struct { + _ struct{} `type:"structure"` + + // The status of contributor insights + ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` + + // The name of the global secondary index, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateContributorInsightsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsOutput) GoString() string { + return s.String() +} + +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. +func (s *UpdateContributorInsightsOutput) SetContributorInsightsStatus(v string) *UpdateContributorInsightsOutput { + s.ContributorInsightsStatus = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *UpdateContributorInsightsOutput) SetIndexName(v string) *UpdateContributorInsightsOutput { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateContributorInsightsOutput) SetTableName(v string) *UpdateContributorInsightsOutput { + s.TableName = &v + return s +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be updated. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProvisionedThroughput is a required field + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedThroughput == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. 
+func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction { + s.ProvisionedThroughput = v + return s +} + +type UpdateGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The global table name. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // A list of Regions that should be added or removed from the global table. + // + // ReplicaUpdates is a required field + ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.ReplicaUpdates == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicaUpdates")) + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableInput) SetGlobalTableName(v string) *UpdateGlobalTableInput { + s.GlobalTableName = &v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. +func (s *UpdateGlobalTableInput) SetReplicaUpdates(v []*ReplicaUpdate) *UpdateGlobalTableInput { + s.ReplicaUpdates = v + return s +} + +type UpdateGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *UpdateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *UpdateGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +type UpdateGlobalTableSettingsInput struct { + _ struct{} `type:"structure"` + + // The billing mode of the global table. If GlobalTableBillingMode is not specified, + // the global table defaults to PROVISIONED capacity billing mode. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + GlobalTableBillingMode *string `type:"string" enum:"BillingMode"` + + // Represents the settings of a global secondary index for a global table that + // will be modified. + GlobalTableGlobalSecondaryIndexSettingsUpdate []*GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` + + // The name of the global table + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // Auto scaling settings for managing provisioned write capacity for the global + // table. + GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` + + // Represents the settings for a global table in a Region that will be modified. + ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"} + if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1)) + } + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1)) + } + if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaSettingsUpdate", 1)) + } + if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil { + for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaSettingsUpdate != nil { + for i, v := range s.ReplicaSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableBillingMode sets the GlobalTableBillingMode field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableBillingMode(v string) *UpdateGlobalTableSettingsInput { + s.GlobalTableBillingMode = &v + return s +} + +// SetGlobalTableGlobalSecondaryIndexSettingsUpdate sets the GlobalTableGlobalSecondaryIndexSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableGlobalSecondaryIndexSettingsUpdate(v []*GlobalTableGlobalSecondaryIndexSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.GlobalTableGlobalSecondaryIndexSettingsUpdate = v + return s +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsInput { + s.GlobalTableName = &v + return s +} + +// SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate sets the GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetGlobalTableProvisionedWriteCapacityUnits sets the GlobalTableProvisionedWriteCapacityUnits field's value. 
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityUnits(v int64) *UpdateGlobalTableSettingsInput { + s.GlobalTableProvisionedWriteCapacityUnits = &v + return s +} + +// SetReplicaSettingsUpdate sets the ReplicaSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetReplicaSettingsUpdate(v []*ReplicaSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.ReplicaSettingsUpdate = v + return s +} + +type UpdateGlobalTableSettingsOutput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableSettingsOutput) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableSettingsOutput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsOutput { + s.GlobalTableName = &v + return s +} + +// SetReplicaSettings sets the ReplicaSettings field's value. +func (s *UpdateGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *UpdateGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use UpdateExpression instead. For more information, + // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html) + // in the Amazon DynamoDB Developer Guide. + AttributeUpdates map[string]*AttributeValueUpdate `type:"map"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Specifying Conditions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. 
For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see Specifying Item + // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. 
+ // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appear before + // or after they are updated. For UpdateItem, the valid values are: + // + // * NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // * ALL_OLD - Returns all of the attributes of the item, as they appeared + // before the UpdateItem operation. + // + // * UPDATED_OLD - Returns only the updated attributes, as they appeared + // before the UpdateItem operation. + // + // * ALL_NEW - Returns all of the attributes of the item, as they appear + // after the UpdateItem operation. + // + // * UPDATED_NEW - Returns only the updated attributes, as they appear after + // the UpdateItem operation. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The values returned are strongly consistent. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table containing the item to update. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new values for them. + // + // The following action values are available for UpdateExpression. + // + // * SET - Adds one or more attributes and values to an item. If any of these + // attributes already exist, they are replaced by the new values. You can + // also use SET to add or subtract from an attribute that is of type Number. + // For example: SET myNum = myNum + :val SET supports the following functions: + // if_not_exists (path, operand) - if the item does not contain an attribute + // at the specified path, then if_not_exists evaluates to operand; otherwise, + // it evaluates to path. You can use this function to avoid overwriting an + // attribute that may already be present in the item. list_append (operand, + // operand) - evaluates to a list with a new element added to it. 
You can + // append the new element to the start or the end of the list by reversing + // the order of the operands. These function names are case-sensitive. + // + // * REMOVE - Removes one or more attributes from an item. + // + // * ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: If the existing attribute is a number, + // and if Value is also a number, then Value is mathematically added to the + // existing attribute. If Value is a negative number, then it is subtracted + // from the existing attribute. If you use ADD to increment or decrement + // a number value for an item that doesn't exist before the update, DynamoDB + // uses 0 as the initial value. Similarly, if you use ADD for an existing + // item to increment or decrement an attribute value that doesn't exist before + // the update, DynamoDB uses 0 as the initial value. For example, suppose + // that the item you want to update doesn't have an attribute named itemcount, + // but you decide to ADD the number 3 to this attribute anyway. DynamoDB + // will create the itemcount attribute, set its initial value to 0, and finally + // add 3 to it. The result will be a new itemcount attribute in the item, + // with a value of 3. If the existing data type is a set and if Value is + // also a set, then Value is added to the existing set. For example, if the + // attribute value is the set [1,2], and the ADD action specified [3], then + // the final attribute value is [1,2,3]. An error occurs if an ADD action + // is specified for a set attribute and the attribute type specified does + // not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The ADD action only supports + // Number and set data types. In addition, ADD can only be used on top-level + // attributes, not nested attributes. + // + // * DELETE - Deletes an element from a set. If a set of values is specified, + // then those values are subtracted from the old set. For example, if the + // attribute value was the set [a,b,c] and the DELETE action specifies [a,c], + // then the final attribute value is [b]. Specifying an empty set is an error. + // The DELETE action only supports set data types. In addition, DELETE can + // only be used on top-level attributes, not nested attributes. + // + // You can have many actions in a single expression, such as the following: + // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see Modifying Items and Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) + // in the Amazon DynamoDB Developer Guide. + UpdateExpression *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeUpdates sets the AttributeUpdates field's value. +func (s *UpdateItemInput) SetAttributeUpdates(v map[string]*AttributeValueUpdate) *UpdateItemInput { + s.AttributeUpdates = v + return s +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *UpdateItemInput) SetConditionExpression(v string) *UpdateItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *UpdateItemInput) SetConditionalOperator(v string) *UpdateItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *UpdateItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *UpdateItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *UpdateItemInput) SetExpressionAttributeNames(v map[string]*string) *UpdateItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *UpdateItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *UpdateItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *UpdateItemInput) SetKey(v map[string]*AttributeValue) *UpdateItemInput { + s.Key = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *UpdateItemInput) SetReturnConsumedCapacity(v string) *UpdateItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *UpdateItemInput) SetReturnItemCollectionMetrics(v string) *UpdateItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput { + s.TableName = &v + return s +} + +// SetUpdateExpression sets the UpdateExpression field's value. +func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput { + s.UpdateExpression = &v + return s +} + +// Represents the output of an UpdateItem operation. +type UpdateItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute values as they appear before or after the UpdateItem operation, + // as determined by the ReturnValues parameter. + // + // The Attributes map is only present if ReturnValues was specified as something + // other than NONE in the request. Each element represents one attribute. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the UpdateItem operation. 
The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the UpdateItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *UpdateItemOutput) SetAttributes(v map[string]*AttributeValue) *UpdateItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *UpdateItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *UpdateItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *UpdateItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a replica to be modified. +type UpdateReplicationGroupMemberAction struct { + _ struct{} `type:"structure"` + + // Replica-specific global secondary index settings. + GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` + + // The KMS key of the replica that should be used for KMS encryption. To specify + // a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. + // Note that you should only provide this parameter if the key is different + // from the default DynamoDB KMS key alias/aws/dynamodb. 
+ KMSMasterKeyId *string `type:"string"` + + // Replica-specific provisioned throughput. If not specified, uses the source + // table's provisioned throughput settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` + + // The Region where the replica exists. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Replica-specific table class. If not specified, uses the source table's table + // class. + TableClassOverride *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateReplicationGroupMemberAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateReplicationGroupMemberAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateReplicationGroupMemberAction"} + if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1)) + } + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *UpdateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *UpdateReplicationGroupMemberAction { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *UpdateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *UpdateReplicationGroupMemberAction { + s.KMSMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *UpdateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *UpdateReplicationGroupMemberAction { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *UpdateReplicationGroupMemberAction) SetRegionName(v string) *UpdateReplicationGroupMemberAction { + s.RegionName = &v + return s +} + +// SetTableClassOverride sets the TableClassOverride field's value. 
+func (s *UpdateReplicationGroupMemberAction) SetTableClassOverride(v string) *UpdateReplicationGroupMemberAction { + s.TableClassOverride = &v + return s +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, AttributeDefinitions + // must include the key element(s) of the new index. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. When switching from pay-per-request to provisioned capacity, initial + // provisioned capacity values must be set. The initial provisioned capacity + // values are estimated based on the consumed read and write capacity of your + // table and global secondary indexes over the past 30 minutes. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + BillingMode *string `type:"string" enum:"BillingMode"` + + // An array of one or more global secondary indexes for the table. For each + // index in the array, you can request one action: + // + // * Create - add a new global secondary index to the table. + // + // * Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // * Delete - remove a global secondary index from the table. + // + // You can create or delete only one global secondary index per UpdateTable + // operation. + // + // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // in the Amazon DynamoDB Developer Guide. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + + // The new provisioned throughput settings for the specified table or index. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // A list of replica update actions (create, delete, or update) for the table. + // + // This property only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) + // of global tables. + ReplicaUpdates []*ReplicationGroupUpdate `min:"1" type:"list"` + + // The new server-side encryption settings for the specified table. + SSESpecification *SSESpecification `type:"structure"` + + // Represents the DynamoDB Streams configuration for the table. + // + // You receive a ResourceInUseException if you try to enable a stream on a table + // that already has a stream, or if you try to disable a stream on a table that + // doesn't have a stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The table class of the table to be updated. Valid values are STANDARD and + // STANDARD_INFREQUENT_ACCESS. + TableClass *string `type:"string" enum:"TableClass"` + + // The name of the table to be updated. 
+ // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"} + if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexUpdates != nil { + for i, v := range s.GlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.StreamSpecification != nil { + if err := s.StreamSpecification.Validate(); err != nil { + invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *UpdateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *UpdateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. +func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput { + s.GlobalSecondaryIndexUpdates = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. 
+func (s *UpdateTableInput) SetReplicaUpdates(v []*ReplicationGroupUpdate) *UpdateTableInput { + s.ReplicaUpdates = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *UpdateTableInput) SetSSESpecification(v *SSESpecification) *UpdateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *UpdateTableInput) SetStreamSpecification(v *StreamSpecification) *UpdateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableClass sets the TableClass field's value. +func (s *UpdateTableInput) SetTableClass(v string) *UpdateTableInput { + s.TableClass = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTableInput) SetTableName(v string) *UpdateTableInput { + s.TableName = &v + return s +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *UpdateTableOutput) SetTableDescription(v *TableDescription) *UpdateTableOutput { + s.TableDescription = v + return s +} + +type UpdateTableReplicaAutoScalingInput struct { + _ struct{} `type:"structure"` + + // Represents the auto scaling settings of the global secondary indexes of the + // replica to be updated. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexAutoScalingUpdate `min:"1" type:"list"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // Represents the auto scaling settings of replicas of the table that will be + // modified. + ReplicaUpdates []*ReplicaAutoScalingUpdate `min:"1" type:"list"` + + // The name of the global table to be updated. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableReplicaAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableReplicaAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateTableReplicaAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTableReplicaAutoScalingInput"} + if s.GlobalSecondaryIndexUpdates != nil && len(s.GlobalSecondaryIndexUpdates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexUpdates", 1)) + } + if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.GlobalSecondaryIndexUpdates != nil { + for i, v := range s.GlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedWriteCapacityAutoScalingUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput { + s.GlobalSecondaryIndexUpdates = v + return s +} + +// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *UpdateTableReplicaAutoScalingInput { + s.ProvisionedWriteCapacityAutoScalingUpdate = v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetReplicaUpdates(v []*ReplicaAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput { + s.ReplicaUpdates = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetTableName(v string) *UpdateTableReplicaAutoScalingInput { + s.TableName = &v + return s +} + +type UpdateTableReplicaAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // Returns information about the auto scaling settings of a table with replicas. + TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableReplicaAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateTableReplicaAutoScalingOutput) GoString() string { + return s.String() +} + +// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. +func (s *UpdateTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *UpdateTableReplicaAutoScalingOutput { + s.TableAutoScalingDescription = v + return s +} + +// Represents the input of an UpdateTimeToLive operation. +type UpdateTimeToLiveInput struct { + _ struct{} `type:"structure"` + + // The name of the table to be configured. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // Represents the settings used to enable or disable Time to Live for the specified + // table. + // + // TimeToLiveSpecification is a required field + TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTimeToLiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTimeToLiveInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TimeToLiveSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification")) + } + if s.TimeToLiveSpecification != nil { + if err := s.TimeToLiveSpecification.Validate(); err != nil { + invalidParams.AddNested("TimeToLiveSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTimeToLiveInput) SetTableName(v string) *UpdateTimeToLiveInput { + s.TableName = &v + return s +} + +// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. +func (s *UpdateTimeToLiveInput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveInput { + s.TimeToLiveSpecification = v + return s +} + +type UpdateTimeToLiveOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of an UpdateTimeToLive operation. + TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveOutput) GoString() string { + return s.String() +} + +// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. +func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveOutput { + s.TimeToLiveSpecification = v + return s +} + +// Represents an operation to perform - either DeleteItem or PutItem. You can +// only request one of these operations, not both, in a single WriteRequest. +// If you do need to perform both of these operations, you need to provide two +// separate WriteRequest objects. +type WriteRequest struct { + _ struct{} `type:"structure"` + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest `type:"structure"` + + // A request to perform a PutItem operation. + PutRequest *PutRequest `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteRequest) GoString() string { + return s.String() +} + +// SetDeleteRequest sets the DeleteRequest field's value. +func (s *WriteRequest) SetDeleteRequest(v *DeleteRequest) *WriteRequest { + s.DeleteRequest = v + return s +} + +// SetPutRequest sets the PutRequest field's value. 
+func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest { + s.PutRequest = v + return s +} + +const ( + // AttributeActionAdd is a AttributeAction enum value + AttributeActionAdd = "ADD" + + // AttributeActionPut is a AttributeAction enum value + AttributeActionPut = "PUT" + + // AttributeActionDelete is a AttributeAction enum value + AttributeActionDelete = "DELETE" +) + +// AttributeAction_Values returns all elements of the AttributeAction enum +func AttributeAction_Values() []string { + return []string{ + AttributeActionAdd, + AttributeActionPut, + AttributeActionDelete, + } +} + +const ( + // BackupStatusCreating is a BackupStatus enum value + BackupStatusCreating = "CREATING" + + // BackupStatusDeleted is a BackupStatus enum value + BackupStatusDeleted = "DELETED" + + // BackupStatusAvailable is a BackupStatus enum value + BackupStatusAvailable = "AVAILABLE" +) + +// BackupStatus_Values returns all elements of the BackupStatus enum +func BackupStatus_Values() []string { + return []string{ + BackupStatusCreating, + BackupStatusDeleted, + BackupStatusAvailable, + } +} + +const ( + // BackupTypeUser is a BackupType enum value + BackupTypeUser = "USER" + + // BackupTypeSystem is a BackupType enum value + BackupTypeSystem = "SYSTEM" + + // BackupTypeAwsBackup is a BackupType enum value + BackupTypeAwsBackup = "AWS_BACKUP" +) + +// BackupType_Values returns all elements of the BackupType enum +func BackupType_Values() []string { + return []string{ + BackupTypeUser, + BackupTypeSystem, + BackupTypeAwsBackup, + } +} + +const ( + // BackupTypeFilterUser is a BackupTypeFilter enum value + BackupTypeFilterUser = "USER" + + // BackupTypeFilterSystem is a BackupTypeFilter enum value + BackupTypeFilterSystem = "SYSTEM" + + // BackupTypeFilterAwsBackup is a BackupTypeFilter enum value + BackupTypeFilterAwsBackup = "AWS_BACKUP" + + // BackupTypeFilterAll is a BackupTypeFilter enum value + BackupTypeFilterAll = "ALL" +) + +// BackupTypeFilter_Values returns all elements of the BackupTypeFilter enum +func BackupTypeFilter_Values() []string { + return []string{ + BackupTypeFilterUser, + BackupTypeFilterSystem, + BackupTypeFilterAwsBackup, + BackupTypeFilterAll, + } +} + +const ( + // BatchStatementErrorCodeEnumConditionalCheckFailed is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumConditionalCheckFailed = "ConditionalCheckFailed" + + // BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded = "ItemCollectionSizeLimitExceeded" + + // BatchStatementErrorCodeEnumRequestLimitExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumRequestLimitExceeded = "RequestLimitExceeded" + + // BatchStatementErrorCodeEnumValidationError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumValidationError = "ValidationError" + + // BatchStatementErrorCodeEnumProvisionedThroughputExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumProvisionedThroughputExceeded = "ProvisionedThroughputExceeded" + + // BatchStatementErrorCodeEnumTransactionConflict is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumTransactionConflict = "TransactionConflict" + + // BatchStatementErrorCodeEnumThrottlingError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumThrottlingError = "ThrottlingError" + + // BatchStatementErrorCodeEnumInternalServerError is a BatchStatementErrorCodeEnum 
enum value + BatchStatementErrorCodeEnumInternalServerError = "InternalServerError" + + // BatchStatementErrorCodeEnumResourceNotFound is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumResourceNotFound = "ResourceNotFound" + + // BatchStatementErrorCodeEnumAccessDenied is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumAccessDenied = "AccessDenied" + + // BatchStatementErrorCodeEnumDuplicateItem is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumDuplicateItem = "DuplicateItem" +) + +// BatchStatementErrorCodeEnum_Values returns all elements of the BatchStatementErrorCodeEnum enum +func BatchStatementErrorCodeEnum_Values() []string { + return []string{ + BatchStatementErrorCodeEnumConditionalCheckFailed, + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded, + BatchStatementErrorCodeEnumRequestLimitExceeded, + BatchStatementErrorCodeEnumValidationError, + BatchStatementErrorCodeEnumProvisionedThroughputExceeded, + BatchStatementErrorCodeEnumTransactionConflict, + BatchStatementErrorCodeEnumThrottlingError, + BatchStatementErrorCodeEnumInternalServerError, + BatchStatementErrorCodeEnumResourceNotFound, + BatchStatementErrorCodeEnumAccessDenied, + BatchStatementErrorCodeEnumDuplicateItem, + } +} + +const ( + // BillingModeProvisioned is a BillingMode enum value + BillingModeProvisioned = "PROVISIONED" + + // BillingModePayPerRequest is a BillingMode enum value + BillingModePayPerRequest = "PAY_PER_REQUEST" +) + +// BillingMode_Values returns all elements of the BillingMode enum +func BillingMode_Values() []string { + return []string{ + BillingModeProvisioned, + BillingModePayPerRequest, + } +} + +const ( + // ComparisonOperatorEq is a ComparisonOperator enum value + ComparisonOperatorEq = "EQ" + + // ComparisonOperatorNe is a ComparisonOperator enum value + ComparisonOperatorNe = "NE" + + // ComparisonOperatorIn is a ComparisonOperator enum value + ComparisonOperatorIn = "IN" + + // ComparisonOperatorLe is a ComparisonOperator enum value + ComparisonOperatorLe = "LE" + + // ComparisonOperatorLt is a ComparisonOperator enum value + ComparisonOperatorLt = "LT" + + // ComparisonOperatorGe is a ComparisonOperator enum value + ComparisonOperatorGe = "GE" + + // ComparisonOperatorGt is a ComparisonOperator enum value + ComparisonOperatorGt = "GT" + + // ComparisonOperatorBetween is a ComparisonOperator enum value + ComparisonOperatorBetween = "BETWEEN" + + // ComparisonOperatorNotNull is a ComparisonOperator enum value + ComparisonOperatorNotNull = "NOT_NULL" + + // ComparisonOperatorNull is a ComparisonOperator enum value + ComparisonOperatorNull = "NULL" + + // ComparisonOperatorContains is a ComparisonOperator enum value + ComparisonOperatorContains = "CONTAINS" + + // ComparisonOperatorNotContains is a ComparisonOperator enum value + ComparisonOperatorNotContains = "NOT_CONTAINS" + + // ComparisonOperatorBeginsWith is a ComparisonOperator enum value + ComparisonOperatorBeginsWith = "BEGINS_WITH" +) + +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorIn, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + ComparisonOperatorBetween, + ComparisonOperatorNotNull, + ComparisonOperatorNull, + ComparisonOperatorContains, + ComparisonOperatorNotContains, + ComparisonOperatorBeginsWith, + } +} + +const ( + // 
ConditionalOperatorAnd is a ConditionalOperator enum value + ConditionalOperatorAnd = "AND" + + // ConditionalOperatorOr is a ConditionalOperator enum value + ConditionalOperatorOr = "OR" +) + +// ConditionalOperator_Values returns all elements of the ConditionalOperator enum +func ConditionalOperator_Values() []string { + return []string{ + ConditionalOperatorAnd, + ConditionalOperatorOr, + } +} + +const ( + // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value + ContinuousBackupsStatusEnabled = "ENABLED" + + // ContinuousBackupsStatusDisabled is a ContinuousBackupsStatus enum value + ContinuousBackupsStatusDisabled = "DISABLED" +) + +// ContinuousBackupsStatus_Values returns all elements of the ContinuousBackupsStatus enum +func ContinuousBackupsStatus_Values() []string { + return []string{ + ContinuousBackupsStatusEnabled, + ContinuousBackupsStatusDisabled, + } +} + +const ( + // ContributorInsightsActionEnable is a ContributorInsightsAction enum value + ContributorInsightsActionEnable = "ENABLE" + + // ContributorInsightsActionDisable is a ContributorInsightsAction enum value + ContributorInsightsActionDisable = "DISABLE" +) + +// ContributorInsightsAction_Values returns all elements of the ContributorInsightsAction enum +func ContributorInsightsAction_Values() []string { + return []string{ + ContributorInsightsActionEnable, + ContributorInsightsActionDisable, + } +} + +const ( + // ContributorInsightsStatusEnabling is a ContributorInsightsStatus enum value + ContributorInsightsStatusEnabling = "ENABLING" + + // ContributorInsightsStatusEnabled is a ContributorInsightsStatus enum value + ContributorInsightsStatusEnabled = "ENABLED" + + // ContributorInsightsStatusDisabling is a ContributorInsightsStatus enum value + ContributorInsightsStatusDisabling = "DISABLING" + + // ContributorInsightsStatusDisabled is a ContributorInsightsStatus enum value + ContributorInsightsStatusDisabled = "DISABLED" + + // ContributorInsightsStatusFailed is a ContributorInsightsStatus enum value + ContributorInsightsStatusFailed = "FAILED" +) + +// ContributorInsightsStatus_Values returns all elements of the ContributorInsightsStatus enum +func ContributorInsightsStatus_Values() []string { + return []string{ + ContributorInsightsStatusEnabling, + ContributorInsightsStatusEnabled, + ContributorInsightsStatusDisabling, + ContributorInsightsStatusDisabled, + ContributorInsightsStatusFailed, + } +} + +const ( + // DestinationStatusEnabling is a DestinationStatus enum value + DestinationStatusEnabling = "ENABLING" + + // DestinationStatusActive is a DestinationStatus enum value + DestinationStatusActive = "ACTIVE" + + // DestinationStatusDisabling is a DestinationStatus enum value + DestinationStatusDisabling = "DISABLING" + + // DestinationStatusDisabled is a DestinationStatus enum value + DestinationStatusDisabled = "DISABLED" + + // DestinationStatusEnableFailed is a DestinationStatus enum value + DestinationStatusEnableFailed = "ENABLE_FAILED" +) + +// DestinationStatus_Values returns all elements of the DestinationStatus enum +func DestinationStatus_Values() []string { + return []string{ + DestinationStatusEnabling, + DestinationStatusActive, + DestinationStatusDisabling, + DestinationStatusDisabled, + DestinationStatusEnableFailed, + } +} + +const ( + // ExportFormatDynamodbJson is a ExportFormat enum value + ExportFormatDynamodbJson = "DYNAMODB_JSON" + + // ExportFormatIon is a ExportFormat enum value + ExportFormatIon = "ION" +) + +// ExportFormat_Values returns all elements of the 
ExportFormat enum +func ExportFormat_Values() []string { + return []string{ + ExportFormatDynamodbJson, + ExportFormatIon, + } +} + +const ( + // ExportStatusInProgress is a ExportStatus enum value + ExportStatusInProgress = "IN_PROGRESS" + + // ExportStatusCompleted is a ExportStatus enum value + ExportStatusCompleted = "COMPLETED" + + // ExportStatusFailed is a ExportStatus enum value + ExportStatusFailed = "FAILED" +) + +// ExportStatus_Values returns all elements of the ExportStatus enum +func ExportStatus_Values() []string { + return []string{ + ExportStatusInProgress, + ExportStatusCompleted, + ExportStatusFailed, + } +} + +const ( + // GlobalTableStatusCreating is a GlobalTableStatus enum value + GlobalTableStatusCreating = "CREATING" + + // GlobalTableStatusActive is a GlobalTableStatus enum value + GlobalTableStatusActive = "ACTIVE" + + // GlobalTableStatusDeleting is a GlobalTableStatus enum value + GlobalTableStatusDeleting = "DELETING" + + // GlobalTableStatusUpdating is a GlobalTableStatus enum value + GlobalTableStatusUpdating = "UPDATING" +) + +// GlobalTableStatus_Values returns all elements of the GlobalTableStatus enum +func GlobalTableStatus_Values() []string { + return []string{ + GlobalTableStatusCreating, + GlobalTableStatusActive, + GlobalTableStatusDeleting, + GlobalTableStatusUpdating, + } +} + +const ( + // ImportStatusInProgress is a ImportStatus enum value + ImportStatusInProgress = "IN_PROGRESS" + + // ImportStatusCompleted is a ImportStatus enum value + ImportStatusCompleted = "COMPLETED" + + // ImportStatusCancelling is a ImportStatus enum value + ImportStatusCancelling = "CANCELLING" + + // ImportStatusCancelled is a ImportStatus enum value + ImportStatusCancelled = "CANCELLED" + + // ImportStatusFailed is a ImportStatus enum value + ImportStatusFailed = "FAILED" +) + +// ImportStatus_Values returns all elements of the ImportStatus enum +func ImportStatus_Values() []string { + return []string{ + ImportStatusInProgress, + ImportStatusCompleted, + ImportStatusCancelling, + ImportStatusCancelled, + ImportStatusFailed, + } +} + +const ( + // IndexStatusCreating is a IndexStatus enum value + IndexStatusCreating = "CREATING" + + // IndexStatusUpdating is a IndexStatus enum value + IndexStatusUpdating = "UPDATING" + + // IndexStatusDeleting is a IndexStatus enum value + IndexStatusDeleting = "DELETING" + + // IndexStatusActive is a IndexStatus enum value + IndexStatusActive = "ACTIVE" +) + +// IndexStatus_Values returns all elements of the IndexStatus enum +func IndexStatus_Values() []string { + return []string{ + IndexStatusCreating, + IndexStatusUpdating, + IndexStatusDeleting, + IndexStatusActive, + } +} + +const ( + // InputCompressionTypeGzip is a InputCompressionType enum value + InputCompressionTypeGzip = "GZIP" + + // InputCompressionTypeZstd is a InputCompressionType enum value + InputCompressionTypeZstd = "ZSTD" + + // InputCompressionTypeNone is a InputCompressionType enum value + InputCompressionTypeNone = "NONE" +) + +// InputCompressionType_Values returns all elements of the InputCompressionType enum +func InputCompressionType_Values() []string { + return []string{ + InputCompressionTypeGzip, + InputCompressionTypeZstd, + InputCompressionTypeNone, + } +} + +const ( + // InputFormatDynamodbJson is a InputFormat enum value + InputFormatDynamodbJson = "DYNAMODB_JSON" + + // InputFormatIon is a InputFormat enum value + InputFormatIon = "ION" + + // InputFormatCsv is a InputFormat enum value + InputFormatCsv = "CSV" +) + +// InputFormat_Values returns all 
elements of the InputFormat enum +func InputFormat_Values() []string { + return []string{ + InputFormatDynamodbJson, + InputFormatIon, + InputFormatCsv, + } +} + +const ( + // KeyTypeHash is a KeyType enum value + KeyTypeHash = "HASH" + + // KeyTypeRange is a KeyType enum value + KeyTypeRange = "RANGE" +) + +// KeyType_Values returns all elements of the KeyType enum +func KeyType_Values() []string { + return []string{ + KeyTypeHash, + KeyTypeRange, + } +} + +const ( + // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value + PointInTimeRecoveryStatusEnabled = "ENABLED" + + // PointInTimeRecoveryStatusDisabled is a PointInTimeRecoveryStatus enum value + PointInTimeRecoveryStatusDisabled = "DISABLED" +) + +// PointInTimeRecoveryStatus_Values returns all elements of the PointInTimeRecoveryStatus enum +func PointInTimeRecoveryStatus_Values() []string { + return []string{ + PointInTimeRecoveryStatusEnabled, + PointInTimeRecoveryStatusDisabled, + } +} + +const ( + // ProjectionTypeAll is a ProjectionType enum value + ProjectionTypeAll = "ALL" + + // ProjectionTypeKeysOnly is a ProjectionType enum value + ProjectionTypeKeysOnly = "KEYS_ONLY" + + // ProjectionTypeInclude is a ProjectionType enum value + ProjectionTypeInclude = "INCLUDE" +) + +// ProjectionType_Values returns all elements of the ProjectionType enum +func ProjectionType_Values() []string { + return []string{ + ProjectionTypeAll, + ProjectionTypeKeysOnly, + ProjectionTypeInclude, + } +} + +const ( + // ReplicaStatusCreating is a ReplicaStatus enum value + ReplicaStatusCreating = "CREATING" + + // ReplicaStatusCreationFailed is a ReplicaStatus enum value + ReplicaStatusCreationFailed = "CREATION_FAILED" + + // ReplicaStatusUpdating is a ReplicaStatus enum value + ReplicaStatusUpdating = "UPDATING" + + // ReplicaStatusDeleting is a ReplicaStatus enum value + ReplicaStatusDeleting = "DELETING" + + // ReplicaStatusActive is a ReplicaStatus enum value + ReplicaStatusActive = "ACTIVE" + + // ReplicaStatusRegionDisabled is a ReplicaStatus enum value + ReplicaStatusRegionDisabled = "REGION_DISABLED" + + // ReplicaStatusInaccessibleEncryptionCredentials is a ReplicaStatus enum value + ReplicaStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" +) + +// ReplicaStatus_Values returns all elements of the ReplicaStatus enum +func ReplicaStatus_Values() []string { + return []string{ + ReplicaStatusCreating, + ReplicaStatusCreationFailed, + ReplicaStatusUpdating, + ReplicaStatusDeleting, + ReplicaStatusActive, + ReplicaStatusRegionDisabled, + ReplicaStatusInaccessibleEncryptionCredentials, + } +} + +// Determines the level of detail about either provisioned or on-demand throughput +// consumption that is returned in the response: +// +// - INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary +// index that was accessed. Note that some operations, such as GetItem and +// BatchGetItem, do not access any indexes at all. In these cases, specifying +// INDEXES will only return ConsumedCapacity information for table(s). +// +// - TOTAL - The response includes only the aggregate ConsumedCapacity for +// the operation. +// +// - NONE - No ConsumedCapacity details are included in the response. 
+const ( + // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityIndexes = "INDEXES" + + // ReturnConsumedCapacityTotal is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityTotal = "TOTAL" + + // ReturnConsumedCapacityNone is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityNone = "NONE" +) + +// ReturnConsumedCapacity_Values returns all elements of the ReturnConsumedCapacity enum +func ReturnConsumedCapacity_Values() []string { + return []string{ + ReturnConsumedCapacityIndexes, + ReturnConsumedCapacityTotal, + ReturnConsumedCapacityNone, + } +} + +const ( + // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value + ReturnItemCollectionMetricsSize = "SIZE" + + // ReturnItemCollectionMetricsNone is a ReturnItemCollectionMetrics enum value + ReturnItemCollectionMetricsNone = "NONE" +) + +// ReturnItemCollectionMetrics_Values returns all elements of the ReturnItemCollectionMetrics enum +func ReturnItemCollectionMetrics_Values() []string { + return []string{ + ReturnItemCollectionMetricsSize, + ReturnItemCollectionMetricsNone, + } +} + +const ( + // ReturnValueNone is a ReturnValue enum value + ReturnValueNone = "NONE" + + // ReturnValueAllOld is a ReturnValue enum value + ReturnValueAllOld = "ALL_OLD" + + // ReturnValueUpdatedOld is a ReturnValue enum value + ReturnValueUpdatedOld = "UPDATED_OLD" + + // ReturnValueAllNew is a ReturnValue enum value + ReturnValueAllNew = "ALL_NEW" + + // ReturnValueUpdatedNew is a ReturnValue enum value + ReturnValueUpdatedNew = "UPDATED_NEW" +) + +// ReturnValue_Values returns all elements of the ReturnValue enum +func ReturnValue_Values() []string { + return []string{ + ReturnValueNone, + ReturnValueAllOld, + ReturnValueUpdatedOld, + ReturnValueAllNew, + ReturnValueUpdatedNew, + } +} + +const ( + // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value + ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD" + + // ReturnValuesOnConditionCheckFailureNone is a ReturnValuesOnConditionCheckFailure enum value + ReturnValuesOnConditionCheckFailureNone = "NONE" +) + +// ReturnValuesOnConditionCheckFailure_Values returns all elements of the ReturnValuesOnConditionCheckFailure enum +func ReturnValuesOnConditionCheckFailure_Values() []string { + return []string{ + ReturnValuesOnConditionCheckFailureAllOld, + ReturnValuesOnConditionCheckFailureNone, + } +} + +const ( + // S3SseAlgorithmAes256 is a S3SseAlgorithm enum value + S3SseAlgorithmAes256 = "AES256" + + // S3SseAlgorithmKms is a S3SseAlgorithm enum value + S3SseAlgorithmKms = "KMS" +) + +// S3SseAlgorithm_Values returns all elements of the S3SseAlgorithm enum +func S3SseAlgorithm_Values() []string { + return []string{ + S3SseAlgorithmAes256, + S3SseAlgorithmKms, + } +} + +const ( + // SSEStatusEnabling is a SSEStatus enum value + SSEStatusEnabling = "ENABLING" + + // SSEStatusEnabled is a SSEStatus enum value + SSEStatusEnabled = "ENABLED" + + // SSEStatusDisabling is a SSEStatus enum value + SSEStatusDisabling = "DISABLING" + + // SSEStatusDisabled is a SSEStatus enum value + SSEStatusDisabled = "DISABLED" + + // SSEStatusUpdating is a SSEStatus enum value + SSEStatusUpdating = "UPDATING" +) + +// SSEStatus_Values returns all elements of the SSEStatus enum +func SSEStatus_Values() []string { + return []string{ + SSEStatusEnabling, + SSEStatusEnabled, + SSEStatusDisabling, + SSEStatusDisabled, + SSEStatusUpdating, + } +} + +const ( + // SSETypeAes256 is a SSEType enum value + 
SSETypeAes256 = "AES256" + + // SSETypeKms is a SSEType enum value + SSETypeKms = "KMS" +) + +// SSEType_Values returns all elements of the SSEType enum +func SSEType_Values() []string { + return []string{ + SSETypeAes256, + SSETypeKms, + } +} + +const ( + // ScalarAttributeTypeS is a ScalarAttributeType enum value + ScalarAttributeTypeS = "S" + + // ScalarAttributeTypeN is a ScalarAttributeType enum value + ScalarAttributeTypeN = "N" + + // ScalarAttributeTypeB is a ScalarAttributeType enum value + ScalarAttributeTypeB = "B" +) + +// ScalarAttributeType_Values returns all elements of the ScalarAttributeType enum +func ScalarAttributeType_Values() []string { + return []string{ + ScalarAttributeTypeS, + ScalarAttributeTypeN, + ScalarAttributeTypeB, + } +} + +const ( + // SelectAllAttributes is a Select enum value + SelectAllAttributes = "ALL_ATTRIBUTES" + + // SelectAllProjectedAttributes is a Select enum value + SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" + + // SelectSpecificAttributes is a Select enum value + SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" + + // SelectCount is a Select enum value + SelectCount = "COUNT" +) + +// Select_Values returns all elements of the Select enum +func Select_Values() []string { + return []string{ + SelectAllAttributes, + SelectAllProjectedAttributes, + SelectSpecificAttributes, + SelectCount, + } +} + +const ( + // StreamViewTypeNewImage is a StreamViewType enum value + StreamViewTypeNewImage = "NEW_IMAGE" + + // StreamViewTypeOldImage is a StreamViewType enum value + StreamViewTypeOldImage = "OLD_IMAGE" + + // StreamViewTypeNewAndOldImages is a StreamViewType enum value + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + + // StreamViewTypeKeysOnly is a StreamViewType enum value + StreamViewTypeKeysOnly = "KEYS_ONLY" +) + +// StreamViewType_Values returns all elements of the StreamViewType enum +func StreamViewType_Values() []string { + return []string{ + StreamViewTypeNewImage, + StreamViewTypeOldImage, + StreamViewTypeNewAndOldImages, + StreamViewTypeKeysOnly, + } +} + +const ( + // TableClassStandard is a TableClass enum value + TableClassStandard = "STANDARD" + + // TableClassStandardInfrequentAccess is a TableClass enum value + TableClassStandardInfrequentAccess = "STANDARD_INFREQUENT_ACCESS" +) + +// TableClass_Values returns all elements of the TableClass enum +func TableClass_Values() []string { + return []string{ + TableClassStandard, + TableClassStandardInfrequentAccess, + } +} + +const ( + // TableStatusCreating is a TableStatus enum value + TableStatusCreating = "CREATING" + + // TableStatusUpdating is a TableStatus enum value + TableStatusUpdating = "UPDATING" + + // TableStatusDeleting is a TableStatus enum value + TableStatusDeleting = "DELETING" + + // TableStatusActive is a TableStatus enum value + TableStatusActive = "ACTIVE" + + // TableStatusInaccessibleEncryptionCredentials is a TableStatus enum value + TableStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + + // TableStatusArchiving is a TableStatus enum value + TableStatusArchiving = "ARCHIVING" + + // TableStatusArchived is a TableStatus enum value + TableStatusArchived = "ARCHIVED" +) + +// TableStatus_Values returns all elements of the TableStatus enum +func TableStatus_Values() []string { + return []string{ + TableStatusCreating, + TableStatusUpdating, + TableStatusDeleting, + TableStatusActive, + TableStatusInaccessibleEncryptionCredentials, + TableStatusArchiving, + TableStatusArchived, + } +} + +const ( + // 
TimeToLiveStatusEnabling is a TimeToLiveStatus enum value + TimeToLiveStatusEnabling = "ENABLING" + + // TimeToLiveStatusDisabling is a TimeToLiveStatus enum value + TimeToLiveStatusDisabling = "DISABLING" + + // TimeToLiveStatusEnabled is a TimeToLiveStatus enum value + TimeToLiveStatusEnabled = "ENABLED" + + // TimeToLiveStatusDisabled is a TimeToLiveStatus enum value + TimeToLiveStatusDisabled = "DISABLED" +) + +// TimeToLiveStatus_Values returns all elements of the TimeToLiveStatus enum +func TimeToLiveStatus_Values() []string { + return []string{ + TimeToLiveStatusEnabling, + TimeToLiveStatusDisabling, + TimeToLiveStatusEnabled, + TimeToLiveStatusDisabled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go new file mode 100644 index 00000000000..c019e63dfc8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go @@ -0,0 +1,98 @@ +package dynamodb + +import ( + "bytes" + "hash/crc32" + "io" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + if c.Config.Retryer == nil { + // Only override the retryer with a custom one if the config + // does not already contain a retryer + setCustomRetryer(c) + } + + c.Handlers.Build.PushBack(disableCompression) + c.Handlers.Unmarshal.PushFront(validateCRC32) + } +} + +func setCustomRetryer(c *client.Client) { + maxRetries := aws.IntValue(c.Config.MaxRetries) + if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 10 + } + + c.Retryer = client.DefaultRetryer{ + NumMaxRetries: maxRetries, + MinRetryDelay: 50 * time.Millisecond, + } +} + +func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) { + if length < 0 { + length = 0 + } + buf := bytes.NewBuffer(make([]byte, 0, length)) + + if _, err = buf.ReadFrom(b); err != nil { + return nil, err + } + if err = b.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func disableCompression(r *request.Request) { + r.HTTPRequest.Header.Set("Accept-Encoding", "identity") +} + +func validateCRC32(r *request.Request) { + if r.Error != nil { + return // already have an error, no need to verify CRC + } + + // Checksum validation is off, skip + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + // Try to get CRC from response + header := r.HTTPResponse.Header.Get("X-Amz-Crc32") + if header == "" { + return // No header, skip + } + + expected, err := strconv.ParseUint(header, 10, 32) + if err != nil { + return // Could not determine CRC value, skip + } + + buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength) + if err != nil { // failed to read the response body, skip + return + } + + // Reset body for subsequent reads + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + + // Compute the CRC checksum + crc := crc32.ChecksumIEEE(buf.Bytes()) + + if crc != uint32(expected) { + // CRC does not match, set a retryable error + r.Retryable = aws.Bool(true) + r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go new file mode 100644 index 00000000000..ab12b274f39 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go @@ -0,0 +1,45 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package dynamodb provides the client and types for making API +// requests to Amazon DynamoDB. +// +// Amazon DynamoDB is a fully managed NoSQL database service that provides fast +// and predictable performance with seamless scalability. DynamoDB lets you +// offload the administrative burdens of operating and scaling a distributed +// database, so that you don't have to worry about hardware provisioning, setup +// and configuration, replication, software patching, or cluster scaling. +// +// With DynamoDB, you can create database tables that can store and retrieve +// any amount of data, and serve any level of request traffic. You can scale +// up or scale down your tables' throughput capacity without downtime or performance +// degradation, and use the Amazon Web Services Management Console to monitor +// resource utilization and performance metrics. +// +// DynamoDB automatically spreads the data and traffic for your tables over +// a sufficient number of servers to handle your throughput and storage requirements, +// while maintaining consistent and fast performance. All of your data is stored +// on solid state disks (SSDs) and automatically replicated across multiple +// Availability Zones in an Amazon Web Services Region, providing built-in high +// availability and data durability. +// +// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service. +// +// See dynamodb package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/ +// +// # Using the Client +// +// To contact Amazon DynamoDB with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon DynamoDB client DynamoDB for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New +package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go new file mode 100644 index 00000000000..0cca7e4b9e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go @@ -0,0 +1,27 @@ +/* +AttributeValue Marshaling and Unmarshaling Helpers + +Utility helpers to marshal and unmarshal AttributeValue to and +from Go types can be found in the dynamodbattribute sub package. This package +provides specialized functions for the common ways of working with +AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and +directly with *AttributeValue. This is helpful for marshaling Go types for API +operations such as PutItem, and unmarshaling Query and Scan APIs' responses. + +See the dynamodbattribute package documentation for more information. 
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/ + +# Expression Builders + +The expression package provides utility types and functions to build DynamoDB +expression for type safe construction of API ExpressionAttributeNames, and +ExpressionAttribute Values. + +The package represents the various DynamoDB Expressions as structs named +accordingly. For example, ConditionBuilder represents a DynamoDB Condition +Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on. + +See the expression package documentation for more information. +https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/ +*/ +package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go new file mode 100644 index 00000000000..0c97f94ed04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go @@ -0,0 +1,303 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package dynamodbiface provides an interface to enable mocking the Amazon DynamoDB service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package dynamodbiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// DynamoDBAPI provides an interface to enable mocking the +// dynamodb.DynamoDB service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon DynamoDB. +// func myFunc(svc dynamodbiface.DynamoDBAPI) bool { +// // Make svc.BatchExecuteStatement request +// } +// +// func main() { +// sess := session.New() +// svc := dynamodb.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockDynamoDBClient struct { +// dynamodbiface.DynamoDBAPI +// } +// func (m *mockDynamoDBClient) BatchExecuteStatement(input *dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockDynamoDBClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. 
+type DynamoDBAPI interface { + BatchExecuteStatement(*dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) + BatchExecuteStatementWithContext(aws.Context, *dynamodb.BatchExecuteStatementInput, ...request.Option) (*dynamodb.BatchExecuteStatementOutput, error) + BatchExecuteStatementRequest(*dynamodb.BatchExecuteStatementInput) (*request.Request, *dynamodb.BatchExecuteStatementOutput) + + BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) + BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error) + BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) + + BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error + BatchGetItemPagesWithContext(aws.Context, *dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool, ...request.Option) error + + BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) + BatchWriteItemWithContext(aws.Context, *dynamodb.BatchWriteItemInput, ...request.Option) (*dynamodb.BatchWriteItemOutput, error) + BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput) + + CreateBackup(*dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error) + CreateBackupWithContext(aws.Context, *dynamodb.CreateBackupInput, ...request.Option) (*dynamodb.CreateBackupOutput, error) + CreateBackupRequest(*dynamodb.CreateBackupInput) (*request.Request, *dynamodb.CreateBackupOutput) + + CreateGlobalTable(*dynamodb.CreateGlobalTableInput) (*dynamodb.CreateGlobalTableOutput, error) + CreateGlobalTableWithContext(aws.Context, *dynamodb.CreateGlobalTableInput, ...request.Option) (*dynamodb.CreateGlobalTableOutput, error) + CreateGlobalTableRequest(*dynamodb.CreateGlobalTableInput) (*request.Request, *dynamodb.CreateGlobalTableOutput) + + CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) + CreateTableWithContext(aws.Context, *dynamodb.CreateTableInput, ...request.Option) (*dynamodb.CreateTableOutput, error) + CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput) + + DeleteBackup(*dynamodb.DeleteBackupInput) (*dynamodb.DeleteBackupOutput, error) + DeleteBackupWithContext(aws.Context, *dynamodb.DeleteBackupInput, ...request.Option) (*dynamodb.DeleteBackupOutput, error) + DeleteBackupRequest(*dynamodb.DeleteBackupInput) (*request.Request, *dynamodb.DeleteBackupOutput) + + DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) + DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error) + DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) + + DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) + DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error) + DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) + + DescribeBackup(*dynamodb.DescribeBackupInput) (*dynamodb.DescribeBackupOutput, error) + DescribeBackupWithContext(aws.Context, *dynamodb.DescribeBackupInput, ...request.Option) (*dynamodb.DescribeBackupOutput, error) + DescribeBackupRequest(*dynamodb.DescribeBackupInput) (*request.Request, *dynamodb.DescribeBackupOutput) + + 
DescribeContinuousBackups(*dynamodb.DescribeContinuousBackupsInput) (*dynamodb.DescribeContinuousBackupsOutput, error) + DescribeContinuousBackupsWithContext(aws.Context, *dynamodb.DescribeContinuousBackupsInput, ...request.Option) (*dynamodb.DescribeContinuousBackupsOutput, error) + DescribeContinuousBackupsRequest(*dynamodb.DescribeContinuousBackupsInput) (*request.Request, *dynamodb.DescribeContinuousBackupsOutput) + + DescribeContributorInsights(*dynamodb.DescribeContributorInsightsInput) (*dynamodb.DescribeContributorInsightsOutput, error) + DescribeContributorInsightsWithContext(aws.Context, *dynamodb.DescribeContributorInsightsInput, ...request.Option) (*dynamodb.DescribeContributorInsightsOutput, error) + DescribeContributorInsightsRequest(*dynamodb.DescribeContributorInsightsInput) (*request.Request, *dynamodb.DescribeContributorInsightsOutput) + + DescribeEndpoints(*dynamodb.DescribeEndpointsInput) (*dynamodb.DescribeEndpointsOutput, error) + DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error) + DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput) + + DescribeExport(*dynamodb.DescribeExportInput) (*dynamodb.DescribeExportOutput, error) + DescribeExportWithContext(aws.Context, *dynamodb.DescribeExportInput, ...request.Option) (*dynamodb.DescribeExportOutput, error) + DescribeExportRequest(*dynamodb.DescribeExportInput) (*request.Request, *dynamodb.DescribeExportOutput) + + DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error) + DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error) + DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput) + + DescribeGlobalTableSettings(*dynamodb.DescribeGlobalTableSettingsInput) (*dynamodb.DescribeGlobalTableSettingsOutput, error) + DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, ...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error) + DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput) + + DescribeImport(*dynamodb.DescribeImportInput) (*dynamodb.DescribeImportOutput, error) + DescribeImportWithContext(aws.Context, *dynamodb.DescribeImportInput, ...request.Option) (*dynamodb.DescribeImportOutput, error) + DescribeImportRequest(*dynamodb.DescribeImportInput) (*request.Request, *dynamodb.DescribeImportOutput) + + DescribeKinesisStreamingDestination(*dynamodb.DescribeKinesisStreamingDestinationInput) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) + DescribeKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DescribeKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) + DescribeKinesisStreamingDestinationRequest(*dynamodb.DescribeKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DescribeKinesisStreamingDestinationOutput) + + DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error) + DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error) + DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput) + + 
DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) + DescribeTableWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.Option) (*dynamodb.DescribeTableOutput, error) + DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput) + + DescribeTableReplicaAutoScaling(*dynamodb.DescribeTableReplicaAutoScalingInput) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error) + DescribeTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.DescribeTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error) + DescribeTableReplicaAutoScalingRequest(*dynamodb.DescribeTableReplicaAutoScalingInput) (*request.Request, *dynamodb.DescribeTableReplicaAutoScalingOutput) + + DescribeTimeToLive(*dynamodb.DescribeTimeToLiveInput) (*dynamodb.DescribeTimeToLiveOutput, error) + DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) + DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput) + + DisableKinesisStreamingDestination(*dynamodb.DisableKinesisStreamingDestinationInput) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) + DisableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DisableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) + DisableKinesisStreamingDestinationRequest(*dynamodb.DisableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DisableKinesisStreamingDestinationOutput) + + EnableKinesisStreamingDestination(*dynamodb.EnableKinesisStreamingDestinationInput) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) + EnableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.EnableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) + EnableKinesisStreamingDestinationRequest(*dynamodb.EnableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.EnableKinesisStreamingDestinationOutput) + + ExecuteStatement(*dynamodb.ExecuteStatementInput) (*dynamodb.ExecuteStatementOutput, error) + ExecuteStatementWithContext(aws.Context, *dynamodb.ExecuteStatementInput, ...request.Option) (*dynamodb.ExecuteStatementOutput, error) + ExecuteStatementRequest(*dynamodb.ExecuteStatementInput) (*request.Request, *dynamodb.ExecuteStatementOutput) + + ExecuteTransaction(*dynamodb.ExecuteTransactionInput) (*dynamodb.ExecuteTransactionOutput, error) + ExecuteTransactionWithContext(aws.Context, *dynamodb.ExecuteTransactionInput, ...request.Option) (*dynamodb.ExecuteTransactionOutput, error) + ExecuteTransactionRequest(*dynamodb.ExecuteTransactionInput) (*request.Request, *dynamodb.ExecuteTransactionOutput) + + ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, ...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput) + + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) + GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error) + GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) + + 
ImportTable(*dynamodb.ImportTableInput) (*dynamodb.ImportTableOutput, error) + ImportTableWithContext(aws.Context, *dynamodb.ImportTableInput, ...request.Option) (*dynamodb.ImportTableOutput, error) + ImportTableRequest(*dynamodb.ImportTableInput) (*request.Request, *dynamodb.ImportTableOutput) + + ListBackups(*dynamodb.ListBackupsInput) (*dynamodb.ListBackupsOutput, error) + ListBackupsWithContext(aws.Context, *dynamodb.ListBackupsInput, ...request.Option) (*dynamodb.ListBackupsOutput, error) + ListBackupsRequest(*dynamodb.ListBackupsInput) (*request.Request, *dynamodb.ListBackupsOutput) + + ListContributorInsights(*dynamodb.ListContributorInsightsInput) (*dynamodb.ListContributorInsightsOutput, error) + ListContributorInsightsWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, ...request.Option) (*dynamodb.ListContributorInsightsOutput, error) + ListContributorInsightsRequest(*dynamodb.ListContributorInsightsInput) (*request.Request, *dynamodb.ListContributorInsightsOutput) + + ListContributorInsightsPages(*dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool) error + ListContributorInsightsPagesWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool, ...request.Option) error + + ListExports(*dynamodb.ListExportsInput) (*dynamodb.ListExportsOutput, error) + ListExportsWithContext(aws.Context, *dynamodb.ListExportsInput, ...request.Option) (*dynamodb.ListExportsOutput, error) + ListExportsRequest(*dynamodb.ListExportsInput) (*request.Request, *dynamodb.ListExportsOutput) + + ListExportsPages(*dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool) error + ListExportsPagesWithContext(aws.Context, *dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool, ...request.Option) error + + ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error) + ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error) + ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput) + + ListImports(*dynamodb.ListImportsInput) (*dynamodb.ListImportsOutput, error) + ListImportsWithContext(aws.Context, *dynamodb.ListImportsInput, ...request.Option) (*dynamodb.ListImportsOutput, error) + ListImportsRequest(*dynamodb.ListImportsInput) (*request.Request, *dynamodb.ListImportsOutput) + + ListImportsPages(*dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool) error + ListImportsPagesWithContext(aws.Context, *dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool, ...request.Option) error + + ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) + ListTablesWithContext(aws.Context, *dynamodb.ListTablesInput, ...request.Option) (*dynamodb.ListTablesOutput, error) + ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput) + + ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error + ListTablesPagesWithContext(aws.Context, *dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool, ...request.Option) error + + ListTagsOfResource(*dynamodb.ListTagsOfResourceInput) (*dynamodb.ListTagsOfResourceOutput, error) + ListTagsOfResourceWithContext(aws.Context, *dynamodb.ListTagsOfResourceInput, ...request.Option) (*dynamodb.ListTagsOfResourceOutput, error) + 
ListTagsOfResourceRequest(*dynamodb.ListTagsOfResourceInput) (*request.Request, *dynamodb.ListTagsOfResourceOutput) + + PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) + PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error) + PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) + + Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) + QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error) + QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) + + QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error + QueryPagesWithContext(aws.Context, *dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool, ...request.Option) error + + RestoreTableFromBackup(*dynamodb.RestoreTableFromBackupInput) (*dynamodb.RestoreTableFromBackupOutput, error) + RestoreTableFromBackupWithContext(aws.Context, *dynamodb.RestoreTableFromBackupInput, ...request.Option) (*dynamodb.RestoreTableFromBackupOutput, error) + RestoreTableFromBackupRequest(*dynamodb.RestoreTableFromBackupInput) (*request.Request, *dynamodb.RestoreTableFromBackupOutput) + + RestoreTableToPointInTime(*dynamodb.RestoreTableToPointInTimeInput) (*dynamodb.RestoreTableToPointInTimeOutput, error) + RestoreTableToPointInTimeWithContext(aws.Context, *dynamodb.RestoreTableToPointInTimeInput, ...request.Option) (*dynamodb.RestoreTableToPointInTimeOutput, error) + RestoreTableToPointInTimeRequest(*dynamodb.RestoreTableToPointInTimeInput) (*request.Request, *dynamodb.RestoreTableToPointInTimeOutput) + + Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) + ScanWithContext(aws.Context, *dynamodb.ScanInput, ...request.Option) (*dynamodb.ScanOutput, error) + ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput) + + ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error + ScanPagesWithContext(aws.Context, *dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool, ...request.Option) error + + TagResource(*dynamodb.TagResourceInput) (*dynamodb.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *dynamodb.TagResourceInput, ...request.Option) (*dynamodb.TagResourceOutput, error) + TagResourceRequest(*dynamodb.TagResourceInput) (*request.Request, *dynamodb.TagResourceOutput) + + TransactGetItems(*dynamodb.TransactGetItemsInput) (*dynamodb.TransactGetItemsOutput, error) + TransactGetItemsWithContext(aws.Context, *dynamodb.TransactGetItemsInput, ...request.Option) (*dynamodb.TransactGetItemsOutput, error) + TransactGetItemsRequest(*dynamodb.TransactGetItemsInput) (*request.Request, *dynamodb.TransactGetItemsOutput) + + TransactWriteItems(*dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) + TransactWriteItemsWithContext(aws.Context, *dynamodb.TransactWriteItemsInput, ...request.Option) (*dynamodb.TransactWriteItemsOutput, error) + TransactWriteItemsRequest(*dynamodb.TransactWriteItemsInput) (*request.Request, *dynamodb.TransactWriteItemsOutput) + + UntagResource(*dynamodb.UntagResourceInput) (*dynamodb.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *dynamodb.UntagResourceInput, ...request.Option) (*dynamodb.UntagResourceOutput, error) + UntagResourceRequest(*dynamodb.UntagResourceInput) (*request.Request, *dynamodb.UntagResourceOutput) + + UpdateContinuousBackups(*dynamodb.UpdateContinuousBackupsInput) (*dynamodb.UpdateContinuousBackupsOutput, error) + 
UpdateContinuousBackupsWithContext(aws.Context, *dynamodb.UpdateContinuousBackupsInput, ...request.Option) (*dynamodb.UpdateContinuousBackupsOutput, error) + UpdateContinuousBackupsRequest(*dynamodb.UpdateContinuousBackupsInput) (*request.Request, *dynamodb.UpdateContinuousBackupsOutput) + + UpdateContributorInsights(*dynamodb.UpdateContributorInsightsInput) (*dynamodb.UpdateContributorInsightsOutput, error) + UpdateContributorInsightsWithContext(aws.Context, *dynamodb.UpdateContributorInsightsInput, ...request.Option) (*dynamodb.UpdateContributorInsightsOutput, error) + UpdateContributorInsightsRequest(*dynamodb.UpdateContributorInsightsInput) (*request.Request, *dynamodb.UpdateContributorInsightsOutput) + + UpdateGlobalTable(*dynamodb.UpdateGlobalTableInput) (*dynamodb.UpdateGlobalTableOutput, error) + UpdateGlobalTableWithContext(aws.Context, *dynamodb.UpdateGlobalTableInput, ...request.Option) (*dynamodb.UpdateGlobalTableOutput, error) + UpdateGlobalTableRequest(*dynamodb.UpdateGlobalTableInput) (*request.Request, *dynamodb.UpdateGlobalTableOutput) + + UpdateGlobalTableSettings(*dynamodb.UpdateGlobalTableSettingsInput) (*dynamodb.UpdateGlobalTableSettingsOutput, error) + UpdateGlobalTableSettingsWithContext(aws.Context, *dynamodb.UpdateGlobalTableSettingsInput, ...request.Option) (*dynamodb.UpdateGlobalTableSettingsOutput, error) + UpdateGlobalTableSettingsRequest(*dynamodb.UpdateGlobalTableSettingsInput) (*request.Request, *dynamodb.UpdateGlobalTableSettingsOutput) + + UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) + UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error) + UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) + + UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) + UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error) + UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) + + UpdateTableReplicaAutoScaling(*dynamodb.UpdateTableReplicaAutoScalingInput) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error) + UpdateTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.UpdateTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error) + UpdateTableReplicaAutoScalingRequest(*dynamodb.UpdateTableReplicaAutoScalingInput) (*request.Request, *dynamodb.UpdateTableReplicaAutoScalingOutput) + + UpdateTimeToLive(*dynamodb.UpdateTimeToLiveInput) (*dynamodb.UpdateTimeToLiveOutput, error) + UpdateTimeToLiveWithContext(aws.Context, *dynamodb.UpdateTimeToLiveInput, ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error) + UpdateTimeToLiveRequest(*dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput) + + WaitUntilTableExists(*dynamodb.DescribeTableInput) error + WaitUntilTableExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error + + WaitUntilTableNotExists(*dynamodb.DescribeTableInput) error + WaitUntilTableNotExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error +} + +var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go new file mode 100644 index 00000000000..9f7baf88f6b --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -0,0 +1,347 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeBackupInUseException for service response error code + // "BackupInUseException". + // + // There is another ongoing conflicting backup control plane operation on the + // table. The backup is either being created, deleted or restored to a table. + ErrCodeBackupInUseException = "BackupInUseException" + + // ErrCodeBackupNotFoundException for service response error code + // "BackupNotFoundException". + // + // Backup not found for the given BackupARN. + ErrCodeBackupNotFoundException = "BackupNotFoundException" + + // ErrCodeConditionalCheckFailedException for service response error code + // "ConditionalCheckFailedException". + // + // A condition specified in the operation could not be evaluated. + ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException" + + // ErrCodeContinuousBackupsUnavailableException for service response error code + // "ContinuousBackupsUnavailableException". + // + // Backups have not yet been enabled for this table. + ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" + + // ErrCodeDuplicateItemException for service response error code + // "DuplicateItemException". + // + // There was an attempt to insert an item with the same primary key as an item + // that already exists in the DynamoDB table. + ErrCodeDuplicateItemException = "DuplicateItemException" + + // ErrCodeExportConflictException for service response error code + // "ExportConflictException". + // + // There was a conflict when writing to the specified S3 bucket. + ErrCodeExportConflictException = "ExportConflictException" + + // ErrCodeExportNotFoundException for service response error code + // "ExportNotFoundException". + // + // The specified export was not found. + ErrCodeExportNotFoundException = "ExportNotFoundException" + + // ErrCodeGlobalTableAlreadyExistsException for service response error code + // "GlobalTableAlreadyExistsException". + // + // The specified global table already exists. + ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException" + + // ErrCodeGlobalTableNotFoundException for service response error code + // "GlobalTableNotFoundException". + // + // The specified global table does not exist. + ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException" + + // ErrCodeIdempotentParameterMismatchException for service response error code + // "IdempotentParameterMismatchException". + // + // DynamoDB rejected the request because you retried a request with a different + // payload but with an idempotent token that was already used. + ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" + + // ErrCodeImportConflictException for service response error code + // "ImportConflictException". + // + // There was a conflict when importing from the specified S3 source. This can + // occur when the current import conflicts with a previous import request that + // had the same client token. + ErrCodeImportConflictException = "ImportConflictException" + + // ErrCodeImportNotFoundException for service response error code + // "ImportNotFoundException". + // + // The specified import was not found. 
+ ErrCodeImportNotFoundException = "ImportNotFoundException" + + // ErrCodeIndexNotFoundException for service response error code + // "IndexNotFoundException". + // + // The operation tried to access a nonexistent index. + ErrCodeIndexNotFoundException = "IndexNotFoundException" + + // ErrCodeInternalServerError for service response error code + // "InternalServerError". + // + // An error occurred on the server side. + ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeInvalidExportTimeException for service response error code + // "InvalidExportTimeException". + // + // The specified ExportTime is outside of the point in time recovery window. + ErrCodeInvalidExportTimeException = "InvalidExportTimeException" + + // ErrCodeInvalidRestoreTimeException for service response error code + // "InvalidRestoreTimeException". + // + // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime + // and LatestRestorableDateTime. + ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException" + + // ErrCodeItemCollectionSizeLimitExceededException for service response error code + // "ItemCollectionSizeLimitExceededException". + // + // An item collection is too large. This exception is only returned for tables + // that have one or more local secondary indexes. + ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // There is no limit to the number of daily on-demand backups that can be taken. + // + // For most purposes, up to 500 simultaneous table operations are allowed per + // account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, + // RestoreTableFromBackup, and RestoreTableToPointInTime. + // + // When you are creating a table with one or more secondary indexes, you can + // have up to 250 such requests running at a time. However, if the table or + // index specifications are complex, then DynamoDB might temporarily reduce + // the number of concurrent operations. + // + // When importing into DynamoDB, up to 50 simultaneous import table operations + // are allowed per account. + // + // There is a soft account quota of 2,500 tables. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodePointInTimeRecoveryUnavailableException for service response error code + // "PointInTimeRecoveryUnavailableException". + // + // Point in time recovery has not yet been enabled for this source table. + ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException" + + // ErrCodeProvisionedThroughputExceededException for service response error code + // "ProvisionedThroughputExceededException". + // + // Your request rate is too high. The Amazon Web Services SDKs for DynamoDB + // automatically retry requests that receive this exception. Your request is + // eventually successful, unless your retry queue is too large to finish. Reduce + // the frequency of requests and use exponential backoff. For more information, + // go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) + // in the Amazon DynamoDB Developer Guide. 
+ ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException" + + // ErrCodeReplicaAlreadyExistsException for service response error code + // "ReplicaAlreadyExistsException". + // + // The specified replica is already part of the global table. + ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException" + + // ErrCodeReplicaNotFoundException for service response error code + // "ReplicaNotFoundException". + // + // The specified replica is no longer part of the global table. + ErrCodeReplicaNotFoundException = "ReplicaNotFoundException" + + // ErrCodeRequestLimitExceeded for service response error code + // "RequestLimitExceeded". + // + // Throughput exceeds the current throughput quota for your account. Please + // contact Amazon Web Services Support (https://aws.amazon.com/support) to request + // a quota increase. + ErrCodeRequestLimitExceeded = "RequestLimitExceeded" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // The operation conflicts with the resource's availability. For example, you + // attempted to recreate an existing table, or tried to delete a table currently + // in the CREATING state. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The operation tried to access a nonexistent table or index. The resource + // might not be specified correctly, or its status might not be ACTIVE. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeTableAlreadyExistsException for service response error code + // "TableAlreadyExistsException". + // + // A target table with the specified name already exists. + ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException" + + // ErrCodeTableInUseException for service response error code + // "TableInUseException". + // + // A target table with the specified name is either being created or deleted. + ErrCodeTableInUseException = "TableInUseException" + + // ErrCodeTableNotFoundException for service response error code + // "TableNotFoundException". + // + // A source table with the name TableName does not currently exist within the + // subscriber's account or the subscriber is operating in the wrong Amazon Web + // Services Region. + ErrCodeTableNotFoundException = "TableNotFoundException" + + // ErrCodeTransactionCanceledException for service response error code + // "TransactionCanceledException". + // + // The entire transaction request was canceled. + // + // DynamoDB cancels a TransactWriteItems request under the following circumstances: + // + // * A condition in one of the condition expressions is not met. + // + // * A table in the TransactWriteItems request is in a different account + // or region. + // + // * More than one action in the TransactWriteItems operation targets the + // same item. + // + // * There is insufficient provisioned capacity for the transaction to be + // completed. + // + // * An item size becomes too large (larger than 400 KB), or a local secondary + // index (LSI) becomes too large, or a similar validation error occurs because + // of changes made by the transaction. + // + // * There is a user error, such as an invalid data format. 
+ // + // DynamoDB cancels a TransactGetItems request under the following circumstances: + // + // * There is an ongoing TransactGetItems operation that conflicts with a + // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. + // In this case the TransactGetItems operation fails with a TransactionCanceledException. + // + // * A table in the TransactGetItems request is in a different account or + // region. + // + // * There is insufficient provisioned capacity for the transaction to be + // completed. + // + // * There is a user error, such as an invalid data format. + // + // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons + // property. This property is not set for other languages. Transaction cancellation + // reasons are ordered in the order of requested items, if an item has no error + // it will have None code and Null message. + // + // Cancellation reason codes and possible error messages: + // + // * No Errors: Code: None Message: null + // + // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The + // conditional request failed. + // + // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded + // Message: Collection size exceeded. + // + // * Transaction Conflict: Code: TransactionConflict Message: Transaction + // is ongoing for the item. + // + // * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded + // Messages: The level of configured provisioned throughput for the table + // was exceeded. Consider increasing your provisioning level with the UpdateTable + // API. This Message is received when provisioned throughput is exceeded + // is on a provisioned DynamoDB table. The level of configured provisioned + // throughput for one or more global secondary indexes of the table was exceeded. + // Consider increasing your provisioning level for the under-provisioned + // global secondary indexes with the UpdateTable API. This message is returned + // when provisioned throughput is exceeded is on a provisioned GSI. + // + // * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds + // the current capacity of your table or index. DynamoDB is automatically + // scaling your table or index so please try again shortly. If exceptions + // persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. + // This message is returned when writes get throttled on an On-Demand table + // as DynamoDB is automatically scaling the table. Throughput exceeds the + // current capacity for one or more global secondary indexes. DynamoDB is + // automatically scaling your index so please try again shortly. This message + // is returned when when writes get throttled on an On-Demand GSI as DynamoDB + // is automatically scaling the GSI. + // + // * Validation Error: Code: ValidationError Messages: One or more parameter + // values were invalid. The update expression attempted to update the secondary + // index key beyond allowed size limits. The update expression attempted + // to update the secondary index key to unsupported type. An operand in the + // update expression has an incorrect data type. Item size to update has + // exceeded the maximum allowed size. Number overflow. Attempting to store + // a number with magnitude larger than supported range. Type mismatch for + // attribute to update. Nesting Levels have exceeded supported limits. 
The + // document path provided in the update expression is invalid for update. + // The provided expression refers to an attribute that does not exist in + // the item. + ErrCodeTransactionCanceledException = "TransactionCanceledException" + + // ErrCodeTransactionConflictException for service response error code + // "TransactionConflictException". + // + // Operation was rejected because there is an ongoing transaction for the item. + ErrCodeTransactionConflictException = "TransactionConflictException" + + // ErrCodeTransactionInProgressException for service response error code + // "TransactionInProgressException". + // + // The transaction with the given request token is already in progress. + ErrCodeTransactionInProgressException = "TransactionInProgressException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "BackupInUseException": newErrorBackupInUseException, + "BackupNotFoundException": newErrorBackupNotFoundException, + "ConditionalCheckFailedException": newErrorConditionalCheckFailedException, + "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException, + "DuplicateItemException": newErrorDuplicateItemException, + "ExportConflictException": newErrorExportConflictException, + "ExportNotFoundException": newErrorExportNotFoundException, + "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, + "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException, + "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, + "ImportConflictException": newErrorImportConflictException, + "ImportNotFoundException": newErrorImportNotFoundException, + "IndexNotFoundException": newErrorIndexNotFoundException, + "InternalServerError": newErrorInternalServerError, + "InvalidExportTimeException": newErrorInvalidExportTimeException, + "InvalidRestoreTimeException": newErrorInvalidRestoreTimeException, + "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException, + "LimitExceededException": newErrorLimitExceededException, + "PointInTimeRecoveryUnavailableException": newErrorPointInTimeRecoveryUnavailableException, + "ProvisionedThroughputExceededException": newErrorProvisionedThroughputExceededException, + "ReplicaAlreadyExistsException": newErrorReplicaAlreadyExistsException, + "ReplicaNotFoundException": newErrorReplicaNotFoundException, + "RequestLimitExceeded": newErrorRequestLimitExceeded, + "ResourceInUseException": newErrorResourceInUseException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "TableAlreadyExistsException": newErrorTableAlreadyExistsException, + "TableInUseException": newErrorTableInUseException, + "TableNotFoundException": newErrorTableNotFoundException, + "TransactionCanceledException": newErrorTransactionCanceledException, + "TransactionConflictException": newErrorTransactionConflictException, + "TransactionInProgressException": newErrorTransactionInProgressException, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go new file mode 100644 index 00000000000..ce0ed74469f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -0,0 +1,112 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
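// Editorial note: a minimal, assumed usage sketch for the error code
// constants defined in errors.go above (svc, input and the handling policy
// are placeholders, not part of the generated SDK; awserr is
// github.com/aws/aws-sdk-go/aws/awserr):
//
//	out, err := svc.PutItem(input)
//	if err != nil {
//		if aerr, ok := err.(awserr.Error); ok {
//			switch aerr.Code() {
//			case dynamodb.ErrCodeConditionalCheckFailedException:
//				// The condition expression rejected the write; treat it as a
//				// logical conflict rather than a transport failure.
//			case dynamodb.ErrCodeProvisionedThroughputExceededException:
//				// Already retried by the SDK; back off before trying again.
//			}
//		}
//		return err
//	}
//	_ = out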
+ +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/crr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// DynamoDB provides the API operation methods for making requests to +// Amazon DynamoDB. See this package's package overview docs +// for details on the service. +// +// DynamoDB methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type DynamoDB struct { + *client.Client + endpointCache *crr.EndpointCache +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "dynamodb" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "DynamoDB" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the DynamoDB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a DynamoDB client from just a session. +// svc := dynamodb.New(mySession) +// +// // Create a DynamoDB client with additional configuration +// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *DynamoDB { + svc := &DynamoDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2012-08-10", + ResolvedRegion: resolvedRegion, + JSONVersion: "1.0", + TargetPrefix: "DynamoDB_20120810", + }, + handlers, + ), + } + svc.endpointCache = crr.NewEndpointCache(10) + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDB operation and runs any +// custom request initialization. 
+func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go new file mode 100644 index 00000000000..ae515f7de5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilTableExists uses the DynamoDB API operation +// DescribeTable to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { + return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTableExists", + MaxAttempts: 25, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus", + Expected: "ACTIVE", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTableInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTableRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilTableNotExists uses the DynamoDB API operation +// DescribeTable to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { + return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
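// For example (an illustrative sketch; sess and the "orders" table are
// assumptions, not part of the generated SDK), callers typically bound the
// wait with a context deadline:
//
//	svc := dynamodb.New(sess)
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
//	defer cancel()
//	err := svc.WaitUntilTableNotExistsWithContext(ctx, &dynamodb.DescribeTableInput{
//		TableName: aws.String("orders"),
//	})
//	if err != nil {
//		// The deadline hit (or DescribeTable kept failing) before the table was gone.
//	}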
+func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTableNotExists", + MaxAttempts: 25, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTableInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTableRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4c82195c0b9..352accc6d1b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -117,6 +117,7 @@ github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds github.com/aws/aws-sdk-go/aws/credentials/ssocreds github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/crr github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults github.com/aws/aws-sdk-go/aws/ec2metadata @@ -141,6 +142,8 @@ github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restjson github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil +github.com/aws/aws-sdk-go/service/dynamodb +github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface github.com/aws/aws-sdk-go/service/sns github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface From baf9bb1bf7a3a34e6f8fcbb8fae3bec3a9bfdeed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Jan 2023 23:31:44 -0800 Subject: [PATCH 051/133] Bump github.com/dustin/go-humanize from 1.0.0 to 1.0.1 (#5098) Bumps [github.com/dustin/go-humanize](https://github.com/dustin/go-humanize) from 1.0.0 to 1.0.1. - [Release notes](https://github.com/dustin/go-humanize/releases) - [Commits](https://github.com/dustin/go-humanize/compare/v1.0.0...v1.0.1) --- updated-dependencies: - dependency-name: github.com/dustin/go-humanize dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 3 ++- .../github.com/dustin/go-humanize/.travis.yml | 16 +++++++-------- .../dustin/go-humanize/README.markdown | 2 +- .../github.com/dustin/go-humanize/bigbytes.go | 20 +++++++++++++++++-- .../github.com/dustin/go-humanize/commaf.go | 1 + vendor/github.com/dustin/go-humanize/ftoa.go | 3 +++ .../github.com/dustin/go-humanize/number.go | 2 +- vendor/github.com/dustin/go-humanize/si.go | 4 ++++ vendor/modules.txt | 4 ++-- 10 files changed, 41 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index 85abbfc07a1..99f4e1ca8ec 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/aws/aws-sdk-go v1.44.156 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 - github.com/dustin/go-humanize v1.0.0 + github.com/dustin/go-humanize v1.0.1 github.com/efficientgo/core v1.0.0-rc.2 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/felixge/fgprof v0.9.3 diff --git a/go.sum b/go.sum index 856173871a5..9941729d3f1 100644 --- a/go.sum +++ b/go.sum @@ -501,8 +501,9 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml index ba95cdd15c3..ac12e485a15 100644 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -1,12 +1,12 @@ sudo: false language: go +go_import_path: github.com/dustin/go-humanize go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - stable - master matrix: allow_failures: @@ -15,7 +15,7 @@ matrix: install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . + - go vet . + - go install -v -race ./... - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown index 91b4ae56464..7d0b16b34f5 100644 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -5,7 +5,7 @@ Just a few functions for helping humanize times and sizes. 
`go get` it as `github.com/dustin/go-humanize`, import it as `"github.com/dustin/go-humanize"`, use it as `humanize`. -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for complete documentation. ## Sizes diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go index 1a2bf617239..3b015fd59ec 100644 --- a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -28,6 +28,10 @@ var ( BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) // BigYiByte is 1,024 z bytes in bit.Ints BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) + // BigRiByte is 1,024 y bytes in bit.Ints + BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp) + // BigQiByte is 1,024 r bytes in bit.Ints + BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp) ) var ( @@ -51,6 +55,10 @@ var ( BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) // BigYByte is 1,000 SI z bytes in big.Ints BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) + // BigRByte is 1,000 SI y bytes in big.Ints + BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp) + // BigQByte is 1,000 SI r bytes in big.Ints + BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp) ) var bigBytesSizeTable = map[string]*big.Int{ @@ -71,6 +79,10 @@ var bigBytesSizeTable = map[string]*big.Int{ "zb": BigZByte, "yib": BigYiByte, "yb": BigYByte, + "rib": BigRiByte, + "rb": BigRByte, + "qib": BigQiByte, + "qb": BigQByte, // Without suffix "": BigByte, "ki": BigKiByte, @@ -89,6 +101,10 @@ var bigBytesSizeTable = map[string]*big.Int{ "zi": BigZiByte, "y": BigYByte, "yi": BigYiByte, + "r": BigRByte, + "ri": BigRiByte, + "q": BigQByte, + "qi": BigQiByte, } var ten = big.NewInt(10) @@ -115,7 +131,7 @@ func humanateBigBytes(s, base *big.Int, sizes []string) string { // // BigBytes(82854982) -> 83 MB func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"} return humanateBigBytes(s, bigSIExp, sizes) } @@ -125,7 +141,7 @@ func BigBytes(s *big.Int) string { // // BigIBytes(82854982) -> 79 MiB func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"} return humanateBigBytes(s, bigIECExp, sizes) } diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go index 620690dec7d..2bc83a03cf8 100644 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -1,3 +1,4 @@ +//go:build go1.6 // +build go1.6 package humanize diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go index 1c62b640d47..bce923f371a 100644 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -6,6 +6,9 @@ import ( ) func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } offset := len(s) - 1 for offset > 0 { if s[offset] == '.' 
{ diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go index dec61865996..6470d0d47a8 100644 --- a/vendor/github.com/dustin/go-humanize/number.go +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -73,7 +73,7 @@ func FormatFloat(format string, n float64) string { if n > math.MaxFloat64 { return "Infinity" } - if n < -math.MaxFloat64 { + if n < (0.0 - math.MaxFloat64) { return "-Infinity" } diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go index ae659e0e497..8b85019849a 100644 --- a/vendor/github.com/dustin/go-humanize/si.go +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -8,6 +8,8 @@ import ( ) var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto -24: "y", // yocto -21: "z", // zepto -18: "a", // atto @@ -25,6 +27,8 @@ var siPrefixTable = map[float64]string{ 18: "E", // exa 21: "Z", // zetta 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta } var revSIPrefixTable = revfmap(siPrefixTable) diff --git a/vendor/modules.txt b/vendor/modules.txt index 352accc6d1b..78e1e27584d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -262,8 +262,8 @@ github.com/dgryski/go-rendezvous # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/dustin/go-humanize v1.0.0 -## explicit +# github.com/dustin/go-humanize v1.0.1 +## explicit; go 1.16 github.com/dustin/go-humanize # github.com/edsrzf/mmap-go v1.1.0 ## explicit; go 1.17 From 6e842ad42de7cd0f7477ea13ebb03b7f387c0593 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Jan 2023 09:26:21 -0800 Subject: [PATCH 052/133] Bump golang.org/x/net from 0.4.0 to 0.5.0 (#5083) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.4.0 to 0.5.0. - [Release notes](https://github.com/golang/net/releases) - [Commits](https://github.com/golang/net/compare/v0.4.0...v0.5.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 6 +- go.sum | 14 +- vendor/golang.org/x/net/http2/flow.go | 88 +- vendor/golang.org/x/net/http2/server.go | 85 +- vendor/golang.org/x/net/http2/transport.go | 86 +- .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +- vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 54 ++ vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- vendor/golang.org/x/sys/unix/ioctl.go | 4 +- vendor/golang.org/x/sys/unix/mkall.sh | 4 +- .../x/sys/unix/syscall_dragonfly.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 1 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 22 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + vendor/golang.org/x/sys/unix/syscall_linux.go | 50 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 15 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../x/sys/unix/syscall_openbsd_libc.go | 4 +- .../golang.org/x/sys/unix/syscall_solaris.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 55 +- .../x/sys/unix/zerrors_openbsd_386.go | 356 ++++++-- .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +++- .../x/sys/unix/zerrors_openbsd_arm.go | 348 ++++++-- .../x/sys/unix/zerrors_openbsd_arm64.go | 160 +++- .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_386.go | 10 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 + .../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 + .../x/sys/unix/zsyscall_netbsd_386.go | 10 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 + .../x/sys/unix/zsyscall_openbsd_386.go | 14 + .../x/sys/unix/zsyscall_openbsd_386.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_arm.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 137 +-- .../x/sys/unix/zsyscall_openbsd_mips64.go | 812 +++++++++++++++--- .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 +++++++++++++++ .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 137 +-- .../x/sys/unix/zsyscall_solaris_amd64.go | 13 + .../x/sys/unix/zsysctl_openbsd_386.go | 51 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +- .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + .../x/sys/unix/ztypes_netbsd_386.go | 84 ++ .../x/sys/unix/ztypes_netbsd_amd64.go | 84 ++ .../x/sys/unix/ztypes_netbsd_arm.go | 84 ++ .../x/sys/unix/ztypes_netbsd_arm64.go | 84 ++ .../x/sys/unix/ztypes_openbsd_386.go | 97 +-- .../x/sys/unix/ztypes_openbsd_amd64.go | 33 +- .../x/sys/unix/ztypes_openbsd_arm.go | 9 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 9 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 9 +- vendor/modules.txt | 6 +- 68 files changed, 3472 insertions(+), 1293 deletions(-) create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 
vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s diff --git a/go.mod b/go.mod index 99f4e1ca8ec..fd67ba18a11 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( go.opentelemetry.io/otel/sdk v1.11.2 go.opentelemetry.io/otel/trace v1.11.2 go.uber.org/atomic v1.10.0 - golang.org/x/net v0.4.0 + golang.org/x/net v0.5.0 golang.org/x/sync v0.1.0 golang.org/x/time v0.3.0 google.golang.org/grpc v1.51.0 @@ -206,8 +206,8 @@ require ( golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/oauth2 v0.1.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect golang.org/x/tools v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect diff --git a/go.sum b/go.sum index 9941729d3f1..fe3c24cd95d 100644 --- a/go.sum +++ b/go.sum @@ -1853,8 +1853,8 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2044,15 +2044,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term 
v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2063,8 +2063,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index b51f0e0cf1f..750ac52f2a5 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -6,23 +6,91 @@ package http2 -// flow is the flow control window's size. -type flow struct { +// inflowMinRefresh is the minimum number of bytes we'll send for a +// flow control window update. +const inflowMinRefresh = 4 << 10 + +// inflow accounts for an inbound flow control window. +// It tracks both the latest window sent to the peer (used for enforcement) +// and the accumulated unsent window. +type inflow struct { + avail int32 + unsent int32 +} + +// set sets the initial window. +func (f *inflow) init(n int32) { + f.avail = n +} + +// add adds n bytes to the window, with a maximum window size of max, +// indicating that the peer can now send us more data. +// For example, the user read from a {Request,Response} body and consumed +// some of the buffered data, so the peer can now send more. +// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer. +// Window updates are accumulated and sent when the unsent capacity +// is at least inflowMinRefresh or will at least double the peer's available window. +func (f *inflow) add(n int) (connAdd int32) { + if n < 0 { + panic("negative update") + } + unsent := int64(f.unsent) + int64(n) + // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets." + // RFC 7540 Section 6.9.1. + const maxWindow = 1<<31 - 1 + if unsent+int64(f.avail) > maxWindow { + panic("flow control update exceeds maximum window size") + } + f.unsent = int32(unsent) + if f.unsent < inflowMinRefresh && f.unsent < f.avail { + // If there aren't at least inflowMinRefresh bytes of window to send, + // and this update won't at least double the window, buffer the update for later. + return 0 + } + f.avail += f.unsent + f.unsent = 0 + return int32(unsent) +} + +// take attempts to take n bytes from the peer's flow control window. +// It reports whether the window has available capacity. 
+func (f *inflow) take(n uint32) bool { + if n > uint32(f.avail) { + return false + } + f.avail -= int32(n) + return true +} + +// takeInflows attempts to take n bytes from two inflows, +// typically connection-level and stream-level flows. +// It reports whether both windows have available capacity. +func takeInflows(f1, f2 *inflow, n uint32) bool { + if n > uint32(f1.avail) || n > uint32(f2.avail) { + return false + } + f1.avail -= int32(n) + f2.avail -= int32(n) + return true +} + +// outflow is the outbound flow control window's size. +type outflow struct { _ incomparable // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. + // An outflow is kept both on a conn and a per-stream. n int32 - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow + // conn points to the shared connection-level outflow that is + // shared by all streams on that conn. It is nil for the outflow // that's on the conn directly. - conn *flow + conn *outflow } -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } +func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf } -func (f *flow) available() int32 { +func (f *outflow) available() int32 { n := f.n if f.conn != nil && f.conn.n < n { n = f.conn.n @@ -30,7 +98,7 @@ func (f *flow) available() int32 { return n } -func (f *flow) take(n int32) { +func (f *outflow) take(n int32) { if n > f.available() { panic("internal error: took too much") } @@ -42,7 +110,7 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { +func (f *outflow) add(n int32) bool { sum := f.n + n if (sum > n) == (f.n > 0) { f.n = sum diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 4eb7617fa0d..b624dc0a705 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -448,7 +448,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // configured value for inflow, that will be updated when we send a // WINDOW_UPDATE shortly after sending SETTINGS. 
sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) + sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) @@ -563,8 +563,8 @@ type serverConn struct { wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control + flow outflow // conn-wide (not stream-specific) outbound flow control + inflow inflow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler @@ -641,10 +641,10 @@ type stream struct { cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow outflow // limits writing from Handler to client + inflow inflow // what the client is allowed to POST/etc to us state streamState resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen @@ -1503,7 +1503,7 @@ func (sc *serverConn) processFrame(f Frame) error { if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) { if f, ok := f.(*DataFrame); ok { - if sc.inflow.available() < int32(f.Length) { + if !sc.inflow.take(f.Length) { return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl)) } sc.sendWindowUpdate(nil, int(f.Length)) // conn-level @@ -1775,14 +1775,9 @@ func (sc *serverConn) processData(f *DataFrame) error { // But still enforce their connection-level flow control, // and return any flow control bytes since we're not going // to consume them. - if sc.inflow.available() < int32(f.Length) { + if !sc.inflow.take(f.Length) { return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) sc.sendWindowUpdate(nil, int(f.Length)) // conn-level if st != nil && st.resetQueued { @@ -1797,10 +1792,9 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - if sc.inflow.available() < int32(f.Length) { + if !sc.inflow.take(f.Length) { return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) } - sc.inflow.take(int32(f.Length)) sc.sendWindowUpdate(nil, int(f.Length)) // conn-level st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) @@ -1811,10 +1805,9 @@ func (sc *serverConn) processData(f *DataFrame) error { } if f.Length > 0 { // Check whether the client has flow control quota. 
- if st.inflow.available() < int32(f.Length) { + if !takeInflows(&sc.inflow, &st.inflow, f.Length) { return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl)) } - st.inflow.take(int32(f.Length)) if len(data) > 0 { wrote, err := st.body.Write(data) @@ -1830,10 +1823,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // Return any padded flow control now, since we won't // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } + // Call sendWindowUpdate even if there is no padding, + // to return buffered flow control credit if the sent + // window has shrunk. + pad := int32(f.Length) - int32(len(data)) + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) } if f.StreamEnded() { st.endStream() @@ -2105,8 +2100,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout != 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2388,47 +2382,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) { } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n > maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.sendWindowUpdate(st, int(n)) } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } var streamID uint32 - if st != nil { + var send int32 + if st == nil { + send = sc.inflow.add(n) + } else { streamID = st.id + send = st.inflow.add(n) + } + if send == 0 { + return } sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + write: writeWindowUpdate{streamID: streamID, n: uint32(send)}, stream: st, }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } } // requestBody is the Handler's Request.Body type. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 30f706e6cb8..b43ec10cfed 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -47,10 +47,6 @@ const ( // we buffer per stream. transportDefaultStreamFlow = 4 << 20 - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. 
- transportDefaultStreamMinRefresh = 4 << 10 - defaultUserAgent = "Go-http-client/2.0" // initialMaxConcurrentStreams is a connections maxConcurrentStreams until @@ -310,8 +306,8 @@ type ClientConn struct { mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool @@ -376,10 +372,10 @@ type clientStream struct { respHeaderRecv chan struct{} // closed when headers are received res *http.Response // set if respHeaderRecv is closed - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read + flow outflow // guarded by cc.mu + inflow inflow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read reqBody io.ReadCloser reqBodyContentLength int64 // -1 means unknown @@ -811,7 +807,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.inflow.init(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -2073,8 +2069,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) + cs.inflow.init(transportDefaultStreamFlow) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2533,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { } cc.mu.Lock() - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } + connAdd := cc.inflow.add(n) + var streamAdd int32 if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. - v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } + streamAdd = cs.inflow.add(n) } cc.mu.Unlock() @@ -2575,17 +2559,15 @@ func (b transportResponseBody) Close() error { if unread > 0 { cc.mu.Lock() // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - } + connAdd := cc.inflow.add(unread) cc.mu.Unlock() // TODO(dneil): Acquiring this mutex can block indefinitely. // Move flow control return to a goroutine? cc.wmu.Lock() // Return connection-level flow control. 
- if unread > 0 { - cc.fr.WriteWindowUpdate(0, uint32(unread)) + if connAdd > 0 { + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) } cc.bw.Flush() cc.wmu.Unlock() @@ -2628,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // But at least return their flow control: if f.Length > 0 { cc.mu.Lock() - cc.inflow.add(int32(f.Length)) + ok := cc.inflow.take(f.Length) + connAdd := cc.inflow.add(int(f.Length)) cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() + if !ok { + return ConnectionError(ErrCodeFlowControl) + } + if connAdd > 0 { + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) + cc.bw.Flush() + cc.wmu.Unlock() + } } return nil } @@ -2665,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } // Check connection-level flow control. cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { + if !takeInflows(&cc.inflow, &cs.inflow, f.Length) { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } @@ -2689,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } } - if refund > 0 { - cc.inflow.add(int32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - } + sendConn := cc.inflow.add(refund) + var sendStream int32 + if !didReset { + sendStream = cs.inflow.add(refund) } cc.mu.Unlock() - if refund > 0 { + if sendConn > 0 || sendStream > 0 { cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + if sendConn > 0 { + cc.fr.WriteWindowUpdate(0, uint32(sendConn)) + } + if sendStream > 0 { + cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream)) } cc.bw.Flush() cc.wmu.Unlock() diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 79a38a0b9bc..a968b80fa6a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -4,6 +4,11 @@ package cpu +import ( + "strings" + "syscall" +) + // HWCAP/HWCAP2 bits. These are exposed by Linux. const ( hwcap_FP = 1 << 0 @@ -32,10 +37,45 @@ const ( hwcap_ASIMDFHM = 1 << 23 ) +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + func doinit() { if err := readHWCAP(); err != nil { - // failed to read /proc/self/auxv, try reading registers directly - readARM64Registers() + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. + // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. 
So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } return } diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 00000000000..762b63d6882 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 00000000000..d87bd6b3eb0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 +// +build linux,arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 0dee23222ca..b06f52d748f 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build gccgo && !aix -// +build gccgo,!aix +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 2cb1fefac64..c4fce0e7003 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +// +build gccgo,!hurd +// +build !aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 6c7ad052e6b..1c51b0ec2bc 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 727cba21270..8e3947c3686 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -174,10 +174,10 @@ openbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_mips64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 61c0d0de15d..a41111a794e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -255,6 +255,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index de7c23e0648..d50b9dc250b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -319,6 +319,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go new file mode 100644 index 00000000000..4ffb64808d7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -0,0 +1,22 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build hurd +// +build hurd + +package unix + +/* +#include +int ioctl(int, unsigned long int, uintptr_t); +*/ +import "C" + +func ioctl(fd int, req uint, arg uintptr) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go new file mode 100644 index 00000000000..7cf54a3e4f1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 && hurd +// +build 386,hurd + +package unix + +const ( + TIOCGETA = 0x62251713 +) + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index c5a98440eca..d839962e663 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1973,36 +1973,46 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 //sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 -func bytes2iovec(bs [][]byte) []Iovec { - iovecs := make([]Iovec, len(bs)) - for i, b := range bs { - iovecs[i].SetLen(len(b)) +// minIovec is the size of the small initial allocation used by +// Readv, Writev, etc. +// +// This small allocation gets stack allocated, which lets the +// common use case of len(iovs) <= minIovs avoid more expensive +// heap allocations. +const minIovec = 8 + +// appendBytes converts bs to Iovecs and appends them to vecs. +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) if len(b) > 0 { - iovecs[i].Base = &b[0] + v.Base = &b[0] } else { - iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + v.Base = (*byte)(unsafe.Pointer(&_zero)) } + vecs = append(vecs, v) } - return iovecs + return vecs } -// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit -// systems, hi will always be 0. On 32-bit systems, offs will be split in half. -// preadv/pwritev chose this calling convention so they don't need to add a -// padding-register for alignment on ARM. +// offs2lohi splits offs into its low and high order bits. 
func offs2lohi(offs int64) (lo, hi uintptr) { - return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) + const longBits = SizeofLong * 8 + return uintptr(offs), uintptr(uint64(offs) >> longBits) } func Readv(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) readvRacedetect(iovecs, n, err) return n, err } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv(fd, iovecs, lo, hi) readvRacedetect(iovecs, n, err) @@ -2010,7 +2020,8 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv2(fd, iovecs, lo, hi, flags) readvRacedetect(iovecs, n, err) @@ -2037,7 +2048,8 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2047,7 +2059,8 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2058,7 +2071,8 @@ func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 666f0a1b33d..35a3ad758f5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,6 +110,20 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } +func SysctlUvmexp(name string) (*Uvmexp, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofUvmexp) + var u Uvmexp + if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil { + return nil, err + } + return &u, nil +} + func Pipe(p []int) (err error) { return Pipe2(p, 0) } @@ -245,6 +259,7 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 78daceb338b..9b67b908e5f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -220,6 +220,7 @@ func Uname(uname *Utsname) error { //sys Chmod(path string, mode 
uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index e23c5394eff..04aa43f41b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build openbsd && !mips64 -// +build openbsd,!mips64 +//go:build openbsd +// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 2109e569cce..07ac56109a0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -590,6 +590,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Creat(path string, mode uint32) (fd int, err error) //sys Dup(fd int) (nfd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 00bafda8654..a386f8897df 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -331,6 +331,19 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +// Recvmsg receives a message from a socket using the recvmsg system call. The +// received non-control data will be written to p, and any "out of band" +// control data will be written to oob. The flags are passed to recvmsg. +// +// The results are: +// - n is the number of non-control data bytes read into p +// - oobn is the number of control data bytes read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +// +// If the underlying socket type is not SOCK_DGRAM, a received message +// containing oob data and a single '\0' of non-control data is treated as if +// the message contained only control data, i.e. n will be zero on return. func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var iov [1]Iovec if len(p) > 0 { @@ -346,13 +359,9 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from return } -// RecvmsgBuffers receives a message from a socket using the recvmsg -// system call. The flags are passed to recvmsg. Any non-control data -// read is scattered into the buffers slices. The results are: -// - n is the number of non-control data read into bufs -// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage] -// - recvflags is flags returned by recvmsg -// - from is the address of the sender +// RecvmsgBuffers receives a message from a socket using the recvmsg system +// call. This function is equivalent to Recvmsg, but non-control data read is +// scattered into the buffers slices. 
func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { iov := make([]Iovec, len(buffers)) for i := range buffers { @@ -371,11 +380,38 @@ func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn in return } +// Sendmsg sends a message on a socket to an address using the sendmsg system +// call. This function is equivalent to SendmsgN, but does not return the +// number of bytes actually sent. func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { _, err = SendmsgN(fd, p, oob, to, flags) return } +// SendmsgN sends a message on a socket to an address using the sendmsg system +// call. p contains the non-control data to send, and oob contains the "out of +// band" control data. The flags are passed to sendmsg. The number of +// non-control bytes actually written to the socket is returned. +// +// Some socket types do not support sending control data without accompanying +// non-control data. If p is empty, and oob contains control data, and the +// underlying socket type is not SOCK_DGRAM, p will be treated as containing a +// single '\0' and the return value will indicate zero bytes sent. +// +// The Go function Recvmsg, if called with an empty p and a non-empty oob, +// will read and ignore this additional '\0'. If the message is received by +// code that does not use Recvmsg, or that does not use Go at all, that code +// will need to be written to expect and ignore the additional '\0'. +// +// If you need to send non-empty oob with p actually empty, and if the +// underlying socket type supports it, you can do so via a raw system call as +// follows: +// +// msg := &unix.Msghdr{ +// Control: &oob[0], +// } +// msg.SetControllen(len(oob)) +// n, _, errno := unix.Syscall(unix.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), flags) func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { var iov [1]Iovec if len(p) > 0 { @@ -394,9 +430,8 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } // SendmsgBuffers sends a message on a socket to an address using the sendmsg -// system call. The flags are passed to sendmsg. Any non-control data written -// is gathered from buffers. The function returns the number of bytes written -// to the socket. +// system call. This function is equivalent to SendmsgN, but the non-control +// data is gathered from buffers. 
func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { iov := make([]Iovec, len(buffers)) for i := range buffers { diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 6d56edc05ac..af20e474b38 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xccc84404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xccc8441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xccc84407 + DIOCGETRULES = 0xccc84406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc084444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0844450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + 
DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,7 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 @@ -460,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -471,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -605,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -695,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -729,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -762,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 
IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -787,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -826,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -865,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -900,10 +1019,11 @@ const ( MAP_INHERIT_COPY = 0x1 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 + MAP_RENAME = 0x0 MAP_SHARED = 0x1 MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,13 +1050,29 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -946,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -953,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -968,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -977,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1015,7 +1160,6 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 - PT_MASK = 0x3ff000 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 
0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,49 +1197,57 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x10f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 - RTF_MASK = 0x80 + RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 RTF_PROTO3 = 0x2000 RTF_REJECT = 0x8 - RTF_SOURCE = 0x20000 RTF_STATIC = 0x800 - RTF_TUNNEL = 0x100000 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x805c693c + SIOCBRDGADDL = 0x805c6949 + SIOCBRDGADDS = 0x805c6941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x805c693d + SIOCBRDGDELS = 0x805c6942 + SIOCBRDGFLUSH = 0x805c6948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc05c693e SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc03c6958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc05c6942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x805c6955 + SIOCBRDGSIFFLGS = 0x805c693f + SIOCBRDGSIFPRIO = 0x805c6954 + SIOCBRDGSIFPROT = 0x805c694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 
SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,40 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1209,25 +1387,37 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 @@ -1238,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1245,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1258,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1287,9 
+1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1298,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1357,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1378,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1390,8 +1619,8 @@ const ( WCONTINUED = 0x8 WCOREFLAG = 0x80 WNOHANG = 0x1 - WSTOPPED = 0x7f WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1405,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1431,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1459,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1472,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1568,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", 
"operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1624,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1638,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1665,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 25cb6094813..6015fcb2bf6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -109,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -137,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -177,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 
0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -240,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -292,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -323,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -351,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -441,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -466,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -732,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -797,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -906,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -970,12 +1051,26 @@ const ( MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -988,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -996,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1013,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1130,9 +1228,11 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 RTM_BFD = 
0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 @@ -1140,7 +1240,6 @@ const ( RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe RTM_INVALIDATE = 0x11 - RTM_LOCK = 0x8 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 @@ -1148,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1166,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1182,35 +1284,37 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80286989 SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a @@ -1229,6 +1333,7 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc028698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d SIOCGIFGMEMB = 0xc028698a SIOCGIFGROUP = 0xc0286988 SIOCGIFHARDMTU = 0xc02069a5 @@ -1243,13 +1348,21 @@ const ( SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e SIOCGLIFPHYADDR = 0xc218694b SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 @@ -1287,19 +1400,20 @@ const ( SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f 
SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1314,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1321,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1370,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1379,8 +1506,11 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCHKVERAUTH = 0x2000741e @@ -1445,7 +1575,6 @@ const ( TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 @@ -1467,7 +1596,8 @@ const ( VMIN = 0x10 VM_ANONMIN = 0x7 VM_LOADAVG = 0x2 - VM_MAXID = 0xc + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd VM_MAXSLP = 0xa VM_METER = 0x1 VM_NKMEMPAGES = 0x6 @@ -1745,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1772,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index aef6c085609..8d44955e44d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -82,7 +83,7 @@ const ( BIOCGFILDROP = 0x40044278 BIOCGHDRCMPLT = 0x40044274 BIOCGRSIG = 0x40044273 - BIOCGRTIMEOUT = 0x400c426e + BIOCGRTIMEOUT = 0x4010426e BIOCGSTATS = 0x4008426f BIOCIMMEDIATE = 0x80044270 BIOCLOCK = 0x20004276 @@ -96,7 +97,7 @@ const ( BIOCSFILDROP = 0x80044279 BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044272 - BIOCSRTIMEOUT = 0x800c426d + BIOCSRTIMEOUT = 0x8010426d BIOCVERSION = 0x40044271 BPF_A = 0x10 BPF_ABS = 0x20 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + 
CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xcce04404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcce0441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xcce04407 + DIOCGETRULES = 0xcce04406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + 
ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,8 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 F_SETFL = 0x4 @@ -459,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -470,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -604,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -694,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -728,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -761,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -786,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -825,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -864,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,12 +1050,27 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE 
= 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -947,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -954,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -969,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -978,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,24 +1197,29 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x70f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 - RTF_MASK = 0x80 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 @@ -1073,23 +1228,26 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - 
SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc060693e SIOCBRDGGMA = 0xc0146953 - SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPARAM = 0xc0406958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,41 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 
0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1210,26 +1387,36 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 @@ -1241,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1248,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1261,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1290,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1301,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1321,7 +1530,7 @@ const ( TIOCGFLAGS = 0x4004745d TIOCGPGRP = 0x40047477 TIOCGSID = 0x40047463 - TIOCGTSTAMP = 0x400c745b + TIOCGTSTAMP = 0x4010745b TIOCGWINSZ = 0x40087468 TIOCMBIC = 0x8004746b TIOCMBIS = 0x8004746c @@ -1360,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1381,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc 
+ VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1394,6 +1620,7 @@ const ( WCOREFLAG = 0x80 WNOHANG = 0x1 WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1407,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1433,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1461,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1474,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1570,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1626,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1640,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1667,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 90de7dfc33a..ae16fe7542a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ 
const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -180,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -243,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -295,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -326,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -354,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -445,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -470,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -736,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 
IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -801,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -910,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -981,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -993,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1001,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1018,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1154,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1172,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1188,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1264,6 +1360,7 @@ const ( SIOCGPWE3CTRLWORD = 0xc02069dc SIOCGPWE3FAT = 0xc02069dd SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be @@ -1310,17 +1407,13 @@ const ( SIOCSPWE3CTRLWORD = 0x802069dc SIOCSPWE3FAT = 0x802069dd SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 
0x80206993 SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1335,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1342,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1391,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1400,6 +1506,7 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 TIMER_ABSTIME = 0x1 @@ -1768,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1795,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index f1154ff56f6..03d90fe3550 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -301,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -353,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -413,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -504,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL 
= -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -529,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -795,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -860,6 +873,7 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -970,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -1041,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -1053,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1061,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1078,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1214,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1232,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1248,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1378,11 +1414,6 @@ const ( SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 
0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1455,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1833,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1860,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {81920, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1b6eedfa611..54749f9c5ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -552,6 +552,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 039c4aa06c2..77479d45815 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 0535d3cfdf2..2e966d4d7a6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go 
b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 1018b522170..d65a7c0fa6e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 3802f4b379a..6f0b97c6db3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 8a2db7da9f3..e1c23b52723 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 4af561a48d8..79f7389963e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3b90e9448ad..fb161f3a263 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := 
Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 890f4ccd131..4c8ac993a88 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index c79f071fc6a..76dd8ec4fdb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 2925fe0a7b7..caeb807bd4e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 75eb2f5f3f7..087444250c9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) TEXT 
libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL 
·libc_chmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 DATA 
·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL 
·libc_getuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_pathconf_trampoline_addr(SB)/4, 
$libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) TEXT 
libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) 
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 98446d2b954..a05e5f4fff6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 243a6663ce6..5782cd10844 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, 
$libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA 
·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT 
libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL 
·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL 
·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA 
·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 8da6791d1e3..b2da8e50cc7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index 9ad116d9fbd..cf310420c94 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), 
RODATA, $4 DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) TEXT 
libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 
JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL 
·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 DATA 
·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL 
·libc_symlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 800aab6e3e7..048b2655e6f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 4efeff9abbf..484bb42e0a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT 
libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, 
$libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA 
·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL 
·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, 
$libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT 
libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, 
$libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 016d959bc66..6f33e37e723 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && mips64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err 
error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), 
uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, 
uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 
+342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := 
syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return 
+} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := 
syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) 
(err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := 
syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1504,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1517,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1525,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1538,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ 
-1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := 
syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := 
syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := 
syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s new file mode 100644 index 00000000000..55af27263ad --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd mips64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT 
libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + 
+TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT 
libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, 
$8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL 
·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 
+DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index c85de2d9766..330cf7f7ac6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 7c9223b6418..4028255b0d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -249,6 +249,12 @@ TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_clock_gettime(SB) + RET +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT 
libc_close_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_close(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 8e3e7873f89..5f24de0d9d7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -696,6 +696,20 @@ var libc_chroot_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 7dba789271c..e1fbd4dfa8c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -5,792 +5,665 @@ TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT 
libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe2(SB) - GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) TEXT 
libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdents(SB) - GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) - GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup3(SB) - GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA 
·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, 
$libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrtable(SB) - GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, 
$libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifoat(SB) - GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknodat(SB) - GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) - GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT 
libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresgid(SB) - GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setresuid(SB) - GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) - GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) - GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, 
$libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 91f5a2bde28..78d4a4240e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -38,6 +38,7 @@ import ( //go:cgo_import_dynamic libc_chmod chmod "libc.so" //go:cgo_import_dynamic libc_chown chown "libc.so" //go:cgo_import_dynamic libc_chroot chroot "libc.so" +//go:cgo_import_dynamic libc_clockgettime clockgettime "libc.so" //go:cgo_import_dynamic libc_close close "libc.so" //go:cgo_import_dynamic libc_creat creat "libc.so" //go:cgo_import_dynamic libc_dup dup "libc.so" @@ -177,6 +178,7 @@ import ( //go:linkname procChmod libc_chmod //go:linkname procChown libc_chown //go:linkname procChroot libc_chroot +//go:linkname procClockGettime libc_clockgettime //go:linkname procClose libc_close //go:linkname procCreat libc_creat //go:linkname procDup libc_dup @@ -317,6 +319,7 @@ var ( procChmod, 
procChown, procChroot, + procClockGettime, procClose, procCreat, procDup, @@ -750,6 +753,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 9e9d0b2a9c4..55e0484719c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", 
[]_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, 
{"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index adecd09667d..d2243cf83f5 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -36,23 +36,29 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, 
{"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.dnsjackport", []_C_int{1, 13}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, @@ -81,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -108,15 +114,19 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, @@ -176,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -252,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 8ea52a4a181..82dc51bd8b5 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ 
var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, 
{"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, 
{"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index 154b57ae3e2..cbdda1a4ae2 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -44,6 +45,7 @@ var sysctlMib = []mibentry{ {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, {"kern.audio", []_C_int{1, 84}}, @@ -51,6 +53,8 @@ var sysctlMib = []mibentry{ {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, @@ -83,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -110,13 +114,16 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, 
{"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, @@ -179,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -255,7 +261,6 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index d96bb2ba4db..f55eae1a821 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -86,7 +87,6 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, @@ -123,6 +123,7 @@ var sysctlMib = []mibentry{ {"kern.ttycount", []_C_int{1, 57}}, {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index a37f7737563..01c43a01fda 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. 
const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 2fd2060e617..9bc4c8f9d88 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -491,6 +491,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 6a5a1a8ae55..bb05f655d22 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss 
int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 84cc8d01e65..db40e3a19c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -496,6 +496,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index c844e7096ff..11121151ccf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + 
Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2ed718ca06a..26eba23b729 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -58,22 +58,22 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Mode uint32 - Dev int32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - X__st_birthtim Timespec + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec } type Statfs_t struct { @@ -98,7 +98,7 @@ type Statfs_t struct { F_mntonname [90]byte F_mntfromname [90]byte F_mntfromspec [90]byte - Pad_cgo_0 [2]byte + _ [2]byte Mount_info [160]byte } @@ -111,13 +111,13 @@ type Flock_t struct { } type Dirent struct { - Fileno uint64 - Off int64 - Reclen uint16 - Type uint8 - Namlen uint8 - X__d_padding [4]uint8 - Name [256]int8 + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 } type Fsid struct { @@ -262,8 +262,8 @@ type FdSet struct { } const ( - SizeofIfMsghdr = 0xec - SizeofIfData = 0xd4 + SizeofIfMsghdr = 0xa0 + SizeofIfData = 0x88 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 @@ -292,7 +292,7 @@ type IfData struct { Link_state uint8 Mtu uint32 Metric uint32 - Pad uint32 + Rdomain uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 @@ -304,10 +304,10 @@ type IfData struct { Imcasts uint64 Omcasts uint64 Iqdrops uint64 + Oqdrops uint64 Noproto uint64 Capabilities uint32 Lastchange Timeval - Mclpool [7]Mclpool } type IfaMsghdr struct { @@ -368,20 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct { - Grown int32 - Alive uint16 - Hwm uint16 - Cwm uint16 - Lwm uint16 -} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -407,11 +399,14 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 
- Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index b4fb97ebe65..5a547988698 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -73,7 +73,6 @@ type Stat_t struct { Blksize int32 Flags uint32 Gen uint32 - _ [4]byte _ Timespec } @@ -81,7 +80,6 @@ type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 - _ [4]byte F_blocks uint64 F_bfree uint64 F_bavail int64 @@ -200,10 +198,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen uint32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -311,7 +307,6 @@ type IfData struct { Oqdrops uint64 Noproto uint64 Capabilities uint32 - _ [4]byte Lastchange Timeval } @@ -373,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -395,7 +388,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -411,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 2c4675040ef..be58c4e1ff8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -375,14 +375,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -412,7 +410,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ddee0451470..52338266cb3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 
SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index eb13d4e8bfc..605cfdb12b1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/modules.txt b/vendor/modules.txt index 78e1e27584d..4f0e990d725 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1081,7 +1081,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.7.0 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.4.0 +# golang.org/x/net v0.5.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -1111,7 +1111,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.3.0 +# golang.org/x/sys v0.4.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1119,7 +1119,7 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.5.0 +# golang.org/x/text v0.6.0 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/internal From 198599cdf1b206a97781629dc816d0c136f2dfb0 Mon Sep 17 00:00:00 2001 From: Daniel Blando Date: Thu, 26 Jan 2023 08:47:30 -0800 Subject: [PATCH 053/133] Change log level for 4xx on Push API. Do not log on 429 (#5103) * Remove error logs in case API returns 4xx Signed-off-by: Daniel Deluiggi * Update chnagelog Signed-off-by: Daniel Deluiggi * Change to just not log in 429 Signed-off-by: Daniel Deluiggi Signed-off-by: Daniel Deluiggi Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/util/push/push.go | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 656d800b3fd..187630e31c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## master / unreleased * [CHANGE] Alertmanager: Local file disclosure vulnerability in OpsGenie configuration has been fixed. #5045 +* [CHANGE] Distributor/Ingester: Log warn level on push requests when they have status code 4xx. Do not log if status is 429. #5103 * [ENHANCEMENT] Update Go version to 1.19.3. #4988 * [ENHANCEMENT] Querier: limit series query to only ingesters if `start` param is not specified. #4976 * [ENHANCEMENT] Query-frontend/scheduler: add a new limit `frontend.max-outstanding-requests-per-tenant` for configuring queue size per tenant. Started deprecating two flags `-query-scheduler.max-outstanding-requests-per-tenant` and `-querier.max-outstanding-requests-per-tenant`, and change their value default to 0. Now if both the old flag and new flag are specified, the old flag's queue size will be picked. 
#5005 diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index f2005a4f02b..9cabb395228 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -47,8 +47,10 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F http.Error(w, err.Error(), http.StatusInternalServerError) return } - if resp.GetCode() != 202 { + if resp.GetCode()/100 == 5 { level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) } http.Error(w, string(resp.Body), int(resp.Code)) } From bdbb8d7d72c5fdaafa9ec9a5f0b81d0978e1cf9e Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 26 Jan 2023 16:12:12 -0800 Subject: [PATCH 054/133] include status code in query frontend query stats logs (#5104) Signed-off-by: Ben Ye Signed-off-by: Ben Ye Signed-off-by: Alex Le --- pkg/frontend/transport/handler.go | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 8add8ae82bc..7e6c8298a92 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -147,7 +147,12 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.reportSlowQuery(r, queryString, queryResponseTime) } if f.cfg.QueryStatsEnabled { - f.reportQueryStats(r, queryString, queryResponseTime, stats, err) + statusCode := resp.StatusCode + if err != nil { + statusCode = getStatusCodeFromError(err) + } + + f.reportQueryStats(r, queryString, queryResponseTime, stats, err, statusCode) } if err != nil { @@ -204,7 +209,7 @@ func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, query level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) } -func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.QueryStats, error error) { +func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.QueryStats, error error, statusCode int) { tenantIDs, err := tenant.TenantIDs(r.Context()) if err != nil { return @@ -233,6 +238,7 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer "fetched_series_count", numSeries, "fetched_chunks_bytes", numBytes, "fetched_data_bytes", numDataBytes, + "status_code", statusCode, }, stats.LoadExtraFields()...) logMessage = append(logMessage, formatQueryString(queryString)...) @@ -299,3 +305,22 @@ func statsValue(name string, d time.Duration) string { durationInMs := strconv.FormatFloat(float64(d)/float64(time.Millisecond), 'f', -1, 64) return name + ";dur=" + durationInMs } + +// getStatusCodeFromError extracts http status code based on error, similar to how writeError was implemented. 
+func getStatusCodeFromError(err error) int { + switch err { + case context.Canceled: + return StatusClientClosedRequest + case context.DeadlineExceeded: + return http.StatusGatewayTimeout + default: + if util.IsRequestBodyTooLarge(err) { + return http.StatusRequestEntityTooLarge + } + } + resp, ok := httpgrpc.HTTPResponseFromError(err) + if ok { + return int(resp.Code) + } + return http.StatusInternalServerError +} From 87ca5c5cfca3f04d7fe472663c763cfa7ed9243d Mon Sep 17 00:00:00 2001 From: Bryan Date: Fri, 27 Jan 2023 19:19:22 +0100 Subject: [PATCH 055/133] Rename oltp_endpoint to otlp_endpoint to match opentelemetry spec and lib name (#5068) * rename oltp_endpoint to otlp_endpoint to be consistent with library name and spec Signed-off-by: bha * update changelog Signed-off-by: bha * change BUGFIX to CHANGE Signed-off-by: bha * deprecate instead of remove Signed-off-by: bha * fix errors because I forgot this is golang and not python lol Signed-off-by: sharkymcdongles * improve documentation comment Signed-off-by: sharkymcdongles * remove newline Signed-off-by: sharkymcdongles * hide deprecated config Signed-off-by: sharkymcdongles * remove hidden Signed-off-by: sharkymcdongles --------- Signed-off-by: bha Signed-off-by: sharkymcdongles Signed-off-by: Bryan Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/configuration/config-file-reference.md | 4 ++-- pkg/tracing/tracing.go | 26 +++++++++++++++++---- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 187630e31c2..44ef946c98a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## master / unreleased * [CHANGE] Alertmanager: Local file disclosure vulnerability in OpsGenie configuration has been fixed. #5045 +* [CHANGE] Rename oltp_endpoint to otlp_endpoint to match opentelemetry spec and lib name. #5067 * [CHANGE] Distributor/Ingester: Log warn level on push requests when they have status code 4xx. Do not log if status is 429. #5103 * [ENHANCEMENT] Update Go version to 1.19.3. #4988 * [ENHANCEMENT] Querier: limit series query to only ingesters if `start` param is not specified. #4976 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index ab75686cc23..18e56571ba0 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -4158,8 +4158,8 @@ The `tracing_config` configures backends cortex uses. otel: # otl collector endpoint that the driver will use to send spans. - # CLI flag: -tracing.otel.oltp-endpoint - [oltp_endpoint: | default = ""] + # CLI flag: -tracing.otel.otlp-endpoint + [otlp_endpoint: | default = ""] # enhance/modify traces/propagators for specific exporter. If empty, OTEL # defaults will apply. Supported values are: `awsxray.` diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index c562d771cf1..bfa97e6a06f 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -38,7 +38,8 @@ type Config struct { } type Otel struct { - OltpEndpoint string `yaml:"oltp_endpoint" json:"oltp_endpoint"` + OltpEndpoint string `yaml:"oltp_endpoint" json:"oltp_endpoint" doc:"hidden"` + OtlpEndpoint string `yaml:"otlp_endpoint" json:"otlp_endpoint"` ExporterType string `yaml:"exporter_type" json:"exporter_type"` SampleRatio float64 `yaml:"sample_ratio" json:"sample_ratio"` TLSEnabled bool `yaml:"tls_enabled"` @@ -51,7 +52,8 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { p := "tracing" f.StringVar(&c.Type, p+".type", JaegerType, "Tracing type. 
OTEL and JAEGER are currently supported. For jaeger `JAEGER_AGENT_HOST` environment variable should also be set. See: https://cortexmetrics.io/docs/guides/tracing .") f.Float64Var(&c.Otel.SampleRatio, p+".otel.sample-ratio", 0.001, "Fraction of traces to be sampled. Fractions >= 1 means sampling if off and everything is traced.") - f.StringVar(&c.Otel.OltpEndpoint, p+".otel.oltp-endpoint", "", "otl collector endpoint that the driver will use to send spans.") + f.StringVar(&c.Otel.OltpEndpoint, p+".otel.oltp-endpoint", "", "DEPRECATED: use otel.otlp-endpoint instead.") + f.StringVar(&c.Otel.OtlpEndpoint, p+".otel.otlp-endpoint", "", "otl collector endpoint that the driver will use to send spans.") f.StringVar(&c.Otel.ExporterType, p+".otel.exporter-type", "", "enhance/modify traces/propagators for specific exporter. If empty, OTEL defaults will apply. Supported values are: `awsxray.`") f.BoolVar(&c.Otel.TLSEnabled, p+".otel.tls-enabled", c.Otel.TLSEnabled, "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.") c.Otel.TLS.RegisterFlagsWithPrefix(p+".otel.tls", f) @@ -60,8 +62,11 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { func (c *Config) Validate() error { switch strings.ToLower(c.Type) { case OtelType: - if c.Otel.OltpEndpoint == "" { - return errors.New("oltp-endpoint must be defined when using otel exporter") + if (c.Otel.OtlpEndpoint == "") && (c.Otel.OltpEndpoint == "") { + return errors.New("otlp-endpoint must be defined when using otel exporter") + } + if len(c.Otel.OltpEndpoint) > 0 { + level.Warn(util_log.Logger).Log("DEPRECATED: otel.oltp-endpoint is deprecated. User otel.otlp-endpoint instead.") } } @@ -83,8 +88,19 @@ func SetupTracing(ctx context.Context, name string, c Config) (func(context.Cont case OtelType: util_log.Logger.Log("msg", "creating otel exporter") + if (len(c.Otel.OtlpEndpoint) > 0) && (len(c.Otel.OltpEndpoint) > 0) { + level.Warn(util_log.Logger).Log("msg", "DEPRECATED: otel.otlp and otel.oltp both set, using otel.otlp because otel.oltp is deprecated") + } + options := []otlptracegrpc.Option{ - otlptracegrpc.WithEndpoint(c.Otel.OltpEndpoint), + otlptracegrpc.WithEndpoint(c.Otel.OtlpEndpoint), + } + + if (c.Otel.OtlpEndpoint == "") && (len(c.Otel.OltpEndpoint) > 0) { + level.Warn(util_log.Logger).Log("msg", "DEPRECATED: otel.oltp is deprecated use otel.otlp") + options = []otlptracegrpc.Option{ + otlptracegrpc.WithEndpoint(c.Otel.OltpEndpoint), + } } if c.Otel.TLSEnabled { From 80f14cb05d11201d9a7ce057906184dca610304e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Jan 2023 22:57:36 -0800 Subject: [PATCH 056/133] Bump github.com/prometheus/common from 0.37.0 to 0.39.0 (#5052) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.37.0 to 0.39.0. - [Release notes](https://github.com/prometheus/common/releases) - [Commits](https://github.com/prometheus/common/compare/v0.37.0...v0.39.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 4 +- go.sum | 8 +- .../prometheus/common/config/config.go | 26 ++++- .../prometheus/common/config/http_config.go | 12 +- .../prometheus/common/expfmt/fuzz.go | 4 +- .../common/expfmt/openmetrics_create.go | 22 ++-- .../bitbucket.org/ww/goautoneg/autoneg.go | 22 ++-- .../prometheus/common/version/info.go | 12 +- .../google/internal/externalaccount/aws.go | 105 ++++++++++++++---- .../externalaccount/basecredentials.go | 4 + vendor/modules.txt | 4 +- 11 files changed, 164 insertions(+), 59 deletions(-) diff --git a/go.mod b/go.mod index fd67ba18a11..44ce9becfa6 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.38.0 + github.com/prometheus/common v0.39.0 github.com/prometheus/prometheus v0.40.7 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 @@ -205,7 +205,7 @@ require ( golang.org/x/crypto v0.1.0 // indirect golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect golang.org/x/mod v0.7.0 // indirect - golang.org/x/oauth2 v0.1.0 // indirect + golang.org/x/oauth2 v0.3.0 // indirect golang.org/x/sys v0.4.0 // indirect golang.org/x/text v0.6.0 // indirect golang.org/x/tools v0.4.0 // indirect diff --git a/go.sum b/go.sum index fe3c24cd95d..9e0c8d1f4d7 100644 --- a/go.sum +++ b/go.sum @@ -1364,8 +1364,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.38.0 h1:VTQitp6mXTdUoCmDMugDVOJ1opi6ADftKfp/yeqTR/E= -github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= @@ -1878,8 +1878,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go index fffda4a7ef4..0b91f20d55a 100644 --- a/vendor/github.com/prometheus/common/config/config.go +++ b/vendor/github.com/prometheus/common/config/config.go @@ -18,6 +18,7 @@ package config import ( "encoding/json" + "net/http" "path/filepath" ) @@ -34,7 +35,7 @@ func (s Secret) MarshalYAML() (interface{}, error) { return nil, nil } -//UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. +// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain Secret return unmarshal((*plain)(s)) @@ -48,6 +49,29 @@ func (s Secret) MarshalJSON() ([]byte, error) { return json.Marshal(secretToken) } +type Header map[string][]Secret + +func (h *Header) HTTPHeader() http.Header { + if h == nil || *h == nil { + return nil + } + + header := make(http.Header) + + for name, values := range *h { + var s []string + if values != nil { + s = make([]string, 0, len(values)) + for _, value := range values { + s = append(s, string(value)) + } + } + header[name] = s + } + + return header +} + // DirectorySetter is a config type that contains file paths that may // be relative to the file containing the config. type DirectorySetter interface { diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 3ba7f99cc15..3965099972a 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -289,6 +289,11 @@ type HTTPClientConfig struct { BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"` // HTTP proxy server to use to connect to the targets. ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` + // ProxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. Assume that at least _some_ of + // these headers are going to contain secrets and use Secret as the + // value type instead of string. + ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` // TLSConfig to use to connect to the targets. TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. @@ -314,7 +319,8 @@ func (c *HTTPClientConfig) SetDirectory(dir string) { } // Validate validates the HTTPClientConfig to check only one of BearerToken, -// BasicAuth and BearerTokenFile is configured. +// BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL +// is set if ProxyConnectHeader is set. func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field. 
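// For illustration only (these comment lines are not part of the upstream
// vendored file): with the new ProxyConnectHeader field above, a hypothetical
// client configuration such as
//
//	proxy_connect_header:
//	  Proxy-Authorization: ["Basic c2VjcmV0"]
//
// is rejected by Validate below unless proxy_url is also configured, e.g.
// proxy_url: http://proxy.internal:3128 (both values here are made-up
// examples); when both are present, the headers are converted via
// Header.HTTPHeader() and sent on proxy CONNECT requests.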
if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { @@ -372,6 +378,9 @@ func (c *HTTPClientConfig) Validate() error { return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured") } } + if len(c.ProxyConnectHeader) > 0 && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "") { + return fmt.Errorf("if proxy_connect_header is configured proxy_url must also be configured") + } return nil } @@ -500,6 +509,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // It is applied on request. So we leave out any timings here. var rt http.RoundTripper = &http.Transport{ Proxy: http.ProxyURL(cfg.ProxyURL.URL), + ProxyConnectHeader: cfg.ProxyConnectHeader.HTTPHeader(), MaxIdleConns: 20000, MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 DisableKeepAlives: !opts.keepAlivesEnabled, diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index f819e4f8b54..dfac962a4e7 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -21,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 9d94ae9effe..21cdddcf054 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -46,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) 
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c7c..a21b9d15dd8 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 39bcfba6411..b3da48a17d0 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -31,6 +31,8 @@ var ( BuildUser string BuildDate string GoVersion = runtime.Version() + GoOS = runtime.GOOS + GoArch = runtime.GOARCH ) // NewCollector returns a collector that exports metrics about current version @@ -41,7 +43,7 @@ func NewCollector(program string) prometheus.Collector { Namespace: program, Name: "build_info", Help: fmt.Sprintf( - "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.", + "A metric with a constant '1' value labeled by version, revision, branch, goversion from which %s was built, and the goos and goarch for the build.", program, ), ConstLabels: prometheus.Labels{ @@ -49,6 +51,8 @@ func NewCollector(program string) prometheus.Collector { "revision": getRevision(), "branch": Branch, "goversion": GoVersion, + "goos": GoOS, + "goarch": GoArch, }, }, func() float64 { return 1 }, @@ -74,7 +78,7 @@ func Print(program string) string { "buildUser": BuildUser, "buildDate": BuildDate, "goVersion": GoVersion, - "platform": runtime.GOOS + "/" + runtime.GOARCH, + "platform": GoOS + "/" + GoArch, } t := template.Must(template.New("version").Parse(versionInfoTmpl)) @@ -90,7 +94,7 @@ func Info() string { return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision()) } -// BuildContext returns goVersion, buildUser and buildDate information. +// BuildContext returns goVersion, platform, buildUser and buildDate information. func BuildContext() string { - return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate) + return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate) } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index e917195d53a..2bf3202b290 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -62,6 +62,13 @@ const ( // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -267,6 +274,49 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } +func (cs awsCredentialSource) validateMetadataServers() error { + if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { + return err + } + if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { + return err + } + return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") +} + +var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} + +func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { + if metadataUrl == "" { + // Zero value means use default, which is valid. 
+ return true + } + + u, err := url.Parse(metadataUrl) + if err != nil { + // Unparseable URL means invalid + return false + } + + for _, validHostname := range validHostnames { + if u.Hostname() == validHostname { + // If it's one of the valid hostnames, everything is good + return true + } + } + + // hostname not found in our allowlist, so not valid + return false +} + +func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { + if !cs.isValidMetadataServer(metadataUrl) { + return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) + } + + return nil +} + func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -274,16 +324,33 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. + return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSessionToken, err := cs.getAWSSessionToken() - if err != nil { - return "", err - } - headers := make(map[string]string) - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } } awsSecurityCredentials, err := cs.getSecurityCredentials(headers) @@ -389,11 +456,11 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil - } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -434,14 +501,12 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } roleName, err := cs.getMetadataRoleName(headers) diff --git 
a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 9fc35535e7f..3eab8df7ced 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -213,6 +213,10 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } + if err := awsCredSource.validateMetadataServers(); err != nil { + return nil, err + } + return awsCredSource, nil } } else if c.CredentialSource.File != "" { diff --git a/vendor/modules.txt b/vendor/modules.txt index 4f0e990d725..954819bd771 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -677,7 +677,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.38.0 +# github.com/prometheus/common v0.39.0 ## explicit; go 1.17 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -1098,7 +1098,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.1.0 +# golang.org/x/oauth2 v0.3.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler From 7d1ed3dbdfc78569eb0f29a2356615b7d89cd13e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Jan 2023 23:27:11 -0800 Subject: [PATCH 057/133] Bump github.com/minio/minio-go/v7 from 7.0.46 to 7.0.47 (#5096) Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.46 to 7.0.47. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.46...v7.0.47) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/minio/minio-go/v7/api.go | 4 +- .../pkg/signer/request-signature-streaming.go | 37 +++++++++++++------ vendor/modules.txt | 2 +- 5 files changed, 32 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 44ce9becfa6..fe039563503 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.14 github.com/lib/pq v1.10.7 - github.com/minio/minio-go/v7 v7.0.46 + github.com/minio/minio-go/v7 v7.0.47 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e diff --git a/go.sum b/go.sum index 9e0c8d1f4d7..91150199ade 100644 --- a/go.sum +++ b/go.sum @@ -1136,8 +1136,8 @@ github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7Xn github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.46 h1:Vo3tNmNXuj7ME5qrvN4iadO7b4mzu/RSFdUkUhaPldk= -github.com/minio/minio-go/v7 v7.0.46/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs= +github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 084b3ccdd95..99a03df958f 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -118,7 +118,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.46" + libraryVersion = "v7.0.47" ) // User Agent should always following the below style. @@ -845,7 +845,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request // Additionally, we also look if the initialized client is secure, // if yes then we don't need to perform streaming signature. req = signer.StreamingSignV4(req, accessKeyID, - secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) default: // Set sha256 sum for signature calculation only with signature version '4'. 
shaHeader := unsignedPayload diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index cf2356fd90f..1c2f1dc9d14 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -26,6 +26,8 @@ import ( "strconv" "strings" "time" + + md5simd "github.com/minio/md5-simd" ) // Reference for constants used below - @@ -90,14 +92,14 @@ func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { // buildChunkStringToSign - returns the string to sign given chunk data // and previous signature. -func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { +func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { stringToSignParts := []string{ streamingPayloadHdr, t.Format(iso8601DateFormat), getScope(region, t, ServiceTypeS3), previousSig, emptySHA256, - hex.EncodeToString(sum256(chunkData)), + chunkChecksum, } return strings.Join(stringToSignParts, "\n") @@ -105,13 +107,13 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ // buildTrailerChunkStringToSign - returns the string to sign given chunk data // and previous signature. -func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { +func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { stringToSignParts := []string{ streamingTrailerHdr, t.Format(iso8601DateFormat), getScope(region, t, ServiceTypeS3), previousSig, - hex.EncodeToString(sum256(chunkData)), + chunkChecksum, } return strings.Join(stringToSignParts, "\n") @@ -148,21 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte { } // buildChunkSignature - returns chunk signature for a given chunk and previous signature. -func buildChunkSignature(chunkData []byte, reqTime time.Time, region, +func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region, previousSignature, secretAccessKey string, ) string { chunkStringToSign := buildChunkStringToSign(reqTime, region, - previousSignature, chunkData) + previousSignature, chunkCheckSum) signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) return getSignature(signingKey, chunkStringToSign) } // buildChunkSignature - returns chunk signature for a given chunk and previous signature. -func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region, +func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region, previousSignature, secretAccessKey string, ) string { chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, - previousSignature, chunkData) + previousSignature, chunkChecksum) signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) return getSignature(signingKey, chunkStringToSign) } @@ -202,12 +204,17 @@ type StreamingReader struct { totalChunks int lastChunkSize int trailer http.Header + sh256 md5simd.Hasher } // signChunk - signs a chunk read from s.baseReader of chunkLen size. 
func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { // Compute chunk signature for next header - signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, + s.sh256.Reset() + s.sh256.Write(s.chunkBuf[:chunkLen]) + chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + + signature := buildChunkSignature(chunckChecksum, s.reqTime, s.region, s.prevSignature, s.secretAccessKey) // For next chunk signature computation @@ -239,8 +246,11 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) { s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) } + s.sh256.Reset() + s.sh256.Write(s.chunkBuf) + chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) // Compute chunk signature - signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime, + signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime, s.region, s.prevSignature, s.secretAccessKey) // For next chunk signature computation @@ -273,7 +283,7 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { // StreamingSignV4 - provides chunked upload signatureV4 support by // implementing io.Reader. func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time, + region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, ) *http.Request { // Set headers needed for streaming signature. prepareStreamingRequest(req, sessionToken, dataLen, reqTime) @@ -294,6 +304,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok chunkNum: 1, totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, lastChunkSize: int(dataLen % payloadChunkSize), + sh256: sh256, } if len(req.Trailer) > 0 { stReader.trailer = req.Trailer @@ -384,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { // Close - this method makes underlying io.ReadCloser's Close method available. func (s *StreamingReader) Close() error { + if s.sh256 != nil { + s.sh256.Close() + s.sh256 = nil + } return s.baseReadCloser.Close() } diff --git a/vendor/modules.txt b/vendor/modules.txt index 954819bd771..1bc6de265a9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -556,7 +556,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.46 +# github.com/minio/minio-go/v7 v7.0.47 ## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials From 79e30177b41b8be0b7b04364143289a5f9b8be21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 14:56:21 -0800 Subject: [PATCH 058/133] Bump github.com/aws/aws-sdk-go from 1.44.156 to 1.44.189 (#5110) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.156 to 1.44.189. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.156...v1.44.189) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 2151 ++++++++++++++--- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../protocol/jsonrpc/unmarshal_error.go | 63 +- .../aws/aws-sdk-go/service/sts/api.go | 17 +- vendor/modules.txt | 2 +- 7 files changed, 1871 insertions(+), 370 deletions(-) diff --git a/go.mod b/go.mod index fe039563503..1973920170a 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alicebob/miniredis/v2 v2.30.0 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.156 + github.com/aws/aws-sdk-go v1.44.189 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.1 diff --git a/go.sum b/go.sum index 91150199ade..f841f040f93 100644 --- a/go.sum +++ b/go.sum @@ -201,8 +201,8 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.156 h1:3RhbBTZ87HoI5OP2JjcKdd5qOnyo9YOAW8+Bb/h0vSE= -github.com/aws/aws-sdk-go v1.44.156/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.189 h1:9PBrjndH1uL5AN8818qI3duhQ4hgkMuLvqkJlg9MRyk= +github.com/aws/aws-sdk-go v1.44.189/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 521b3219800..4cb781fedbd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -27,6 +27,7 @@ const ( ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). + ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). EuCentral2RegionID = "eu-central-2" // Europe (Zurich). 
@@ -172,6 +173,9 @@ var awsPartition = partition{ "ap-southeast-3": region{ Description: "Asia Pacific (Jakarta)", }, + "ap-southeast-4": region{ + Description: "Asia Pacific (Melbourne)", + }, "ca-central-1": region{ Description: "Canada (Central)", }, @@ -261,6 +265,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -432,6 +439,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -935,6 +945,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -1168,6 +1187,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -1733,9 +1760,15 @@ var awsPartition = partition{ }, "api.mediatailor": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1751,6 +1784,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -1831,6 +1867,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2084,6 +2123,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2275,6 +2317,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2481,6 +2526,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3015,6 +3063,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -3535,6 +3586,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3710,6 +3764,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3907,6 +3964,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3997,12 
+4057,32 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "cassandra": service{ @@ -4134,6 +4214,43 @@ var awsPartition = partition{ }, }, }, + "cleanrooms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "cloud9": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4233,6 +4350,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4421,6 +4541,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4595,6 +4718,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -4699,6 +4825,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4902,6 +5031,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -5176,6 +5308,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6228,6 +6363,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7379,6 +7517,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7543,6 +7684,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7619,6 +7763,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8127,6 
+8274,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8282,159 +8432,171 @@ var awsPartition = partition{ Region: "ap-south-1", }: endpoint{}, endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ Region: "ap-south-1", Variant: dualStackVariant, }: endpoint{ @@ -8452,6 +8614,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ 
-8639,6 +8804,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8947,6 +9115,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9297,6 +9468,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -9315,6 +9495,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9441,6 +9630,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -9459,6 +9657,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -9651,6 +9858,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9799,6 +10009,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10354,6 +10567,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10505,6 +10721,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11518,6 +11737,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11734,12 +11956,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11749,6 +11977,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -12296,6 +12530,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -12552,12 +12789,18 @@ var awsPartition = partition{ }, "identitystore": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -12567,6 +12810,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12576,12 +12822,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -13846,6 +14101,9 @@ var awsPartition = partition{ }, "kendra": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13914,6 +14172,141 @@ var awsPartition = partition{ }, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "kendra-ranking.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-1.api.aws", + }, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "kendra-ranking.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "kendra-ranking.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "kendra-ranking.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "kendra-ranking.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "kendra-ranking.us-west-2.api.aws", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13946,6 +14339,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14369,6 +14765,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14878,6 +15292,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-southeast-3.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -15214,6 +15637,121 @@ var awsPartition = partition{ }, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, "license-manager-user-subscriptions": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15419,6 +15957,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -15603,6 +16144,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -15615,6 +16159,10 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15627,15 +16175,67 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "machinelearning": service{ @@ -15944,6 +16544,9 @@ var awsPartition = partition{ }, "mediaconvert": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -16377,124 +16980,328 @@ var awsPartition = partition{ Region: 
"eu-west-3", }: endpoint{}, endpointKey{ - Region: "fips", + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", }: endpoint{ - Hostname: "memory-db-fips.us-west-1.amazonaws.com", + Hostname: "mgn-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "messaging-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, + Region: "fips-us-east-2", }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + Hostname: "mgn-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-east-1-fips", + Region: "fips-us-west-1", }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + Hostname: "mgn-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, Deprecated: boxedTrue, }, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", CredentialScope: 
credentialScope{ - Service: "aws-marketplace", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16505,109 +17312,38 @@ var awsPartition = partition{ Region: "us-east-1", }: endpoint{}, endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mgh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + }, endpointKey{ - Region: "eu-central-1", + Region: "us-east-2", }: endpoint{}, endpointKey{ - Region: "eu-west-1", - }: endpoint{}, + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + }, endpointKey{ - Region: "eu-west-2", + Region: "us-west-1", }: endpoint{}, endpointKey{ - Region: "us-east-1", - }: endpoint{}, + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, - }, - }, - "mgn": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + }, }, }, "migrationhub-orchestrator": service{ @@ -16814,6 +17550,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17010,6 +17749,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17241,6 +17983,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17425,6 +18170,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17477,6 +18225,14 @@ var awsPartition = partition{ }, "oidc": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, endpointKey{ Region: "ap-east-1", }: endpoint{ @@ -17533,6 +18289,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -17621,6 +18385,14 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -18021,6 +18793,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18406,6 +19181,14 @@ var awsPartition = partition{ }, "portal.sso": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, endpointKey{ Region: "ap-east-1", }: endpoint{ @@ -18462,6 +19245,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -18550,6 +19341,14 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -18779,12 +19578,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: 
endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -18819,6 +19624,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18840,12 +19648,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18967,6 +19781,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18976,6 +19793,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19133,6 +19953,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19534,6 +20357,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -20071,6 +20897,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.ap-southeast-2.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -20153,6 +20984,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20162,18 +20996,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -20467,6 +21310,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20689,6 +21535,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20897,6 +21746,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", + }, endpointKey{ Region: "aws-global", }: endpoint{ @@ -21986,6 +22844,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + 
Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -22197,6 +23058,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22596,6 +23460,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22645,33 +23512,102 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.af-south-1.amazonaws.com", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-east-1.amazonaws.com", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-1.amazonaws.com", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-2.amazonaws.com", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-3.amazonaws.com", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-1.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, @@ -22690,30 +23626,102 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-north-1.amazonaws.com", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: 
"eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-3.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-south-1.amazonaws.com", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.sa-east-1.amazonaws.com", + }, endpointKey{ Region: "servicediscovery", }: endpoint{ @@ -22744,6 +23752,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -22762,6 +23776,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -22780,6 +23800,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -22798,6 +23824,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -22870,6 +23902,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -23179,6 +24214,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23188,12 +24229,51 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + }, }, }, "snowball": service{ @@ -23540,6 +24620,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -23688,6 +24771,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -23833,6 +24919,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24010,8 +25099,81 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "ssm-sap": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "sso": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -24033,6 +25195,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24066,6 +25231,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -24103,6 +25271,9 @@ var awsPartition = partition{ endpointKey{ Region: 
"ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24263,12 +25434,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24410,6 +25587,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24502,6 +25682,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "aws-global", }: endpoint{ @@ -24678,6 +25861,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24820,6 +26006,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24962,6 +26151,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -25683,6 +26875,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25699,12 +26906,12 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "fips.voice-chime.us-east-1.amazonaws.com", + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-1-fips", }: endpoint{ - Hostname: "fips.voice-chime.us-east-1.amazonaws.com", + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -25717,12 +26924,12 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "fips.voice-chime.us-west-2.amazonaws.com", + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", }, endpointKey{ Region: "us-west-2-fips", }: endpoint{ - Hostname: "fips.voice-chime.us-west-2.amazonaws.com", + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, @@ -26280,6 +27487,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -26334,6 +27550,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: 
"waf-regional.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -26856,6 +28089,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -26910,6 +28152,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "wafv2.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -27341,6 +28600,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -27947,6 +29209,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28409,6 +29681,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28429,6 +29726,13 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28513,6 +29817,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "monitoring": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -28856,9 +30170,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"servicediscovery.cn-northwest-1.amazonaws.com.cn", + }, }, }, "sms": service{ @@ -29547,6 +30873,9 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -30503,9 +31832,24 @@ var awsusgovPartition = partition{ }, "databrew": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + }, }, }, "datasync": service{ @@ -31953,6 +33297,31 @@ var awsusgovPartition = partition{ }, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-west-1.api.aws", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32280,6 +33649,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -32563,6 +33942,16 @@ var awsusgovPartition = partition{ }, }, }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "pinpoint": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -33468,6 +34857,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -33486,6 +34881,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-west-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -33596,9 +34997,24 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + }, }, }, "snowball": service{ @@ -34880,6 +36296,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, 
"health": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35001,6 +36424,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35255,6 +36685,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, }, @@ -35381,6 +36814,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -35655,6 +37095,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index ee4c2d9e987..9e8856ba5c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.156" +const SDKVersion = "1.44.189" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go index c0c52e2db0f..9c1ccde54ae 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -13,17 +13,46 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) +const ( + awsQueryError = "x-amzn-query-error" + // A valid header example - "x-amzn-query-error": ";" + awsQueryErrorPartsCount = 2 +) + // UnmarshalTypedError provides unmarshaling errors API response errors // for both typed and untyped errors. type UnmarshalTypedError struct { - exceptions map[string]func(protocol.ResponseMetadata) error + exceptions map[string]func(protocol.ResponseMetadata) error + queryExceptions map[string]func(protocol.ResponseMetadata, string) error } // NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the // set of exception names to the error unmarshalers func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { return &UnmarshalTypedError{ - exceptions: exceptions, + exceptions: exceptions, + queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{}, + } +} + +// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError applying options to the UnmarshalTypedError +// before returning it +func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError { + unmarshaledError := NewUnmarshalTypedError(exceptions) + for _, fn := range optFns { + fn(unmarshaledError) + } + return unmarshaledError +} + +// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions. +// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found. 
+// See also [awsQueryCompatible trait] +// +// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait +func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) { + return func(typedError *UnmarshalTypedError) { + typedError.queryExceptions = queryExceptions } } @@ -50,18 +79,32 @@ func (u *UnmarshalTypedError) UnmarshalError( code := codeParts[len(codeParts)-1] msg := jsonErr.Message + queryCodeParts := queryCodeParts(resp, u) + if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value + // If query-compatible exceptions are found and query-error-header is found, + // then use associated constructor to get exception with query error code. + // + // If exception code is known, use associated constructor to get a value // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) + var v error + queryErrFn, queryExceptionsFound := u.queryExceptions[code] + if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound { + v = queryErrFn(respMeta, queryCodeParts[0]) + } else { + v = fn(respMeta) + } err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) if err != nil { return nil, err } - return v, nil } + if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 { + code = queryCodeParts[0] + } + // fallback to unmodeled generic exceptions return awserr.NewRequestFailure( awserr.New(code, msg, nil), @@ -70,6 +113,16 @@ func (u *UnmarshalTypedError) UnmarshalError( ), nil } +// A valid header example - "x-amzn-query-error": ";" +func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string { + queryCodeHeader := resp.Header.Get(awsQueryError) + var queryCodeParts []string + if queryCodeHeader != "" && len(u.queryExceptions) > 0 { + queryCodeParts = strings.Split(queryCodeHeader, ";") + } + return queryCodeParts +} + // UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc // protocol request errors var UnmarshalErrorHandler = request.NamedHandler{ diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index c0706bec93d..63729d0a78b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // // Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. -// These temporary credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use AssumeRole within your account -// or for cross-account access. For a comparison of AssumeRole with other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// Amazon Web Services resources. These temporary credentials consist of an +// access key ID, a secret access key, and a security token. Typically, you +// use AssumeRole within your account or for cross-account access. 
For a comparison +// of AssumeRole with other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // @@ -1103,13 +1102,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // # Permissions // // You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: +// Amazon Web Services service with the following exceptions: // // - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. +// API. This limitation does not apply to console sessions. // // - You cannot call any STS operations except GetCallerIdentity. // +// You can use temporary credentials for single sign-on (SSO) to the console. +// // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon diff --git a/vendor/modules.txt b/vendor/modules.txt index 1bc6de265a9..89b0234db29 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -103,7 +103,7 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.44.156 +# github.com/aws/aws-sdk-go v1.44.189 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr From 705d5266ca8df46269cc61a45b4be77c768f00cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 Jan 2023 10:04:15 -0800 Subject: [PATCH 059/133] Bump go.opentelemetry.io/otel/sdk from 1.11.2 to 1.12.0 (#5111) Bumps [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) from 1.11.2 to 1.12.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...v1.12.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 6 +- go.sum | 12 +- vendor/go.opentelemetry.io/otel/.lycheeignore | 1 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 138 +- vendor/go.opentelemetry.io/otel/Makefile | 10 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 11 +- .../exporters/otlp/internal/wrappederror.go | 61 + vendor/go.opentelemetry.io/otel/handler.go | 25 +- .../otel/internal/global/internal_logging.go | 30 +- .../otel/sdk/resource/builtin.go | 2 +- .../otel/sdk/resource/container.go | 2 +- .../otel/sdk/resource/env.go | 2 +- .../otel/sdk/resource/os.go | 2 +- .../otel/sdk/resource/process.go | 2 +- .../otel/sdk/trace/provider.go | 15 +- .../otel/sdk/trace/sampling.go | 10 +- .../otel/sdk/trace/span.go | 2 +- .../otel/semconv/v1.17.0/doc.go | 20 + .../otel/semconv/v1.17.0/exception.go | 20 + .../otel/semconv/v1.17.0/http.go | 21 + .../otel/semconv/v1.17.0/resource.go | 1118 ++++++++++ .../otel/semconv/v1.17.0/schema.go | 20 + .../otel/semconv/v1.17.0/trace.go | 1892 +++++++++++++++++ vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 4 +- vendor/modules.txt | 7 +- 26 files changed, 3370 insertions(+), 65 deletions(-) create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go diff --git a/go.mod b/go.mod index 1973920170a..06c92fb8054 100644 --- a/go.mod +++ b/go.mod @@ -59,12 +59,12 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.6 go.etcd.io/etcd/client/v3 v3.5.6 go.opentelemetry.io/contrib/propagators/aws v1.12.0 - go.opentelemetry.io/otel v1.11.2 + go.opentelemetry.io/otel v1.12.0 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 - go.opentelemetry.io/otel/sdk v1.11.2 - go.opentelemetry.io/otel/trace v1.11.2 + go.opentelemetry.io/otel/sdk v1.12.0 + go.opentelemetry.io/otel/trace v1.12.0 go.uber.org/atomic v1.10.0 golang.org/x/net v0.5.0 golang.org/x/sync v0.1.0 diff --git a/go.sum b/go.sum index f841f040f93..0ce80323fbf 100644 --- a/go.sum +++ b/go.sum @@ -1629,8 +1629,8 @@ go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4Zf go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= +go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ= +go.opentelemetry.io/otel v1.12.0/go.mod h1:geaoz0L0r1BEOR81k7/n9W4TCXYCJ7bPO7K374jQHG0= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 
h1:jrqVQNRKglMRBUrSf40J5mBYnUyleAKL+g0iuyEyaGA= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= @@ -1650,14 +1650,14 @@ go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2 go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= -go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= +go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o= +go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc= +go.opentelemetry.io/otel/trace v1.12.0/go.mod h1:pHlgBynn6s25qJ2szD+Bv+iwKJttjHSI3lUAyf0GNuQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 545d634525d..32e4812755e 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,3 +1,4 @@ http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 9f130b8be10..2bfe0a9411b 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,139 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. 
+ This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. +- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. 
+ These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. + - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. 
(#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead, it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead.
(#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + ## [1.11.2/0.34.0] 2022-12-05 ### Added @@ -58,7 +191,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Prevent duplicate Prometheus description, unit, and type. (#3469) - Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) -## Removed +### Removed - The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) - The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) @@ -2087,7 +2220,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.12.0...HEAD +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 [1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 [1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 [1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 68cdfef7d96..befb040a77b 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -208,11 +208,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) - @[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) - @[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) - @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - @$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) + [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: prerelease prerelease: | $(MULTIMOD) diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 71e57625479..77d56c93651 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -6,20 +6,25 @@ New versions of the [OpenTelemetry specification] mean new versions of the `semc The `semconv-generate` make target is used for this. 1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. -2. Run the `make semconv-generate ...` target from this repository. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. For example, ```sh -export TAG="v1.7.0" # Change to the release version you are generating. +export TAG="v1.13.0" # Change to the release version you are generating. export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" -git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" +git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" +docker pull otel/semconvgen:latest make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. ``` This should create a new sub-package of [`semconv`](./semconv). Ensure things look correct before submitting a pull request to include the addition. +**Note**, the generation code was changed to generate versions >= 1.13. +To generate versions prior to this, checkout the old release of this repository (i.e. 
[2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). + ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go new file mode 100644 index 00000000000..217751da552 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +// ErrorKind is used to identify the kind of export error +// being wrapped. +type ErrorKind int + +const ( + // TracesExport indicates the error comes from the OTLP trace exporter. + TracesExport ErrorKind = iota +) + +// prefix returns a prefix for the Error() string. +func (k ErrorKind) prefix() string { + switch k { + case TracesExport: + return "traces export: " + default: + return "unknown: " + } +} + +// wrappedExportError wraps an OTLP exporter error with the kind of +// signal that produced it. +type wrappedExportError struct { + wrap error + kind ErrorKind +} + +// WrapTracesError wraps an error from the OTLP exporter for traces. +func WrapTracesError(err error) error { + return wrappedExportError{ + wrap: err, + kind: TracesExport, + } +} + +var _ error = wrappedExportError{} + +// Error attaches a prefix corresponding to the kind of exporter. +func (t wrappedExportError) Error() string { + return t.kind.prefix() + t.wrap.Error() +} + +// Unwrap returns the wrapped error. +func (t wrappedExportError) Unwrap() error { + return t.wrap +} diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 36cf09f7290..ecd363ab516 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -17,7 +17,8 @@ package otel // import "go.opentelemetry.io/otel" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" ) var ( @@ -34,28 +35,26 @@ var ( ) type delegator struct { - lock *sync.RWMutex - eh ErrorHandler + delegate unsafe.Pointer } func (d *delegator) Handle(err error) { - d.lock.RLock() - defer d.lock.RUnlock() - d.eh.Handle(err) + d.getDelegate().Handle(err) +} + +func (d *delegator) getDelegate() ErrorHandler { + return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) } // setDelegate sets the ErrorHandler delegate. func (d *delegator) setDelegate(eh ErrorHandler) { - d.lock.Lock() - defer d.lock.Unlock() - d.eh = eh + atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) } func defaultErrorHandler() *delegator { - return &delegator{ - lock: &sync.RWMutex{}, - eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, - } + d := &delegator{} + d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d } // errLogger logs errors if no delegate is set, otherwise they are delegated. 
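The `handler.go` change above and the `internal_logging.go` change that follows both replace a `sync.RWMutex` guard with an atomically swapped pointer, matching the CHANGELOG entries #3543 and #3545. The following is a minimal, self-contained sketch of that delegate pattern only; the `Handler` interface, `delegator`, and `printHandler` names here are illustrative stand-ins (the vendored code stores an `otel.ErrorHandler` and a `logr.Logger`, respectively), not part of this patch.

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"unsafe"
)

// Handler is a hypothetical stand-in for the delegated interface.
type Handler interface {
	Handle(err error)
}

// delegator keeps the current Handler behind an atomically swapped pointer,
// so readers never take a lock and writers never block readers.
type delegator struct {
	delegate unsafe.Pointer // effectively a *Handler
}

func (d *delegator) setDelegate(h Handler) {
	atomic.StorePointer(&d.delegate, unsafe.Pointer(&h))
}

func (d *delegator) Handle(err error) {
	(*(*Handler)(atomic.LoadPointer(&d.delegate))).Handle(err)
}

// printHandler is a trivial Handler used only for this example.
type printHandler struct{ prefix string }

func (p printHandler) Handle(err error) { fmt.Println(p.prefix, err) }

func main() {
	d := &delegator{}
	d.setDelegate(printHandler{prefix: "default:"}) // install a default, as the init paths above do
	d.Handle(errors.New("boom"))                    // prints: default: boom
	d.setDelegate(printHandler{prefix: "custom:"})  // swap the delegate without a mutex
	d.Handle(errors.New("boom"))                    // prints: custom: boom
}
```

Compared with the previous read/write lock, each call on the hot path now performs a single atomic load, and installing a new delegate is a single atomic store; the default must be installed before the first use, which both vendored files do at construction or in `init()`.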
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index ccb3258711a..293c08961fb 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -17,7 +17,8 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" "github.com/go-logr/logr" "github.com/go-logr/stdr" @@ -27,37 +28,36 @@ import ( // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -var globalLoggerLock = &sync.RWMutex{} +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} // SetLogger overrides the globalLogger with l. // // To see Info messages use a logger with `l.V(1).Enabled() == true` // To see Debug messages use a logger with `l.V(5).Enabled() == true`. func SetLogger(l logr.Logger) { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() - globalLogger = l + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) } // Info prints messages about the general state of the API or SDK. // This should usually be less then 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(1).Info(msg, keysAndValues...) + getLogger().V(1).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.Error(err, msg, keysAndValues...) + getLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(5).Info(msg, keysAndValues...) + getLogger().V(5).Info(msg, keysAndValues...) 
} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 7af46c61af0..34a474891a4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 7a897e96977..6f7fd005b7a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -22,7 +22,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 1c349247b0a..deebe363a1d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 3b4d0c14dbd..ac520dd8673 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -19,7 +19,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 9a169f663fb..7eaddd34bfd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -22,7 +22,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type pidProvider func() int diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 327b8b41638..201c1781700 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -76,6 +76,7 @@ type TracerProvider struct { mu sync.Mutex namedTracer map[instrumentation.Scope]*tracer spanProcessors atomic.Value + isShutdown bool // These fields are not protected by the lock mu. They are assumed to be // immutable after creation of the TracerProvider. @@ -163,6 +164,9 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() + if p.isShutdown { + return + } newSPS := spanProcessorStates{} newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...) 
newSPS = append(newSPS, newSpanProcessorState(sp)) @@ -173,6 +177,9 @@ func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() + if p.isShutdown { + return + } old := p.spanProcessors.Load().(spanProcessorStates) if len(old) == 0 { return @@ -227,13 +234,18 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } -// Shutdown shuts down the span processors in the order they were registered. +// Shutdown shuts down TracerProvider. All registered span processors are shut down +// in the order they were registered and any held computational resources are released. func (p *TracerProvider) Shutdown(ctx context.Context) error { spss := p.spanProcessors.Load().(spanProcessorStates) if len(spss) == 0 { return nil } + p.mu.Lock() + defer p.mu.Unlock() + p.isShutdown = true + var retErr error for _, sps := range spss { select { @@ -255,6 +267,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { } } } + p.spanProcessors.Store(spanProcessorStates{}) return retErr } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index a6dcf4b307c..5ee9715d27b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -81,7 +81,7 @@ type traceIDRatioSampler struct { func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { psc := trace.SpanContextFromContext(p.ParentContext) - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, @@ -163,10 +163,10 @@ func NeverSample() Sampler { // the root(Sampler) is used to make sampling decision. 
If the span has // a parent, depending on whether the parent is remote and whether it // is sampled, one of the following samplers will apply: -// - remoteParentSampled(Sampler) (default: AlwaysOn) -// - remoteParentNotSampled(Sampler) (default: AlwaysOff) -// - localParentSampled(Sampler) (default: AlwaysOn) -// - localParentNotSampled(Sampler) (default: AlwaysOff) +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { return parentBased{ root: root, diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index b5d6f544176..5abb0b274d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -30,7 +30,7 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 00000000000..71a1f7748d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 00000000000..9b8c559de42 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 00000000000..d5c4b5c136a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 00000000000..add7987a22b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,1118 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + // The platform on which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). 
If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in the + // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the + // values in the `browser.platform` attribute should capture the exact value that + // the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + // Full user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` + // API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + // Preferred language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- + // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- + // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- + // us/global-infrastructure/geographies/), [Google Cloud + // regions](https://cloud.google.com/about/locations), or [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + 
CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. 
On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + // The name of the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- + // general.md#source-code-attributes) + // span attributes). + + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the single function that this runtime instance executes. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so consider setting `faas.id` as a span attribute instead. + + // The exact value to use for `faas.id` depends on the cloud provider: + + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // aliases.html) + // with the resolved function version, as the same runtime instance may be + // invokable with + // multiple different aliases. + // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- + // resource-names) + // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- + // us/rest/api/resources/resources/get-by-id) of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.We + // b/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + // The immutable version of the function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env- + // var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized Linux systems, the `machine-id` located in + // `/etc/machine-id` or `/var/lib/dbus/machine-id` may be used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name + // (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // Parent Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). 
On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + // The version of the instrumentation scope - (`InstrumentationScope.Version` in + // OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // Deprecated, use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + // Deprecated, use the `otel.scope.version` attribute. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 00000000000..42fc525d165 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 00000000000..01e5f072a21 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,1892 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This document defines the shared attributes used to report a single exception associated with a span or log. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// This document defines attributes for Events represented using Log Records. +const ( + // The name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + // The domain identifies the business context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec + // .md#id) uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m + // d#source-1) identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + // The [version of the CloudEvents specification](https://github.com/cloudevents/s + // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp + // ec.md#type) contains a value describing the type of event related to the + // originating occurrence. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. + // md#subject) of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". In case there are multiple layers that could be considered for database + // name (e.g. Oracle instance name and schema name), the database name to be used + // is the more specific layer (e.g. Oracle schema name). + DBNameKey = attribute.Key("db.name") + // The database statement being executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not explicitly + // disabled via instrumentation configuration.) 
+ // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + 
DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default database + // (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + // Description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or + // GCP, the region in which a function is hosted is essential to uniquely identify + // the function and also part of its endpoint. Since it's part of the endpoint + // being called, the region is always known to clients. In these cases, + // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the + // client or not required for identifying the invoked function, setting + // `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. 
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + // Application layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + // Version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol used and + // might be different from the protocol client's version. If the HTTP client used + // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should + // be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + // Remote socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from `net.peer.name` + // and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, [etc](https://man7.org/linux/man- + // pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + // Remote socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + // Protocol [address family](https://man7.org/linux/man- + // pages/man7/address_families.7.html) which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of + // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry + // SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + // Logical remote hostname, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra + // DNS lookup. 
+ NetPeerNameKey = attribute.Key("net.peer.name") + // Logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Logical local hostname or similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + // Logical local port number, preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + // Local socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + // Local socket port number. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + // The internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + // The name of the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + // The mobile carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + // The mobile carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. 
+const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + // The column number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User-Agent](https://www.rfc- + // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- + // length) header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- + // length) header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Client +const ( + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. 
+ // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// Semantic Convention for HTTP Server +const ( + // The URI scheme identifying the used protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + // The matched route (path template in the format used by the respective server + // framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and the URI + // path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which would + // identify the network-level peer, which may be a proxy. + + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request + // field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// DynamoDB.ListTables +const ( + // The value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + // The the number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// DynamoDB.Query +const ( + // The value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// DynamoDB.Scan +const ( + // The value of the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + // The value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + // The value of the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + // The value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` + // request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. +const ( + // The name of the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + // The type of the operation being executed. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + // The GraphQL document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// Semantic convention describing per-message attributes populated on messaging spans or links. +const ( + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + // The compressed size of the message payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// Semantic convention for attributes that describe messaging destination on broker +const ( + // The message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + // The kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + // Low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. 
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for attributes that describe messaging source on broker +const ( + // The message source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or other + // entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely identify + // the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + // The kind of message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + // Low cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would be a + // source name involving a user name or product id. Although the source name in + // this case is of high cardinality, the underlying template is of low cardinality + // and can be effectively used for grouping and aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + // A boolean that is true if the message source is temporary and might not exist + // anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + // A boolean that is true if the message source is anonymous (could be unnamed or + // have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// This document defines general attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // A string identifying the kind of messaging operation as defined in the + // [Operation names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an operation on + // a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use + // it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // The identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both are + // present, or only `messaging.kafka.consumer.group`. For brokers, such as + // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message.id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + // Partition the message is received from. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the + // value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + // The unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay and delay + // time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay and + // delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + // It is essential for FIFO message. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + // Type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + // The secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + // Model of message consumption. 
This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default version + // (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 00b79bcc25c..bda8f7cbfae 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.11.2" + return "1.12.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 611879def4f..66f45622215 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.11.2 + version: v1.12.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -34,7 +34,7 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.34.0 + version: v0.35.0 modules: - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus diff --git a/vendor/modules.txt b/vendor/modules.txt index 89b0234db29..49df6fed85c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -989,7 +989,7 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.9.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.11.2 +# go.opentelemetry.io/otel v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1004,6 +1004,7 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.12.0 +go.opentelemetry.io/otel/semconv/v1.17.0 # go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 ## explicit; go 1.18 go.opentelemetry.io/otel/bridge/opentracing @@ -1030,7 +1031,7 @@ go.opentelemetry.io/otel/metric/instrument/syncfloat64 go.opentelemetry.io/otel/metric/instrument/syncint64 go.opentelemetry.io/otel/metric/internal/global go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/sdk v1.11.2 +# go.opentelemetry.io/otel/sdk v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal @@ -1038,7 +1039,7 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.11.2 +# go.opentelemetry.io/otel/trace v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel/trace # go.opentelemetry.io/proto/otlp v0.19.0 From a9e26b3028e2c9764401aad0b5c9b7fb246dff90 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 31 Jan 2023 11:20:33 -0800 Subject: [PATCH 060/133] Update thanos and prometheus (#5114) * Update thanos Signed-off-by: Alan Protasio * Push reduce one hash operation of Labels. 
Signed-off-by: tanghengjian <1040104807@qq.com> --------- Signed-off-by: Alan Protasio Signed-off-by: tanghengjian <1040104807@qq.com> Co-authored-by: tanghengjian <1040104807@qq.com> Signed-off-by: Alex Le --- CHANGELOG.md | 1 + go.mod | 32 +- go.sum | 97 +- pkg/ingester/active_series.go | 31 +- pkg/ingester/active_series_test.go | 41 +- pkg/ingester/ingester.go | 6 +- .../go/compute/internal/version.go | 2 +- .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/metadata.go | 1 + vendor/cloud.google.com/go/iam/CHANGES.md | 14 + .../go/iam/apiv1/iampb}/iam_policy.pb.go | 4 +- .../go/iam/apiv1/iampb}/options.pb.go | 4 +- .../go/iam/apiv1/iampb}/policy.pb.go | 76 +- .../google/pprof/profile/legacy_profile.go | 1 - .../github.com/google/pprof/profile/merge.go | 99 + .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 8 + .../googleapis/gax-go/v2/apierror/apierror.go | 45 +- .../googleapis/gax-go/v2/internal/version.go | 2 +- vendor/github.com/grafana/regexp/README.md | 7 - vendor/github.com/grafana/regexp/backtrack.go | 26 +- vendor/github.com/grafana/regexp/exec.go | 71 +- vendor/github.com/grafana/regexp/onepass.go | 26 +- vendor/github.com/grafana/regexp/regexp.go | 40 +- .../github.com/grafana/regexp/syntax/prog.go | 25 +- .../prometheus/common/model/value.go | 264 ++- .../prometheus/common/model/value_float.go | 101 + .../common/model/value_histogram.go | 171 ++ .../prometheus/common/model/value_type.go | 83 + .../prometheus/discovery/file/file.go | 16 +- .../prometheus/model/labels/labels.go | 15 + .../prometheus/model/relabel/relabel.go | 29 +- .../prometheus/notifier/notifier.go | 38 - .../prometheus/prometheus/prompb/custom.go | 17 + .../prometheus/prometheus/promql/functions.go | 2 +- .../prometheus/promql/parser/parse.go | 2 +- .../prometheus/prometheus/promql/test.go | 4 +- .../prometheus/prometheus/rules/manager.go | 9 + .../prometheus/prometheus/scrape/scrape.go | 4 + .../prometheus/storage/interface.go | 3 +- .../prometheus/storage/remote/codec.go | 11 +- .../prometheus/storage/remote/read_handler.go | 4 + .../storage/remote/write_handler.go | 19 +- .../prometheus/prometheus/tsdb/compact.go | 2 +- .../prometheus/prometheus/tsdb/db.go | 6 +- .../prometheus/prometheus/tsdb/head_append.go | 10 +- .../prometheus/prometheus/tsdb/index/index.go | 68 +- .../prometheus/prometheus/tsdb/querier.go | 33 +- .../execution/function/functions.go | 9 + .../pkg/block/indexheader/binary_reader.go | 450 +++-- .../block/indexheader/lazy_binary_reader.go | 39 +- .../thanos-io/thanos/pkg/dedup/iter.go | 102 +- .../thanos/pkg/dedup/pushdown_iter.go | 5 +- .../thanos-io/thanos/pkg/gate/gate.go | 9 +- .../thanos-io/thanos/pkg/store/bucket.go | 29 +- .../thanos-io/thanos/pkg/store/proxy.go | 4 +- .../thanos-io/thanos/pkg/store/recover.go | 52 + .../thanos/pkg/store/storepb/custom.go | 28 + .../thanos/pkg/store/storepb/inprocess.go | 8 + .../pkg/store/storepb/prompb/types.pb.go | 1652 +++++++++++++++-- .../pkg/store/storepb/prompb/types.proto | 68 + .../thanos/pkg/store/storepb/types.pb.go | 78 +- .../thanos/pkg/store/storepb/types.proto | 1 + vendor/go.opencensus.io/Makefile | 8 +- vendor/go.opencensus.io/opencensus.go | 2 +- .../plugin/ocgrpc/client_metrics.go | 9 + .../plugin/ocgrpc/server_metrics.go | 9 + .../plugin/ocgrpc/stats_common.go | 23 +- .../go.opencensus.io/plugin/ochttp/server.go | 8 +- vendor/go.opencensus.io/stats/doc.go | 7 +- .../go.opencensus.io/stats/internal/record.go | 6 + vendor/go.opencensus.io/stats/record.go | 21 +- 
.../stats/view/aggregation.go | 6 +- .../go.opencensus.io/stats/view/collector.go | 9 +- vendor/go.opencensus.io/stats/view/doc.go | 2 +- vendor/go.opencensus.io/stats/view/worker.go | 27 +- vendor/go.opencensus.io/tag/profile_19.go | 1 + vendor/go.opencensus.io/tag/profile_not19.go | 1 + vendor/go.opencensus.io/trace/doc.go | 13 +- vendor/go.opencensus.io/trace/lrumap.go | 2 +- vendor/go.opencensus.io/trace/trace_go11.go | 1 + .../go.opencensus.io/trace/trace_nongo11.go | 1 + .../net/http/otelhttp/handler.go | 23 +- .../net/http/otelhttp/version.go | 2 +- .../instrument/asyncfloat64/asyncfloat64.go | 16 +- .../instrument/asyncint64/asyncint64.go | 16 +- .../instrument/syncfloat64/syncfloat64.go | 8 + .../metric/instrument/syncint64/syncint64.go | 8 + .../go.opentelemetry.io/otel/metric/meter.go | 4 + vendor/golang.org/x/exp/slices/slices.go | 11 +- vendor/golang.org/x/exp/slices/sort.go | 2 +- .../api/googleapi/googleapi.go | 12 + .../iamcredentials/v1/iamcredentials-gen.go | 24 +- .../api/internal/gensupport/error.go | 24 + .../google.golang.org/api/internal/version.go | 2 +- vendor/google.golang.org/api/option/option.go | 4 +- .../api/storage/v1/storage-gen.go | 284 +-- .../api/transport/grpc/dial.go | 48 +- .../api/transport/grpc/dial_socketopt.go | 6 +- .../googleapis/api/annotations/client.pb.go | 1558 +++++++++++++++- .../googleapis/api/launch_stage.pb.go | 203 ++ .../genproto/googleapis/iam/v1/alias.go | 208 +++ vendor/modules.txt | 34 +- 103 files changed, 5464 insertions(+), 1302 deletions(-) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/iam_policy.pb.go (99%) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/options.pb.go (99%) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/policy.pb.go (94%) create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 vendor/github.com/prometheus/common/model/value_type.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/store/recover.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/error.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 44ef946c98a..cc6bb3a17f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ * [ENHANCEMENT] Query Frontend: Log Vertical sharding information when `query_stats_enabled` is enabled. #5037 * [ENHANCEMENT] Ingester: The metadata APIs should honour `querier.query-ingesters-within` when `querier.query-store-for-labels-enabled` is true. #5027 * [ENHANCEMENT] Query Frontend: Skip instant query roundtripper if sharding is not applicable. #5062 +* [ENHANCEMENT] Push reduce one hash operation of Labels. #4945 #5114 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . 
#5000 diff --git a/go.mod b/go.mod index 06c92fb8054..2d7a6a3d430 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/gorilla/mux v1.8.0 - github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/hashicorp/consul/api v1.18.0 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -44,15 +44,15 @@ require ( github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.39.0 - github.com/prometheus/prometheus v0.40.7 + github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 + github.com/prometheus/prometheus v0.41.0 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 - github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 + github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 - github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a + github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.6 @@ -77,9 +77,9 @@ require ( require ( cloud.google.com/go v0.105.0 // indirect - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - cloud.google.com/go/iam v0.6.0 // indirect + cloud.google.com/go/compute v1.13.0 // indirect + cloud.google.com/go/compute/metadata v0.2.2 // indirect + cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/storage v1.27.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect @@ -133,10 +133,10 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e // indirect + github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.6.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -190,20 +190,20 @@ require ( github.com/weaveworks/promrus v1.2.0 // indirect github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect go.mongodb.org/mongo-driver v1.11.0 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect 
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.33.0 // indirect + go.opentelemetry.io/otel/metric v0.34.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect + golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/oauth2 v0.3.0 // indirect golang.org/x/sys v0.4.0 // indirect @@ -211,9 +211,9 @@ require ( golang.org/x/tools v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect - google.golang.org/api v0.102.0 // indirect + google.golang.org/api v0.104.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect + google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.66.6 // indirect diff --git a/go.sum b/go.sum index 0ce80323fbf..ddd7827eeb4 100644 --- a/go.sum +++ b/go.sum @@ -47,17 +47,17 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= +cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -475,7 +475,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/digitalocean/godo v1.88.0 h1:SAEdw63xOMmzlwCeCWjLH1GcyDPUjbSAR1Bh7VELxzc= +github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -513,12 +513,12 @@ github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= -github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= +github.com/efficientgo/e2e v0.14.1-0.20230119090947-fa7ceb0197c5 h1:N1fHVcNEPMJNB93sxT6icl5yvoFxa2sp3/1NnVlrAFc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -533,7 +533,7 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/envoyproxy/protoc-gen-validate v0.6.13 h1:TvDcILLkjuZV3ER58VkBmncKsLUBqBDxra/XctCzuMM= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= @@ -803,8 +803,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io= -github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 h1:D5iJJZKAi0rU4e/5E58BkrnN+xeCDjAIqcm1GGxAGSI= +github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -822,11 +822,11 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= +github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -844,8 +844,8 @@ github.com/gorilla/websocket v1.5.0 
h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b h1:UlCBLaqvS4wVYNrMKSfqTBVsed/EOY9dnenhYZMUnrA= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= -github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -920,11 +920,11 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005 h1:jKwXhVS4F7qk0g8laz+Anz0g/6yaSJ3HqmSAuSNLUcA= +github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= +github.com/hetznercloud/hcloud-go v1.38.0 h1:K6Pd/mMdcLfBhvwG39qyAaacp4pCS3dKa8gChmLKxLg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -1288,7 +1288,7 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oracle/oci-go-sdk/v65 v65.13.0 h1:0+9ea5goYfhI3/MPfbIQU6yzHYWE6sCk6VuUepxk5Nk= -github.com/ovh/go-ovh v1.1.0 h1:bHXZmw8nTgZin4Nv7JuaLs0KG5x54EQR7migYTd1zrk= +github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1364,8 +1364,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= 
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 h1:hK3y58iUBjRFZ6kFNJTWsES1GnVKsqEYUeiyeRXridQ= +github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= @@ -1386,8 +1386,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.40.7 h1:cYtp4YrR9M99YpTUfXbei/HjIJJ+En23NKsTCeZ2U2w= -github.com/prometheus/prometheus v0.40.7/go.mod h1:nO+vI0cJo1ezp2DPGw5NEnTlYHGRpBFrqE4zb9O0g0U= +github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns= +github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1416,7 +1416,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -1508,12 +1508,12 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 h1:54iqf5p40TFGTc2Dp791P1/ncO2sTqvXdMLXqNpC/Gc= -github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4/go.mod 
h1:d52Wfzxs6L3xhc2snodyWvM2bZMMVn0XT2q4vsfBPVo= +github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 h1:D0ruzvS/U+49eMLg/7IIYK2iMdVP4PrJLpVHgguMVkg= +github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4/go.mod h1:d52Wfzxs6L3xhc2snodyWvM2bZMMVn0XT2q4vsfBPVo= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= -github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a h1:oN3VupYNkavPRvdXwq71p54SAFSbOGvL0qL7CeKFrJ0= -github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a/go.mod h1:xZKr/xpbijYM8EqTMpurqgKItyIPFy1wqUd/DMTDu4M= +github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 h1:8/GjzkmjltOUSO+7FZQhhpTj7SPDXZwY5bWassilNTo= +github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00/go.mod h1:oTvgV468yMvGgECD06eWuVWw0OBTHcznyT2BZl5rgeA= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1609,14 +1609,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 h1:aUEBEdCa6iamGzg6fuYxDA8ThxvOG240mAvWDU+XLio= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= go.opentelemetry.io/contrib/propagators/aws v1.12.0 h1:n3lNyZs2ytVEfFhcn2QO5Z80lgrMXyP1Ncx0C7rUU8A= @@ -1645,8 +1646,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKP go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E= -go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI= +go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= +go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= @@ -1732,8 +1733,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE= -golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4= +golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2222,8 +2223,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= +google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2329,8 +2330,8 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c 
h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= +google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2457,13 +2458,13 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= +k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2472,7 +2473,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= +k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= @@ -2492,17 +2493,17 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.0 h1:lyJt0TWMPaGoODa8B8bUuxgHS3W/m/bNr2cca3brA/g= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi 
v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= @@ -2536,7 +2537,7 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go index ba3e2760d37..5285f279639 100644 --- a/pkg/ingester/active_series.go +++ b/pkg/ingester/active_series.go @@ -1,17 +1,12 @@ package ingester import ( - "hash" "math" "sync" "time" - "github.com/cespare/xxhash" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -53,30 +48,10 @@ func NewActiveSeries() *ActiveSeries { } // Updates series timestamp to 'now'. Function is called to make a copy of labels if entry doesn't exist yet. 
-func (c *ActiveSeries) UpdateSeries(series labels.Labels, now time.Time, labelsCopy func(labels.Labels) labels.Labels) { - fp := fingerprint(series) - stripeID := fp % numActiveSeriesStripes - - c.stripes[stripeID].updateSeriesTimestamp(now, series, fp, labelsCopy) -} - -var sep = []byte{model.SeparatorByte} - -var hashPool = sync.Pool{New: func() interface{} { return xxhash.New() }} - -func fingerprint(series labels.Labels) uint64 { - sum := hashPool.Get().(hash.Hash64) - defer hashPool.Put(sum) - - sum.Reset() - for _, label := range series { - _, _ = sum.Write(util.YoloBuf(label.Name)) - _, _ = sum.Write(sep) - _, _ = sum.Write(util.YoloBuf(label.Value)) - _, _ = sum.Write(sep) - } +func (c *ActiveSeries) UpdateSeries(series labels.Labels, hash uint64, now time.Time, labelsCopy func(labels.Labels) labels.Labels) { + stripeID := hash % numActiveSeriesStripes - return sum.Sum64() + c.stripes[stripeID].updateSeriesTimestamp(now, series, hash, labelsCopy) } // Purge removes expired entries from the cache. This function should be called diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index ab3c63db15d..02a5477194c 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -8,6 +8,7 @@ import ( "sync" "testing" "time" + "unsafe" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" @@ -15,20 +16,25 @@ import ( func copyFn(l labels.Labels) labels.Labels { return l } +func fromLabelToLabels(ls []labels.Label) labels.Labels { + return *(*labels.Labels)(unsafe.Pointer(&ls)) +} + func TestActiveSeries_UpdateSeries(t *testing.T) { ls1 := []labels.Label{{Name: "a", Value: "1"}} ls2 := []labels.Label{{Name: "a", Value: "2"}} c := NewActiveSeries() assert.Equal(t, 0, c.Active()) - - c.UpdateSeries(ls1, time.Now(), copyFn) + labels1Hash := fromLabelToLabels(ls1).Hash() + labels2Hash := fromLabelToLabels(ls2).Hash() + c.UpdateSeries(ls1, labels1Hash, time.Now(), copyFn) assert.Equal(t, 1, c.Active()) - c.UpdateSeries(ls1, time.Now(), copyFn) + c.UpdateSeries(ls1, labels1Hash, time.Now(), copyFn) assert.Equal(t, 1, c.Active()) - c.UpdateSeries(ls2, time.Now(), copyFn) + c.UpdateSeries(ls2, labels2Hash, time.Now(), copyFn) assert.Equal(t, 2, c.Active()) } @@ -46,7 +52,7 @@ func TestActiveSeries_Purge(t *testing.T) { c := NewActiveSeries() for i := 0; i < len(series); i++ { - c.UpdateSeries(series[i], time.Unix(int64(i), 0), copyFn) + c.UpdateSeries(series[i], fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), copyFn) } c.Purge(time.Unix(int64(ttl+1), 0)) @@ -62,24 +68,23 @@ func TestActiveSeries_PurgeOpt(t *testing.T) { metric := labels.NewBuilder(labels.FromStrings("__name__", "logs")) ls1 := metric.Set("_", "ypfajYg2lsv").Labels(nil) ls2 := metric.Set("_", "KiqbryhzUpn").Labels(nil) - c := NewActiveSeries() now := time.Now() - c.UpdateSeries(ls1, now.Add(-2*time.Minute), copyFn) - c.UpdateSeries(ls2, now, copyFn) + c.UpdateSeries(ls1, ls1.Hash(), now.Add(-2*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) - c.UpdateSeries(ls1, now.Add(-1*time.Minute), copyFn) - c.UpdateSeries(ls2, now, copyFn) + c.UpdateSeries(ls1, ls1.Hash(), now.Add(-1*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) // This will *not* update the series, since there is already newer timestamp. 
- c.UpdateSeries(ls2, now.Add(-1*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now.Add(-1*time.Minute), copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) @@ -105,7 +110,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) wg := &sync.WaitGroup{} start := make(chan struct{}) max := int(math.Ceil(float64(b.N) / float64(goroutines))) - + labelhash := series.Hash() for i := 0; i < goroutines; i++ { wg.Add(1) go func() { @@ -116,7 +121,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) for ix := 0; ix < max; ix++ { now = now.Add(time.Duration(ix) * time.Millisecond) - c.UpdateSeries(series, now, copyFn) + c.UpdateSeries(series, labelhash, now, copyFn) } }() } @@ -137,15 +142,17 @@ func BenchmarkActiveSeries_UpdateSeries(b *testing.B) { name := nameBuf.String() series := make([]labels.Labels, b.N) + labelhash := make([]uint64, b.N) for s := 0; s < b.N; s++ { series[s] = labels.Labels{{Name: name, Value: name + strconv.Itoa(s)}} + labelhash[s] = series[s].Hash() } now := time.Now().UnixNano() b.ResetTimer() for ix := 0; ix < b.N; ix++ { - c.UpdateSeries(series[ix], time.Unix(0, now+int64(ix)), copyFn) + c.UpdateSeries(series[ix], labelhash[ix], time.Unix(0, now+int64(ix)), copyFn) } } @@ -165,8 +172,10 @@ func benchmarkPurge(b *testing.B, twice bool) { c := NewActiveSeries() series := [numSeries]labels.Labels{} + labelhash := [numSeries]uint64{} for s := 0; s < numSeries; s++ { series[s] = labels.Labels{{Name: "a", Value: strconv.Itoa(s)}} + labelhash[s] = series[s].Hash() } for i := 0; i < b.N; i++ { @@ -175,9 +184,9 @@ func benchmarkPurge(b *testing.B, twice bool) { // Prepare series for ix, s := range series { if ix < numExpiresSeries { - c.UpdateSeries(s, now.Add(-time.Minute), copyFn) + c.UpdateSeries(s, labelhash[ix], now.Add(-time.Minute), copyFn) } else { - c.UpdateSeries(s, now, copyFn) + c.UpdateSeries(s, labelhash[ix], now, copyFn) } } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index d032117b4d2..de512672f9b 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -967,7 +967,9 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte // has sorted labels once hit the ingester). // Look up a reference for this series. - ref, copiedLabels := app.GetRef(cortexpb.FromLabelAdaptersToLabels(ts.Labels)) + tsLabels := cortexpb.FromLabelAdaptersToLabels(ts.Labels) + tsLabelsHash := tsLabels.Hash() + ref, copiedLabels := app.GetRef(tsLabels, tsLabelsHash) // To find out if any sample was added to this series, we keep old value. oldSucceededSamplesCount := succeededSamplesCount @@ -1037,7 +1039,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte } if i.cfg.ActiveSeriesMetricsEnabled && succeededSamplesCount > oldSucceededSamplesCount { - db.activeSeries.UpdateSeries(cortexpb.FromLabelAdaptersToLabels(ts.Labels), startAppend, func(l labels.Labels) labels.Labels { + db.activeSeries.UpdateSeries(tsLabels, tsLabelsHash, startAppend, func(l labels.Labels) labels.Labels { // we must already have copied the labels if succeededSamplesCount has been incremented. 
return copiedLabels }) diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 5ac4a843e11..efedadbea25 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.12.1" +const Version = "1.13.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 8631b6d6d2c..6e3ee8d6ab1 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + ## [0.1.0] (2022-10-26) Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 50538b1d343..d4aad9bf395 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, Timeout: 5 * time.Second, } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index c4ead20e09e..ced217827b0 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + ## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 6fbf54f4489..2793098aabc 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/iam_policy.proto -package iam +package iampb import ( context "context" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index abea46d9bc1..835f2171998 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/options.proto -package iam +package iampb import ( reflect "reflect" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go similarity index 94% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 5869d92070e..ec7777a7687 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/policy.proto -package iam +package iampb import ( reflect "reflect" @@ -279,11 +279,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a policy - // that includes conditions + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -396,47 +396,43 @@ type Binding struct { // Specifies the principals requesting access for a Cloud Platform resource. // `members` can have the following values: // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . 
// + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. 
// diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 9ba9a77c924..8d07fd6c27c 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -865,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples. for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { - line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 90c5723d78f..4b66282cb8e 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -566,3 +566,102 @@ func (lm locationIDMap) set(id uint64, loc *Location) { } lm.sparse[id] = loc } + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. 
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 7ea75bfdc37..d88960b7ef1 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.6.0" + "v2": "2.7.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index fb8463a2cb1..b75170f2227 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,13 @@ # Changelog +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + ## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 787a619e373..aa6be1304f1 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -233,30 +233,49 @@ func (a *APIError) Metadata() map[string]string { } -// FromError parses a Status error or a googleapi.Error and builds an APIError. -func FromError(err error) (*APIError, bool) { - if err == nil { - return nil, false - } - - ae := APIError{err: err} +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. 
+func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 065312e827c..0ba5da1dd1e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.6.0" +const Version = "2.7.0" diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md index ff6e7fccd1b..756e60dcfdb 100644 --- a/vendor/github.com/grafana/regexp/README.md +++ b/vendor/github.com/grafana/regexp/README.md @@ -10,10 +10,3 @@ The `main` branch is non-optimised: switch over to [`speedup`](https://github.co ## Benchmarks: ![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png) - -## Links to upstream changes: -* [regexp: allow patterns with no alternates to be one-pass](https://go-review.googlesource.com/c/go/+/353711) -* [regexp: speed up onepass prefix check](https://go-review.googlesource.com/c/go/+/354909) -* [regexp: handle prefix string with fold-case](https://go-review.googlesource.com/c/go/+/358756) -* [regexp: avoid copying each instruction executed](https://go-review.googlesource.com/c/go/+/355789) -* [regexp: allow prefix string anchored at beginning](https://go-review.googlesource.com/c/go/+/377294) diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go index a8111a7eb8e..0739f5ff588 100644 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -15,9 +15,8 @@ package regexp import ( + "regexp/syntax" "sync" - - "github.com/grafana/regexp/syntax" ) // A job is an entry on the backtracker's job stack. It holds @@ -337,19 +336,9 @@ func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []in // but we are not clearing visited between calls to TrySearch, // so no work is duplicated and it ends up still being linear. 
width := -1 - for pos <= end && width != 0 { - if len(b.cap) > 0 { - b.cap[0] = pos - } - if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { - // Match must be leftmost; done. - goto Match - } - _, width = i.step(pos) - pos += width - + for ; pos <= end && width != 0; pos += width { if len(re.prefix) > 0 { - // Match requires literal prefix; fast search for next occurrence. + // Match requires literal prefix; fast search for it. advance := i.index(re, pos) if advance < 0 { freeBitState(b) @@ -357,6 +346,15 @@ func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []in } pos += advance } + + if len(b.cap) > 0 { + b.cap[0] = pos + } + if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + // Match must be leftmost; done. + goto Match + } + _, width = i.step(pos) } freeBitState(b) return nil diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go index 11a5eebfe25..3fc4b684feb 100644 --- a/vendor/github.com/grafana/regexp/exec.go +++ b/vendor/github.com/grafana/regexp/exec.go @@ -6,9 +6,8 @@ package regexp import ( "io" + "regexp/syntax" "sync" - - "github.com/grafana/regexp/syntax" ) // A queue is a 'sparse array' holding pending threads of execution. @@ -205,8 +204,6 @@ func (m *machine) match(i input, pos int) bool { // Have match; finished exploring alternatives. break } - // Note we don't check foldCase here, because Unicode folding is complicated; - // just let it fall through to EqualFold on the whole string. if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { // Match requires literal prefix; fast search for it. advance := i.index(m.re, pos) @@ -404,46 +401,45 @@ func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap in } m := newOnePassMachine() - matched := false - i, _ := m.inputs.init(ir, ib, is) - - r, r1 := endOfText, endOfText - width, width1 := 0, 0 - var flag lazyFlag - var pc int - var inst *onePassInst - - // If there is a simple literal prefix, skip over it. - if pos == 0 && len(re.prefix) > 0 && i.canCheckPrefix() { - // Match requires literal prefix; fast search for it. - if !i.hasPrefix(re) { - goto Return - } - pos += len(re.prefix) - pc = int(re.prefixEnd) - } else { - pc = re.onepass.Start - } - if cap(m.matchcap) < ncap { m.matchcap = make([]int, ncap) } else { m.matchcap = m.matchcap[:ncap] } + matched := false for i := range m.matchcap { m.matchcap[i] = -1 } + i, _ := m.inputs.init(ir, ib, is) + + r, r1 := endOfText, endOfText + width, width1 := 0, 0 r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag if pos == 0 { flag = newLazyFlag(-1, r) } else { flag = i.context(pos) } + pc := re.onepass.Start + inst := &re.onepass.Inst[pc] // If there is a simple literal prefix, skip over it. - if r != endOfText { + if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && + len(re.prefix) > 0 && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. 
+ if !i.hasPrefix(re) { + goto Return + } + pos += len(re.prefix) + r, width = i.step(pos) r1, width1 = i.step(pos + width) + flag = i.context(pos) + pc = int(re.prefixEnd) } for { inst = &re.onepass.Inst[pc] @@ -532,29 +528,6 @@ func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap i return nil } - // Check prefix match before allocating data structures - if len(re.prefix) > 0 && r == nil { - if re.cond&syntax.EmptyBeginText != 0 { // anchored - if b != nil && !(&inputBytes{str: b[pos:]}).hasPrefix(re) { - return nil - } - if s != "" && !(&inputString{str: s[pos:]}).hasPrefix(re) { - return nil - } - } else { // non-anchored - var advance int - if b != nil { - advance = (&inputBytes{str: b}).index(re, pos) - } else { - advance = (&inputString{str: s}).index(re, pos) - } - if advance < 0 { - return nil - } - pos += advance - } - } - if re.onepass != nil { return re.doOnePass(r, b, s, pos, ncap, dstCap) } diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go index d3613bceaf7..bc47f4c4a83 100644 --- a/vendor/github.com/grafana/regexp/onepass.go +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -5,12 +5,11 @@ package regexp import ( + "regexp/syntax" "sort" "strings" "unicode" "unicode/utf8" - - "github.com/grafana/regexp/syntax" ) // "One-pass" regexp execution. @@ -39,10 +38,10 @@ type onePassInst struct { // is the entire match. Pc is the index of the last rune instruction // in the string. The OnePassPrefix skips over the mandatory // EmptyBeginText -func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, pc uint32) { +func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { i := &p.Inst[p.Start] if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { - return "", i.Op == syntax.InstMatch, false, uint32(p.Start) + return "", i.Op == syntax.InstMatch, uint32(p.Start) } pc = i.Out i = &p.Inst[pc] @@ -52,13 +51,12 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, } // Avoid allocation of buffer if prefix is empty. if iop(i) != syntax.InstRune || len(i.Rune) != 1 { - return "", i.Op == syntax.InstMatch, false, uint32(p.Start) + return "", i.Op == syntax.InstMatch, uint32(p.Start) } - foldCase = (syntax.Flags(i.Arg)&syntax.FoldCase != 0) // Have prefix; gather characters. var buf strings.Builder - for iop(i) == syntax.InstRune && len(i.Rune) == 1 && (syntax.Flags(i.Arg)&syntax.FoldCase != 0) == foldCase && i.Rune[0] != utf8.RuneError { + for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError { buf.WriteRune(i.Rune[0]) pc, i = i.Out, &p.Inst[i.Out] } @@ -67,7 +65,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, p.Inst[i.Out].Op == syntax.InstMatch { complete = true } - return buf.String(), complete, foldCase, pc + return buf.String(), complete, pc } // OnePassNext selects the next actionable state of the prog, based on the input character. @@ -474,20 +472,12 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) { syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { return nil } - hasAlt := false - for _, inst := range prog.Inst { - if inst.Op == syntax.InstAlt || inst.Op == syntax.InstAltMatch { - hasAlt = true - break - } - } - // If we have alternates, every instruction leading to InstMatch must be EmptyEndText. - // Also, any match on empty text must be $. 
+ // every instruction leading to InstMatch must be EmptyEndText for _, inst := range prog.Inst { opOut := prog.Inst[inst.Out].Op switch inst.Op { default: - if opOut == syntax.InstMatch && hasAlt { + if opOut == syntax.InstMatch { return nil } case syntax.InstAlt, syntax.InstAltMatch: diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go index 1479e373174..7958a397285 100644 --- a/vendor/github.com/grafana/regexp/regexp.go +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -72,13 +72,12 @@ package regexp import ( "bytes" "io" + "regexp/syntax" "strconv" "strings" "sync" "unicode" "unicode/utf8" - - "github.com/grafana/regexp/syntax" ) // Regexp is the representation of a compiled regular expression. @@ -95,7 +94,6 @@ type Regexp struct { prefixBytes []byte // prefix, as a []byte prefixRune rune // first rune in prefix prefixEnd uint32 // pc for last rune in prefix - prefixFoldCase bool // check prefix case-insensitive mpool int // pool for machines matchcap int // size of recorded match lengths prefixComplete bool // prefix is the entire regexp @@ -201,10 +199,10 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { minInputLen: minInputLen(re), } if regexp.onepass == nil { - regexp.prefix, regexp.prefixComplete, regexp.prefixFoldCase = prog.PrefixAndCase() + regexp.prefix, regexp.prefixComplete = prog.Prefix() regexp.maxBitStateLen = maxBitStateLen(prog) } else { - regexp.prefix, regexp.prefixComplete, regexp.prefixFoldCase, regexp.prefixEnd = onePassPrefix(prog) + regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) } if regexp.prefix != "" { // TODO(rsc): Remove this allocation by adding @@ -406,26 +404,10 @@ func (i *inputString) canCheckPrefix() bool { } func (i *inputString) hasPrefix(re *Regexp) bool { - if re.prefixFoldCase { - n := len(re.prefix) - return len(i.str) >= n && strings.EqualFold(i.str[0:n], re.prefix) - } return strings.HasPrefix(i.str, re.prefix) } func (i *inputString) index(re *Regexp, pos int) int { - if re.prefixFoldCase { - n := len(re.prefix) - // Brute-force compare at every position; could replace with sophisticated algo like strings.Index - for p := pos; p+n <= len(i.str); { - if strings.EqualFold(i.str[p:p+n], re.prefix) { - return p - pos - } - _, w := i.step(p) - p += w - } - return -1 - } return strings.Index(i.str[pos:], re.prefix) } @@ -469,26 +451,10 @@ func (i *inputBytes) canCheckPrefix() bool { } func (i *inputBytes) hasPrefix(re *Regexp) bool { - if re.prefixFoldCase { - n := len(re.prefixBytes) - return len(i.str) >= n && bytes.EqualFold(i.str[0:n], re.prefixBytes) - } return bytes.HasPrefix(i.str, re.prefixBytes) } func (i *inputBytes) index(re *Regexp, pos int) int { - if re.prefixFoldCase { - n := len(re.prefixBytes) - // Brute-force compare at every position; could replace with sophisticated algo like bytes.Index - for p := pos; p+n <= len(i.str); { - if bytes.EqualFold(i.str[p:p+n], re.prefixBytes) { - return p - pos - } - _, w := i.step(p) - p += w - } - return -1 - } return bytes.Index(i.str[pos:], re.prefixBytes) } diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go index 2bf1dcaac02..896cdc42c2a 100644 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -146,37 +146,20 @@ func (i *Inst) op() InstOp { // regexp must start with. Complete is true if the prefix // is the entire match. 
func (p *Prog) Prefix() (prefix string, complete bool) { - prefix, complete, foldCase := p.PrefixAndCase() - if foldCase { - return "", false - } - return prefix, complete -} - -// Prefix returns a literal string that all matches for the -// regexp must start with. Complete is true if the prefix -// is the entire match. FoldCase is true if the string should -// match in upper or lower case. -func (p *Prog) PrefixAndCase() (prefix string, complete bool, foldCase bool) { - i := &p.Inst[p.Start] - // Skip any no-op, capturing or begin-text instructions - for i.Op == InstNop || i.Op == InstCapture || (i.Op == InstEmptyWidth && EmptyOp(i.Arg)&EmptyBeginText != 0) { - i = &p.Inst[i.Out] - } + i := p.skipNop(uint32(p.Start)) // Avoid allocation of buffer if prefix is empty. if i.op() != InstRune || len(i.Rune) != 1 { - return "", i.Op == InstMatch, false + return "", i.Op == InstMatch } // Have prefix; gather characters. var buf strings.Builder - foldCase = (Flags(i.Arg)&FoldCase != 0) - for i.op() == InstRune && len(i.Rune) == 1 && (Flags(i.Arg)&FoldCase != 0) == foldCase && i.Rune[0] != utf8.RuneError { + for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 && i.Rune[0] != utf8.RuneError { buf.WriteRune(i.Rune[0]) i = p.skipNop(i.Out) } - return buf.String(), i.Op == InstMatch, foldCase + return buf.String(), i.Op == InstMatch } // StartCond returns the leading empty-width conditions that must diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a283..543399e36e6 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. 
-func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: *s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: *s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,43 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } +type sampleHistogramPairPtr struct { + Timestamp Time + Histogram *SampleHistogram +} + +func (s *sampleHistogramPairPtr) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + // UnmarshalJSON implements json.Unmarshaler. 
+// TODO: simplify and remove the need for both sampleHistogramPairPtr and SampleHistogramPair func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram sampleHistogramPairPtr `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: sampleHistogramPairPtr{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +141,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +193,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 00000000000..9266fb9854c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. 
+func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 00000000000..a4be0bca311 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,171 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + Histogram SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s SampleHistogramPair) 
String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(&o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 00000000000..726c50ee638 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index 5aa7f2e5f43..c45595c6ddc 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -39,18 +39,23 @@ import ( ) var ( + fileSDReadErrorsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }) fileSDScanDuration = prometheus.NewSummary( prometheus.SummaryOpts{ Name: "prometheus_sd_file_scan_duration_seconds", Help: "The duration of the File-SD scan in seconds.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) - fileSDReadErrorsCount = prometheus.NewCounter( + fileSDTimeStamp = NewTimestampCollector() + fileWatcherErrorsCount = prometheus.NewCounter( prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", + Name: "prometheus_sd_file_watcher_errors_total", + Help: "The 
number of File-SD errors caused by filesystem watch failures.", }) - fileSDTimeStamp = NewTimestampCollector() patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) @@ -62,7 +67,7 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDScanDuration, fileSDReadErrorsCount, fileSDTimeStamp) + prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. @@ -237,6 +242,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + fileWatcherErrorsCount.Inc() return } d.watcher = watcher diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 48237bdc0ee..aafba218aa9 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -20,6 +20,7 @@ import ( "strconv" "github.com/cespare/xxhash/v2" + "github.com/prometheus/common/model" ) // Well-known label names used by Prometheus components. @@ -134,6 +135,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { } // Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. func (ls Labels) Hash() uint64 { // Use xxhash.Sum64(b) for fast path as it's faster. b := make([]byte, 0, 1024) @@ -311,6 +313,19 @@ func (ls Labels) WithoutEmpty() Labels { return ls } +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + for _, l := range ls { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return false + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return false + } + } + return true +} + // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index e0d7f6ddf54..c731f6e0d3b 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -45,6 +45,10 @@ const ( Keep Action = "keep" // Drop drops targets for which the input does match the regex. Drop Action = "drop" + // KeepEqual drops targets for which the input does not match the target. + KeepEqual Action = "keepequal" + // Drop drops targets for which the input does match the target. + DropEqual Action = "dropequal" // HashMod sets a label to the modulus of a hash of labels. HashMod Action = "hashmod" // LabelMap copies labels to other labelnames based on a regex. 
@@ -66,7 +70,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } switch act := Action(strings.ToLower(s)); act { - case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase: + case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase, KeepEqual, DropEqual: *a = act return nil } @@ -109,13 +113,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Modulus == 0 && c.Action == HashMod { return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") } - if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" { + if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) { + if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } - if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement { + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { return fmt.Errorf("'replacement' can not be set for %s action", c.Action) } if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { @@ -125,6 +129,15 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } + if c.Action == DropEqual || c.Action == KeepEqual { + if c.Regex != DefaultRelabelConfig.Regex || + c.Modulus != DefaultRelabelConfig.Modulus || + c.Separator != DefaultRelabelConfig.Separator || + c.Replacement != DefaultRelabelConfig.Replacement { + return fmt.Errorf("%s action requires only 'source_labels' and `target_label`, and no other fields", c.Action) + } + } + if c.Action == LabelDrop || c.Action == LabelKeep { if c.SourceLabels != nil || c.TargetLabel != DefaultRelabelConfig.TargetLabel || @@ -225,6 +238,14 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels if !cfg.Regex.MatchString(val) { return nil } + case DropEqual: + if lset.Get(cfg.TargetLabel) == val { + return nil + } + case KeepEqual: + if lset.Get(cfg.TargetLabel) != val { + return nil + } case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. 
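The keepequal and dropequal actions added above compare the concatenation of the configured source_labels (joined by the separator) against the current value of target_label: keepequal keeps the target only when the two values are equal, while dropequal drops it when they are. A minimal sketch of that comparison, using a plain map instead of labels.Labels; the Kubernetes-style label names are illustrative and not taken from this patch:

package main

import (
	"fmt"
	"strings"
)

// keepEqualMatches reports whether a target survives a keepequal rule: the
// concatenation of the source label values (joined by the separator) must
// equal the current value of the target label. dropequal is the inverse.
func keepEqualMatches(lset map[string]string, sourceLabels []string, separator, targetLabel string) bool {
	vals := make([]string, 0, len(sourceLabels))
	for _, name := range sourceLabels {
		vals = append(vals, lset[name])
	}
	return strings.Join(vals, separator) == lset[targetLabel]
}

func main() {
	target := map[string]string{
		"__meta_kubernetes_pod_container_port_number":         "9090",
		"__meta_kubernetes_pod_annotation_prometheus_io_port": "9090",
	}
	// keepequal: keep the target only when the container port equals the annotated port.
	kept := keepEqualMatches(target,
		[]string{"__meta_kubernetes_pod_container_port_number"},
		";",
		"__meta_kubernetes_pod_annotation_prometheus_io_port")
	fmt.Println(kept) // true, so the target is kept; dropequal would drop it here
}

As the validation hunk above enforces, these two actions accept only source_labels and target_label; regex, modulus, separator and replacement must stay at their defaults.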
diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 99886bd4bc3..fd89a029c7d 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -19,11 +19,9 @@ import ( "encoding/json" "fmt" "io" - "net" "net/http" "net/url" "path" - "strings" "sync" "time" @@ -727,47 +725,11 @@ func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig continue } - lb := labels.NewBuilder(lset) - - // addPort checks whether we should add a default port to the address. - // If the address is not valid, we don't append a port either. - addPort := func(s string) bool { - // If we can split, a port exists and we don't have to add one. - if _, _, err := net.SplitHostPort(s); err == nil { - return false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return err == nil - } addr := lset.Get(model.AddressLabel) - // If it's an address with no trailing port, infer it based on the used scheme. - if addPort(addr) { - // Addresses reaching this point are already wrapped in [] if necessary. - switch lset.Get(model.SchemeLabel) { - case "http", "": - addr = addr + ":80" - case "https": - addr = addr + ":443" - default: - return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - // Meta labels are deleted after relabelling. Other internal labels propagate to - // the target which decides whether they will be part of their label set. - for _, l := range lset { - if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { - lb.Del(l.Name) - } - } - res = append(res, alertmanagerLabels{lset}) } return res, droppedAlertManagers, nil diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go index 0b3820c4d28..4b07187bd28 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ b/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -13,5 +13,22 @@ package prompb +import ( + "sync" +) + func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } + +func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { + size := r.Size() + data, ok := p.Get().(*[]byte) + if ok && cap(*data) >= size { + n, err := r.MarshalToSizedBuffer((*data)[:size]) + if err != nil { + return nil, err + } + return (*data)[:n], nil + } + return r.Marshal() +} diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 8e9e6f96548..d481cb7358b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -302,7 +302,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode // The trend factor argument. tf := vals[2].(Vector)[0].V - // Sanity check the input. + // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { panic(fmt.Errorf("invalid smoothing factor. 
Expected: 0 < sf < 1, got: %f", sf)) } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 7a0e48464be..6c37ce6fc6a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -432,7 +432,7 @@ func (p *parser) expectType(node Node, want ValueType, context string) { } } -// checkAST checks the sanity of the provided AST. This includes type checking. +// checkAST checks the validity of the provided AST. This includes type checking. func (p *parser) checkAST(node Node) (typ ValueType) { // For expressions the type is determined by their Type function. // Lists do not have a type but are not invalid either. diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 41526ccdc5f..ab2a159c4ee 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -491,8 +491,8 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa }) if containsNonStepInvariant { - // Since there is a step invariant function, we cannot automatically - // generate step invariant test cases for it sanely. + // Expression contains a function whose result can vary with evaluation + // time, even though its arguments are step invariant: skip it. return nil, nil } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index ced61ea4925..42f1b59ce0a 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -673,6 +673,9 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { rule.SetLastError(err) sp.SetStatus(codes.Error, err.Error()) unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ @@ -700,6 +703,9 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Series no longer exposed, mark it stale. _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { case unwrappedErr == nil: case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): @@ -727,6 +733,9 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { // Rule that produced series no longer configured, mark it stale. 
_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { case unwrappedErr == nil: case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 04375ab564b..e9d172a3e8a 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -1609,6 +1609,10 @@ loop: err = errNameLabelMandatory break loop } + if !lset.IsValid() { + err = fmt.Errorf("invalid metric name or label names: %s", lset.String()) + break loop + } // If any label limits is exceeded the scrape should fail. if err = verifyLabelLimits(lset, sl.labelLimits); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 9df4b75f5be..22d3b418602 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -248,7 +248,8 @@ type GetRef interface { // Returns reference number that can be used to pass to Appender.Append(), // and a set of labels that will not cause another copy when passed to Appender.Append(). // 0 means the appender does not have a reference to this series. - GetRef(lset labels.Labels) (SeriesRef, labels.Labels) + // hash should be a hash of lset. + GetRef(lset labels.Labels, hash uint64) (SeriesRef, labels.Labels) } // ExemplarAppender provides an interface for adding samples to exemplar storage, which diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 9b6f5a5ab18..48c2d8615f8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -20,6 +20,7 @@ import ( "net/http" "sort" "strings" + "sync" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" @@ -193,6 +194,7 @@ func StreamChunkedReadResponses( ss storage.ChunkSeriesSet, sortedExternalLabels []prompb.Label, maxBytesInFrame int, + marshalPool *sync.Pool, ) (storage.Warnings, error) { var ( chks []prompb.Chunk @@ -234,12 +236,14 @@ func StreamChunkedReadResponses( continue } - b, err := proto.Marshal(&prompb.ChunkedReadResponse{ + resp := &prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{ {Labels: lbls, Chunks: chks}, }, QueryIndex: queryIndex, - }) + } + + b, err := resp.PooledMarshal(marshalPool) if err != nil { return ss.Warnings(), fmt.Errorf("marshal ChunkedReadResponse: %w", err) } @@ -247,6 +251,9 @@ func StreamChunkedReadResponses( if _, err := stream.Write(b); err != nil { return ss.Warnings(), fmt.Errorf("write to stream: %w", err) } + + // We immediately flush the Write() so it is safe to return to the pool. 
+ marshalPool.Put(&b) chks = chks[:0] } if err := iter.Err(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index e1f1df21c19..116eb9596c4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -17,6 +17,7 @@ import ( "context" "net/http" "sort" + "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -37,6 +38,7 @@ type readHandler struct { remoteReadMaxBytesInFrame int remoteReadGate *gate.Gate queries prometheus.Gauge + marshalPool *sync.Pool } // NewReadHandler creates a http.Handler that accepts remote read requests and @@ -49,6 +51,7 @@ func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storag remoteReadSampleLimit: remoteReadSampleLimit, remoteReadGate: gate.New(remoteReadConcurrencyLimit), remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame, + marshalPool: &sync.Pool{}, queries: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "prometheus", @@ -225,6 +228,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re querier.Select(true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, + h.marshalPool, ) if err != nil { return err diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index ad1f0f3ae1b..7a2a9951cf6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -67,11 +67,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // checkAppendExemplarError modifies the AppendExamplar's returned error based on the error cause. 
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error { - unwrapedErr := errors.Unwrap(err) + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } switch { - case errors.Is(unwrapedErr, storage.ErrNotFound): + case errors.Is(unwrappedErr, storage.ErrNotFound): return storage.ErrNotFound - case errors.Is(unwrapedErr, storage.ErrOutOfOrderExemplar): + case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar): *outOfOrderErrs++ level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) return nil @@ -98,8 +101,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, s := range ts.Samples { _, err = app.Append(0, labels, s.Timestamp, s.Value) if err != nil { - unwrapedErr := errors.Unwrap(err) - if errors.Is(unwrapedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrapedErr, storage.ErrOutOfBounds) || errors.Is(unwrapedErr, storage.ErrDuplicateSampleForTimestamp) { + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } + if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err @@ -123,6 +129,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err _, err = app.AppendHistogram(0, labels, hp.Timestamp, hs) if err != nil { unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } // Althogh AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is // a note indicating its inclusion in the future. if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 6f6de1e6335..1476e6f3d93 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -734,7 +734,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, all = additionalPostingsFunc.GetPostings(all, indexr) } // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. - sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) + sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) syms := indexr.Symbols() if i == 0 { symbols = syms diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index def4442f520..7a165e431b1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -66,7 +66,7 @@ const ( // ErrNotReady is returned if the underlying storage is not ready yet. var ErrNotReady = errors.New("TSDB not ready") -// DefaultOptions used for the DB. They are sane for setups using +// DefaultOptions used for the DB. They are reasonable for setups using // millisecond precision timestamps. 
func DefaultOptions() *Options { return &Options{ @@ -998,9 +998,9 @@ type dbAppender struct { var _ storage.GetRef = dbAppender{} -func (a dbAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { +func (a dbAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { if g, ok := a.Appender.(storage.GetRef); ok { - return g.GetRef(lset) + return g.GetRef(lset, hash) } return 0, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index a3b5027ea49..d5003188d2c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -98,9 +98,9 @@ func (h *Head) initTime(t int64) { h.maxTime.CompareAndSwap(math.MinInt64, t) } -func (a *initAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { +func (a *initAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { if g, ok := a.app.(storage.GetRef); ok { - return g.GetRef(lset) + return g.GetRef(lset, hash) } return 0, nil } @@ -440,7 +440,7 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { } // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't -// use getOrCreate or make any of the lset sanity checks that Append does. +// use getOrCreate or make any of the lset validity checks that Append does. func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { // Check if exemplar storage is enabled. if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { @@ -647,8 +647,8 @@ func checkHistogramBuckets(buckets []int64) (uint64, error) { var _ storage.GetRef = &headAppender{} -func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { - s := a.head.series.getByHash(lset.Hash(), lset) +func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { + s := a.head.series.getByHash(hash, lset) if s == nil { return 0, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 06a0f3e71ee..9d7897860bd 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -1164,44 +1164,37 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { // Earlier V1 formats don't have a sorted postings offset table, so // load the whole offset table into memory. r.postingsV1 = map[string]map[string]uint64{} - if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) + if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error { + if _, ok := r.postingsV1[string(name)]; !ok { + r.postingsV1[string(name)] = map[string]uint64{} + r.postings[string(name)] = nil // Used to get a list of labelnames in places. } - if _, ok := r.postingsV1[key[0]]; !ok { - r.postingsV1[key[0]] = map[string]uint64{} - r.postings[key[0]] = nil // Used to get a list of labelnames in places. 
- } - r.postingsV1[key[0]][key[1]] = off + r.postingsV1[string(name)][string(value)] = off return nil }); err != nil { return nil, errors.Wrap(err, "read postings table") } } else { - var lastKey []string + var lastName, lastValue []byte lastOff := 0 valueCount := 0 // For the postings offset table we keep every label name but only every nth // label value (plus the first and last one), to save memory. - if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) - } - if _, ok := r.postings[key[0]]; !ok { + if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, _ uint64, off int) error { + if _, ok := r.postings[string(name)]; !ok { // Next label name. - r.postings[key[0]] = []postingOffset{} - if lastKey != nil { + r.postings[string(name)] = []postingOffset{} + if lastName != nil { // Always include last value for each label name. - r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff}) + r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff}) } - lastKey = nil valueCount = 0 } if valueCount%symbolFactor == 0 { - r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off}) - lastKey = nil + r.postings[string(name)] = append(r.postings[string(name)], postingOffset{value: string(value), off: off}) + lastName, lastValue = nil, nil } else { - lastKey = key + lastName, lastValue = name, value lastOff = off } valueCount++ @@ -1209,8 +1202,8 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { }); err != nil { return nil, errors.Wrap(err, "read postings table") } - if lastKey != nil { - r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff}) + if lastName != nil { + r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff}) } // Trim any extra space in the slices. for k, v := range r.postings { @@ -1251,15 +1244,12 @@ type Range struct { // for all postings lists. func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { m := map[labels.Label]Range{} - if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) - } + if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error { d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable) if d.Err() != nil { return d.Err() } - m[labels.Label{Name: key[0], Value: key[1]}] = Range{ + m[labels.Label{Name: string(name), Value: string(value)}] = Range{ Start: int64(off) + 4, End: int64(off) + 4 + int64(d.Len()), } @@ -1412,29 +1402,29 @@ func (s *symbolsIter) Next() bool { func (s symbolsIter) At() string { return s.cur } func (s symbolsIter) Err() error { return s.err } -// ReadOffsetTable reads an offset table and at the given position calls f for each -// found entry. If f returns an error it stops decoding and returns the received error. -func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64, int) error) error { +// ReadPostingsOffsetTable reads the postings offset table and at the given position calls f for each +// found entry. 
+// The name and value parameters passed to f reuse the backing memory of the underlying byte slice, +// so they shouldn't be persisted without previously copying them. +// If f returns an error it stops decoding and returns the received error. +func ReadPostingsOffsetTable(bs ByteSlice, off uint64, f func(name, value []byte, postingsOffset uint64, labelOffset int) error) error { d := encoding.NewDecbufAt(bs, int(off), castagnoliTable) startLen := d.Len() cnt := d.Be32() for d.Err() == nil && d.Len() > 0 && cnt > 0 { offsetPos := startLen - d.Len() - keyCount := d.Uvarint() - // The Postings offset table takes only 2 keys per entry (name and value of label), - // and the LabelIndices offset table takes only 1 key per entry (a label name). - // Hence setting the size to max of both, i.e. 2. - keys := make([]string, 0, 2) - for i := 0; i < keyCount; i++ { - keys = append(keys, d.UvarintStr()) + if keyCount := d.Uvarint(); keyCount != 2 { + return errors.Errorf("unexpected number of keys for postings offset table %d", keyCount) } + name := d.UvarintBytes() + value := d.UvarintBytes() o := d.Uvarint64() if d.Err() != nil { break } - if err := f(keys, o, offsetPos); err != nil { + if err := f(name, value, o, offsetPos); err != nil { return err } cnt-- diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 0f9b4d3b47d..cc765903c00 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -19,6 +19,7 @@ import ( "strings" "unicode/utf8" + "github.com/oklog/ulid" "github.com/pkg/errors" "golang.org/x/exp/slices" @@ -47,6 +48,7 @@ func init() { } type blockBaseQuerier struct { + blockID ulid.ULID index IndexReader chunks ChunkReader tombstones tombstones.Reader @@ -77,6 +79,7 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er tombsr = tombstones.NewMemTombstones() } return &blockBaseQuerier{ + blockID: b.Meta().ULID, mint: mint, maxt: maxt, index: indexr, @@ -178,7 +181,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, if sortSeries { p = q.index.SortedPostings(p) } - return newBlockChunkSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return newBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) } func findSetMatches(pattern string) []string { @@ -427,6 +430,7 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin // Iterated series are trimmed with given min and max time as well as tombstones. // See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating. type blockBaseSeriesSet struct { + blockID ulid.ULID p index.Postings index IndexReader chunks ChunkReader @@ -471,7 +475,14 @@ func (b *blockBaseSeriesSet) Next() bool { var trimFront, trimBack bool // Copy chunks as iterables are reusable. - chks := make([]chunks.Meta, 0, len(b.bufChks)) + // Count those in range to size allocation (roughly - ignoring tombstones). + nChks := 0 + for _, chk := range b.bufChks { + if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) { + nChks++ + } + } + chks := make([]chunks.Meta, 0, nChks) // Prefilter chunks and pick those which are not entirely deleted or totally outside of the requested range. 
for _, chk := range b.bufChks { @@ -481,10 +492,10 @@ func (b *blockBaseSeriesSet) Next() bool { if chk.MinTime > b.maxt { continue } - - if !(tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) { - chks = append(chks, chk) + if (tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) { + continue } + chks = append(chks, chk) // If still not entirely deleted, check if trim is needed based on requested time range. if !b.disableTrimming { @@ -512,7 +523,7 @@ func (b *blockBaseSeriesSet) Next() bool { copy(b.currLabels, b.bufLbls) b.currIterFn = func() *populateWithDelGenericSeriesIterator { - return newPopulateWithDelGenericSeriesIterator(b.chunks, chks, intervals) + return newPopulateWithDelGenericSeriesIterator(b.blockID, b.chunks, chks, intervals) } return true } @@ -539,7 +550,8 @@ func (b *blockBaseSeriesSet) Warnings() storage.Warnings { return nil } // means that the chunk iterator in currChkMeta is invalid and a chunk rewrite // is needed, for which currDelIter should be used. type populateWithDelGenericSeriesIterator struct { - chunks ChunkReader + blockID ulid.ULID + chunks ChunkReader // chks are expected to be sorted by minTime and should be related to // the same, single series. chks []chunks.Meta @@ -554,11 +566,13 @@ type populateWithDelGenericSeriesIterator struct { } func newPopulateWithDelGenericSeriesIterator( + blockID ulid.ULID, chunks ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals, ) *populateWithDelGenericSeriesIterator { return &populateWithDelGenericSeriesIterator{ + blockID: blockID, chunks: chunks, chks: chks, i: -1, @@ -577,7 +591,7 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta) if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d", p.currChkMeta.Ref) + p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String()) return false } @@ -837,9 +851,10 @@ type blockChunkSeriesSet struct { blockBaseSeriesSet } -func newBlockChunkSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { +func newBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { return &blockChunkSeriesSet{ blockBaseSeriesSet{ + blockID: id, index: i, chunks: c, tombstones: t, diff --git a/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go b/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go index 1229f2da2d4..4a887a3d2aa 100644 --- a/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go +++ b/vendor/github.com/thanos-community/promql-engine/execution/function/functions.go @@ -72,6 +72,15 @@ var Funcs = map[string]FunctionCall{ "deg": simpleFunc(func(v float64) float64 { return v * 180 / math.Pi }), + "sgn": simpleFunc(func(v float64) float64 { + var sign float64 + if v > 0 { + sign = 1 + } else if v < 0 { + sign = -1 + } + return sign + }), "timestamp": func(f FunctionArgs) promql.Sample { return promql.Sample{ Point: promql.Point{ diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index c62cf48335b..0c7d062c9c3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go 
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -5,6 +5,7 @@ package indexheader import ( "bufio" + "bytes" "context" "encoding/binary" "hash" @@ -69,57 +70,64 @@ type BinaryTOC struct { PostingsOffsetTable uint64 } -// WriteBinary build index-header file from the pieces of index in object storage. -func WriteBinary(ctx context.Context, bkt objstore.BucketReader, id ulid.ULID, filename string) (err error) { +// WriteBinary build index header from the pieces of index in object storage, and cached in file if necessary. +func WriteBinary(ctx context.Context, bkt objstore.BucketReader, id ulid.ULID, filename string) ([]byte, error) { ir, indexVersion, err := newChunkedIndexReader(ctx, bkt, id) if err != nil { - return errors.Wrap(err, "new index reader") + return nil, errors.Wrap(err, "new index reader") + } + tmpFilename := "" + if filename != "" { + tmpFilename = filename + ".tmp" } - tmpFilename := filename + ".tmp" // Buffer for copying and encbuffers. // This also will control the size of file writer buffer. buf := make([]byte, 32*1024) - bw, err := newBinaryWriter(tmpFilename, buf) + bw, err := newBinaryWriter(id, tmpFilename, buf) if err != nil { - return errors.Wrap(err, "new binary index header writer") + return nil, errors.Wrap(err, "new binary index header writer") } defer runutil.CloseWithErrCapture(&err, bw, "close binary writer for %s", tmpFilename) if err := bw.AddIndexMeta(indexVersion, ir.toc.PostingsTable); err != nil { - return errors.Wrap(err, "add index meta") + return nil, errors.Wrap(err, "add index meta") } if err := ir.CopySymbols(bw.SymbolsWriter(), buf); err != nil { - return err + return nil, err } - if err := bw.f.Flush(); err != nil { - return errors.Wrap(err, "flush") + if err := bw.writer.Flush(); err != nil { + return nil, errors.Wrap(err, "flush") } if err := ir.CopyPostingsOffsets(bw.PostingOffsetsWriter(), buf); err != nil { - return err + return nil, err } - if err := bw.f.Flush(); err != nil { - return errors.Wrap(err, "flush") + if err := bw.writer.Flush(); err != nil { + return nil, errors.Wrap(err, "flush") } if err := bw.WriteTOC(); err != nil { - return errors.Wrap(err, "write index header TOC") + return nil, errors.Wrap(err, "write index header TOC") } - if err := bw.f.Flush(); err != nil { - return errors.Wrap(err, "flush") + if err := bw.writer.Flush(); err != nil { + return nil, errors.Wrap(err, "flush") } - if err := bw.f.f.Sync(); err != nil { - return errors.Wrap(err, "sync") + if err := bw.writer.Sync(); err != nil { + return nil, errors.Wrap(err, "sync") } - // Create index-header in atomic way, to avoid partial writes (e.g during restart or crash of store GW). - return os.Rename(tmpFilename, filename) + if tmpFilename != "" { + // Create index-header in atomic way, to avoid partial writes (e.g during restart or crash of store GW). + return nil, os.Rename(tmpFilename, filename) + } + + return bw.Buffer(), nil } type chunkedIndexReader struct { @@ -231,7 +239,7 @@ func (r *chunkedIndexReader) CopyPostingsOffsets(w io.Writer, buf []byte) (err e // TODO(bwplotka): Add padding for efficient read. 
type binaryWriter struct { - f *FileWriter + writer PosWriter toc BinaryTOC @@ -241,38 +249,48 @@ type binaryWriter struct { crc32 hash.Hash } -func newBinaryWriter(fn string, buf []byte) (w *binaryWriter, err error) { - dir := filepath.Dir(fn) - - df, err := fileutil.OpenDir(dir) - if os.IsNotExist(err) { - if err := os.MkdirAll(dir, os.ModePerm); err != nil { - return nil, err - } - df, err = fileutil.OpenDir(dir) - } +func newBinaryWriter(id ulid.ULID, cacheFilename string, buf []byte) (w *binaryWriter, err error) { + var memoryWriter *MemoryWriter + memoryWriter, err = NewMemoryWriter(id, len(buf)) if err != nil { - return nil, err } + var binWriter PosWriter = memoryWriter - defer runutil.CloseWithErrCapture(&err, df, "dir close") + if cacheFilename != "" { + dir := filepath.Dir(cacheFilename) - if err := os.RemoveAll(fn); err != nil { - return nil, errors.Wrap(err, "remove any existing index at path") - } + df, err := fileutil.OpenDir(dir) + if os.IsNotExist(err) { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, err + } + df, err = fileutil.OpenDir(dir) + } + if err != nil { + return nil, err + } - // We use file writer for buffers not larger than reused one. - f, err := NewFileWriter(fn, len(buf)) - if err != nil { - return nil, err - } - if err := df.Sync(); err != nil { - return nil, errors.Wrap(err, "sync dir") + defer runutil.CloseWithErrCapture(&err, df, "dir close") + + if err := os.RemoveAll(cacheFilename); err != nil { + return nil, errors.Wrap(err, "remove any existing index at path") + } + + // We use file writer for buffers not larger than reused one. + var fileWriter *FileWriter + fileWriter, err = NewFileWriter(cacheFilename, memoryWriter) + if err != nil { + return nil, err + } + if err := df.Sync(); err != nil { + return nil, errors.Wrap(err, "sync dir") + } + binWriter = fileWriter } w = &binaryWriter{ - f: f, + writer: binWriter, // Reusable memory. buf: encoding.Encbuf{B: buf}, @@ -283,38 +301,42 @@ func newBinaryWriter(fn string, buf []byte) (w *binaryWriter, err error) { w.buf.PutBE32(MagicIndex) w.buf.PutByte(BinaryFormatV1) - return w, w.f.Write(w.buf.Get()) + return w, w.writer.Write(w.buf.Get()) } -type FileWriter struct { - f *os.File - fbuf *bufio.Writer - pos uint64 - name string +type PosWriter interface { + Pos() uint64 + Write(bufs ...[]byte) error + Buffer() []byte + Flush() error + Sync() error + Close() error +} + +type MemoryWriter struct { + id ulid.ULID + buf bytes.Buffer + pos uint64 } // TODO(bwplotka): Added size to method, upstream this. -func NewFileWriter(name string, size int) (*FileWriter, error) { - f, err := os.OpenFile(filepath.Clean(name), os.O_CREATE|os.O_RDWR, 0600) - if err != nil { - return nil, err - } - return &FileWriter{ - f: f, - fbuf: bufio.NewWriterSize(f, size), - pos: 0, - name: name, +func NewMemoryWriter(id ulid.ULID, size int) (*MemoryWriter, error) { + var buf bytes.Buffer + return &MemoryWriter{ + id: id, + buf: buf, + pos: 0, }, nil } -func (fw *FileWriter) Pos() uint64 { - return fw.pos +func (mw *MemoryWriter) Pos() uint64 { + return mw.pos } -func (fw *FileWriter) Write(bufs ...[]byte) error { +func (mw *MemoryWriter) Write(bufs ...[]byte) error { for _, b := range bufs { - n, err := fw.fbuf.Write(b) - fw.pos += uint64(n) + n, err := mw.buf.Write(b) + mw.pos += uint64(n) if err != nil { return err } @@ -322,40 +344,83 @@ func (fw *FileWriter) Write(bufs ...[]byte) error { // offset references in v1 are only 4 bytes large. 
// Once we move to compressed/varint representations in those areas, this limitation // can be lifted. - if fw.pos > 16*math.MaxUint32 { - return errors.Errorf("%q exceeding max size of 64GiB", fw.name) + if mw.pos > 16*math.MaxUint32 { + return errors.Errorf("%q exceeding max size of 64GiB", mw.id) } } return nil } -func (fw *FileWriter) Flush() error { - return fw.fbuf.Flush() +func (mw *MemoryWriter) Buffer() []byte { + return mw.buf.Bytes() } -func (fw *FileWriter) WriteAt(buf []byte, pos uint64) error { - if err := fw.Flush(); err != nil { - return err - } - _, err := fw.f.WriteAt(buf, int64(pos)) - return err +func (mw *MemoryWriter) Flush() error { + return nil } -// AddPadding adds zero byte padding until the file size is a multiple size. -func (fw *FileWriter) AddPadding(size int) error { - p := fw.pos % uint64(size) - if p == 0 { - return nil +func (mw *MemoryWriter) Sync() error { + return nil +} + +func (mw *MemoryWriter) Close() error { + return mw.Flush() +} + +type FileWriter struct { + f *os.File + memWriter *MemoryWriter + fileWriter *bufio.Writer + name string +} + +// TODO(bwplotka): Added size to method, upstream this. +func NewFileWriter(name string, memWriter *MemoryWriter) (*FileWriter, error) { + f, err := os.OpenFile(filepath.Clean(name), os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return nil, err } - p = uint64(size) - p + return &FileWriter{ + f: f, + memWriter: memWriter, + fileWriter: bufio.NewWriterSize(f, memWriter.buf.Len()), + name: name, + }, nil +} + +func (fw *FileWriter) Pos() uint64 { + return fw.memWriter.Pos() +} - if err := fw.Write(make([]byte, p)); err != nil { - return errors.Wrap(err, "add padding") +func (fw *FileWriter) Write(bufs ...[]byte) error { + if err := fw.memWriter.Write(bufs...); err != nil { + return err + } + for _, b := range bufs { + _, err := fw.fileWriter.Write(b) + if err != nil { + return err + } } return nil } +func (fw *FileWriter) Buffer() []byte { + return fw.memWriter.Buffer() +} + +func (fw *FileWriter) Flush() error { + if err := fw.memWriter.Flush(); err != nil { + return err + } + + return fw.fileWriter.Flush() +} + func (fw *FileWriter) Close() error { + if err := fw.memWriter.Close(); err != nil { + return err + } if err := fw.Flush(); err != nil { return err } @@ -365,6 +430,13 @@ func (fw *FileWriter) Close() error { return fw.f.Close() } +func (fw *FileWriter) Sync() error { + if err := fw.memWriter.Sync(); err != nil { + return err + } + return fw.f.Sync() +} + func (fw *FileWriter) Remove() error { return os.Remove(fw.name) } @@ -373,16 +445,16 @@ func (w *binaryWriter) AddIndexMeta(indexVersion int, indexPostingOffsetTable ui w.buf.Reset() w.buf.PutByte(byte(indexVersion)) w.buf.PutBE64(indexPostingOffsetTable) - return w.f.Write(w.buf.Get()) + return w.writer.Write(w.buf.Get()) } func (w *binaryWriter) SymbolsWriter() io.Writer { - w.toc.Symbols = w.f.Pos() + w.toc.Symbols = w.writer.Pos() return w } func (w *binaryWriter) PostingOffsetsWriter() io.Writer { - w.toc.PostingsOffsetTable = w.f.Pos() + w.toc.PostingsOffsetTable = w.writer.Pos() return w } @@ -394,17 +466,21 @@ func (w *binaryWriter) WriteTOC() error { w.buf.PutHash(w.crc32) - return w.f.Write(w.buf.Get()) + return w.writer.Write(w.buf.Get()) } func (w *binaryWriter) Write(p []byte) (int, error) { - n := w.f.Pos() - err := w.f.Write(p) - return int(w.f.Pos() - n), err + n := w.writer.Pos() + err := w.writer.Write(p) + return int(w.writer.Pos() - n), err +} + +func (w *binaryWriter) Buffer() []byte { + return w.writer.Buffer() } func (w 
*binaryWriter) Close() error { - return w.f.Close() + return w.writer.Close() } type postingValueOffsets struct { @@ -458,21 +534,45 @@ type BinaryReader struct { // NewBinaryReader loads or builds new index-header if not present on disk. func NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int) (*BinaryReader, error) { - binfn := filepath.Join(dir, id.String(), block.IndexHeaderFilename) - br, err := newFileBinaryReader(binfn, postingOffsetsInMemSampling) - if err == nil { - return br, nil + if dir != "" { + binfn := filepath.Join(dir, id.String(), block.IndexHeaderFilename) + br, err := newFileBinaryReader(binfn, postingOffsetsInMemSampling) + if err == nil { + return br, nil + } + + level.Debug(logger).Log("msg", "failed to read index-header from disk; recreating", "path", binfn, "err", err) + + start := time.Now() + if _, err := WriteBinary(ctx, bkt, id, binfn); err != nil { + return nil, errors.Wrap(err, "write index header") + } + + level.Debug(logger).Log("msg", "built index-header file", "path", binfn, "elapsed", time.Since(start)) + return newFileBinaryReader(binfn, postingOffsetsInMemSampling) + } else { + buf, err := WriteBinary(ctx, bkt, id, "") + if err != nil { + return nil, errors.Wrap(err, "generate index header") + } + + return newMemoryBinaryReader(buf, postingOffsetsInMemSampling) } +} - level.Debug(logger).Log("msg", "failed to read index-header from disk; recreating", "path", binfn, "err", err) +func newMemoryBinaryReader(buf []byte, postingOffsetsInMemSampling int) (bw *BinaryReader, err error) { + r := &BinaryReader{ + b: realByteSlice(buf), + c: nil, + postings: map[string]*postingValueOffsets{}, + postingOffsetsInMemSampling: postingOffsetsInMemSampling, + } - start := time.Now() - if err := WriteBinary(ctx, bkt, id, binfn); err != nil { - return nil, errors.Wrap(err, "write index header") + if err := r.init(); err != nil { + return nil, err } - level.Debug(logger).Log("msg", "built index-header file", "path", binfn, "elapsed", time.Since(start)) - return newFileBinaryReader(binfn, postingOffsetsInMemSampling) + return r, nil } func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *BinaryReader, err error) { @@ -493,12 +593,44 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina postingOffsetsInMemSampling: postingOffsetsInMemSampling, } + if err := r.init(); err != nil { + return nil, err + } + + return r, nil +} + +// newBinaryTOCFromByteSlice return parsed TOC from given index header byte slice. +func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { + if bs.Len() < binaryTOCLen { + return nil, encoding.ErrInvalidSize + } + b := bs.Range(bs.Len()-binaryTOCLen, bs.Len()) + + expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) + d := encoding.Decbuf{B: b[:len(b)-4]} + + if d.Crc32(castagnoliTable) != expCRC { + return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read index header TOC") + } + + if err := d.Err(); err != nil { + return nil, err + } + + return &BinaryTOC{ + Symbols: d.Be64(), + PostingsOffsetTable: d.Be64(), + }, nil +} + +func (r *BinaryReader) init() (err error) { // Verify header. 
if r.b.Len() < headerLen { - return nil, errors.Wrap(encoding.ErrInvalidSize, "index header's header") + return errors.Wrap(encoding.ErrInvalidSize, "index header's header") } if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex { - return nil, errors.Errorf("invalid magic number %x", m) + return errors.Errorf("invalid magic number %x", m) } r.version = int(r.b.Range(4, 5)[0]) r.indexVersion = int(r.b.Range(5, 6)[0]) @@ -506,51 +638,48 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina r.indexLastPostingEnd = int64(binary.BigEndian.Uint64(r.b.Range(6, headerLen))) if r.version != BinaryFormatV1 { - return nil, errors.Errorf("unknown index header file version %d", r.version) + return errors.Errorf("unknown index header file version %d", r.version) } r.toc, err = newBinaryTOCFromByteSlice(r.b) if err != nil { - return nil, errors.Wrap(err, "read index header TOC") + return errors.Wrap(err, "read index header TOC") } // TODO(bwplotka): Consider contributing to Prometheus to allow specifying custom number for symbolsFactor. r.symbols, err = index.NewSymbols(r.b, r.indexVersion, int(r.toc.Symbols)) if err != nil { - return nil, errors.Wrap(err, "read symbols") + return errors.Wrap(err, "read symbols") } - var lastKey []string + var lastName, lastValue []byte if r.indexVersion == index.FormatV1 { // Earlier V1 formats don't have a sorted postings offset table, so // load the whole offset table into memory. r.postingsV1 = map[string]map[string]index.Range{} var prevRng index.Range - if err := index.ReadOffsetTable(r.b, r.toc.PostingsOffsetTable, func(key []string, off uint64, _ int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) + if err := index.ReadPostingsOffsetTable(r.b, r.toc.PostingsOffsetTable, func(name, value []byte, postingsOffset uint64, _ int) error { + if lastName != nil { + prevRng.End = int64(postingsOffset - crc32.Size) + r.postingsV1[string(lastName)][string(lastValue)] = prevRng } - if lastKey != nil { - prevRng.End = int64(off - crc32.Size) - r.postingsV1[lastKey[0]][lastKey[1]] = prevRng + if _, ok := r.postingsV1[string(name)]; !ok { + r.postingsV1[string(name)] = map[string]index.Range{} + r.postings[string(name)] = nil // Used to get a list of labelnames in places. } - if _, ok := r.postingsV1[key[0]]; !ok { - r.postingsV1[key[0]] = map[string]index.Range{} - r.postings[key[0]] = nil // Used to get a list of labelnames in places. - } - - lastKey = key - prevRng = index.Range{Start: int64(off + postingLengthFieldSize)} + lastName = name + lastValue = value + prevRng = index.Range{Start: int64(postingsOffset + postingLengthFieldSize)} return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return errors.Wrap(err, "read postings table") } - if lastKey != nil { + if string(lastName) != "" { prevRng.End = r.indexLastPostingEnd - crc32.Size - r.postingsV1[lastKey[0]][lastKey[1]] = prevRng + r.postingsV1[string(lastName)][string(lastValue)] = prevRng } } else { lastTableOff := 0 @@ -558,46 +687,44 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina // For the postings offset table we keep every label name but only every nth // label value (plus the first and last one), to save memory. 
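As a side note on the sampling mentioned in the comment above: the reader keeps only every postingOffsetsInMemSampling-th label value offset in memory (always including the first and last value) and scans the full table forward from the nearest kept entry at lookup time. The following is a minimal, self-contained sketch of that trade-off; sampleOffsets and lookupStart are illustrative helpers, not functions from this patch.

package main

import (
	"fmt"
	"sort"
)

// offsetEntry mirrors the idea behind postingOffset: a label value plus the
// offset of its entry in the sorted postings offset table.
type offsetEntry struct {
	value    string
	tableOff int
}

// sampleOffsets keeps every nth entry of a sorted offset table, always
// including the first and the last value. This is the in-memory footprint /
// lookup-cost trade-off controlled by postingOffsetsInMemSampling.
// (Illustrative sketch only.)
func sampleOffsets(all []offsetEntry, n int) []offsetEntry {
	var kept []offsetEntry
	for i, e := range all {
		if i%n == 0 || i == len(all)-1 {
			kept = append(kept, e)
		}
	}
	return kept
}

// lookupStart returns the table offset from which a forward scan for value
// must start: the last sampled entry whose value is <= the requested one.
func lookupStart(sampled []offsetEntry, value string) int {
	i := sort.Search(len(sampled), func(i int) bool { return sampled[i].value > value })
	if i == 0 {
		return sampled[0].tableOff
	}
	return sampled[i-1].tableOff
}

func main() {
	table := []offsetEntry{
		{"a", 0}, {"b", 16}, {"c", 32}, {"d", 48}, {"e", 64}, {"f", 80}, {"g", 96},
	}
	sampled := sampleOffsets(table, 4)      // keeps "a", "e" and the last value "g"
	fmt.Println(sampled)                    // [{a 0} {e 64} {g 96}]
	fmt.Println(lookupStart(sampled, "d")) // 0: scan forward from "a"
}

Larger sampling values save memory at the cost of a longer forward scan per lookup.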
- if err := index.ReadOffsetTable(r.b, r.toc.PostingsOffsetTable, func(key []string, off uint64, tableOff int) error { - if len(key) != 2 { - return errors.Errorf("unexpected key length for posting table %d", len(key)) - } - - if _, ok := r.postings[key[0]]; !ok { + if err := index.ReadPostingsOffsetTable(r.b, r.toc.PostingsOffsetTable, func(name, value []byte, postingsOffset uint64, labelOffset int) error { + if _, ok := r.postings[string(name)]; !ok { // Not seen before label name. - r.postings[key[0]] = &postingValueOffsets{} - if lastKey != nil { + r.postings[string(name)] = &postingValueOffsets{} + if lastName != nil { // Always include last value for each label name, unless it was just added in previous iteration based // on valueCount. - if (valueCount-1)%postingOffsetsInMemSampling != 0 { - r.postings[lastKey[0]].offsets = append(r.postings[lastKey[0]].offsets, postingOffset{value: lastKey[1], tableOff: lastTableOff}) + if (valueCount-1)%r.postingOffsetsInMemSampling != 0 { + r.postings[string(lastName)].offsets = append(r.postings[string(lastName)].offsets, postingOffset{value: string(lastValue), tableOff: lastTableOff}) } - r.postings[lastKey[0]].lastValOffset = int64(off - crc32.Size) - lastKey = nil + r.postings[string(lastName)].lastValOffset = int64(postingsOffset - crc32.Size) + lastName = nil + lastValue = nil } valueCount = 0 } - lastKey = key - lastTableOff = tableOff + lastName = name + lastValue = value + lastTableOff = labelOffset valueCount++ - if (valueCount-1)%postingOffsetsInMemSampling == 0 { - r.postings[key[0]].offsets = append(r.postings[key[0]].offsets, postingOffset{value: key[1], tableOff: tableOff}) + if (valueCount-1)%r.postingOffsetsInMemSampling == 0 { + r.postings[string(name)].offsets = append(r.postings[string(name)].offsets, postingOffset{value: string(value), tableOff: labelOffset}) } return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return errors.Wrap(err, "read postings table") } - if lastKey != nil { - if (valueCount-1)%postingOffsetsInMemSampling != 0 { + if lastName != nil { + if (valueCount-1)%r.postingOffsetsInMemSampling != 0 { // Always include last value for each label name if not included already based on valueCount. - r.postings[lastKey[0]].offsets = append(r.postings[lastKey[0]].offsets, postingOffset{value: lastKey[1], tableOff: lastTableOff}) + r.postings[string(lastName)].offsets = append(r.postings[string(lastName)].offsets, postingOffset{value: string(lastValue), tableOff: lastTableOff}) } // In any case lastValOffset is unknown as don't have next posting anymore. Guess from TOC table. // In worst case we will overfetch a few bytes. - r.postings[lastKey[0]].lastValOffset = r.indexLastPostingEnd - crc32.Size + r.postings[string(lastName)].lastValOffset = r.indexLastPostingEnd - crc32.Size } // Trim any extra space in the slices. for k, v := range r.postings { @@ -614,38 +741,14 @@ func newFileBinaryReader(path string, postingOffsetsInMemSampling int) (bw *Bina } off, err := r.symbols.ReverseLookup(k) if err != nil { - return nil, errors.Wrap(err, "reverse symbol lookup") + return errors.Wrap(err, "reverse symbol lookup") } r.nameSymbols[off] = k } r.dec = &index.Decoder{LookupSymbol: r.LookupSymbol} - return r, nil -} - -// newBinaryTOCFromByteSlice return parsed TOC from given index header byte slice. 
-func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { - if bs.Len() < binaryTOCLen { - return nil, encoding.ErrInvalidSize - } - b := bs.Range(bs.Len()-binaryTOCLen, bs.Len()) - - expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) - d := encoding.Decbuf{B: b[:len(b)-4]} - - if d.Crc32(castagnoliTable) != expCRC { - return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read index header TOC") - } - - if err := d.Err(); err != nil { - return nil, err - } - - return &BinaryTOC{ - Symbols: d.Be64(), - PostingsOffsetTable: d.Be64(), - }, nil + return nil } func (r *BinaryReader) IndexVersion() (int, error) { @@ -915,7 +1018,12 @@ func (r *BinaryReader) LabelNames() ([]string, error) { return labelNames, nil } -func (r *BinaryReader) Close() error { return r.c.Close() } +func (r *BinaryReader) Close() error { + if r.c == nil { + return nil + } + return r.c.Close() +} type realByteSlice []byte diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go index 13bf4768123..c3bee382c2f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -71,7 +71,6 @@ type LazyBinaryReader struct { logger log.Logger bkt objstore.BucketReader dir string - filepath string id ulid.ULID postingOffsetsInMemSampling int metrics *LazyBinaryReaderMetrics @@ -99,22 +98,23 @@ func NewLazyBinaryReader( metrics *LazyBinaryReaderMetrics, onClosed func(*LazyBinaryReader), ) (*LazyBinaryReader, error) { - filepath := filepath.Join(dir, id.String(), block.IndexHeaderFilename) - - // If the index-header doesn't exist we should download it. - if _, err := os.Stat(filepath); err != nil { - if !os.IsNotExist(err) { - return nil, errors.Wrap(err, "read index header") + if dir != "" { + indexHeaderFile := filepath.Join(dir, id.String(), block.IndexHeaderFilename) + // If the index-header doesn't exist we should download it. 
+ if _, err := os.Stat(indexHeaderFile); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrap(err, "read index header") + } + + level.Debug(logger).Log("msg", "the index-header doesn't exist on disk; recreating", "path", indexHeaderFile) + + start := time.Now() + if _, err := WriteBinary(ctx, bkt, id, indexHeaderFile); err != nil { + return nil, errors.Wrap(err, "write index header") + } + + level.Debug(logger).Log("msg", "built index-header file", "path", indexHeaderFile, "elapsed", time.Since(start)) } - - level.Debug(logger).Log("msg", "the index-header doesn't exist on disk; recreating", "path", filepath) - - start := time.Now() - if err := WriteBinary(ctx, bkt, id, filepath); err != nil { - return nil, errors.Wrap(err, "write index header") - } - - level.Debug(logger).Log("msg", "built index-header file", "path", filepath, "elapsed", time.Since(start)) } return &LazyBinaryReader{ @@ -122,7 +122,6 @@ func NewLazyBinaryReader( logger: logger, bkt: bkt, dir: dir, - filepath: filepath, id: id, postingOffsetsInMemSampling: postingOffsetsInMemSampling, metrics: metrics, @@ -241,7 +240,7 @@ func (r *LazyBinaryReader) load() (returnErr error) { return r.readerErr } - level.Debug(r.logger).Log("msg", "lazy loading index-header file", "path", r.filepath) + level.Debug(r.logger).Log("msg", "lazy loading index-header", "block", r.id) r.metrics.loadCount.Inc() startTime := time.Now() @@ -249,11 +248,11 @@ func (r *LazyBinaryReader) load() (returnErr error) { if err != nil { r.metrics.loadFailedCount.Inc() r.readerErr = err - return errors.Wrapf(err, "lazy load index-header file at %s", r.filepath) + return errors.Wrapf(err, "lazy load index-header for block %s", r.id) } r.reader = reader - level.Debug(r.logger).Log("msg", "lazy loaded index-header file", "path", r.filepath, "elapsed", time.Since(startTime)) + level.Debug(r.logger).Log("msg", "lazy loaded index-header", "block", r.id, "elapsed", time.Since(startTime)) r.metrics.loadDuration.Observe(time.Since(startTime).Seconds()) return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go index 0aa76651cc9..a84ea54767b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go @@ -323,7 +323,7 @@ type adjustableSeriesIterator interface { // adjustAtValue allows to adjust value by implementation if needed knowing the last value. This is used by counter // implementation which can adjust for obsolete counter value. - adjustAtValue(lastValue float64) + adjustAtValue(lastFloatValue float64) } type noopAdjustableSeriesIterator struct { @@ -360,11 +360,11 @@ type counterErrAdjustSeriesIterator struct { errAdjust float64 } -func (it *counterErrAdjustSeriesIterator) adjustAtValue(lastValue float64) { +func (it *counterErrAdjustSeriesIterator) adjustAtValue(lastFloatValue float64) { _, v := it.At() - if lastValue > v { + if lastFloatValue > v { // This replica has obsolete value (did not see the correct "end" of counter value before app restart). Adjust. - it.errAdjust += lastValue - v + it.errAdjust += lastFloatValue - v } } @@ -380,8 +380,8 @@ type dedupSeriesIterator struct { // TODO(bwplotka): Don't base on LastT, but on detected scrape interval. This will allow us to be more // responsive to gaps: https://github.com/thanos-io/thanos/issues/981, let's do it in next PR. 
- lastT int64 - lastV float64 + lastT int64 + lastIter chunkenc.Iterator penA, penB int64 useA bool @@ -389,23 +389,24 @@ type dedupSeriesIterator struct { func newDedupSeriesIterator(a, b adjustableSeriesIterator) *dedupSeriesIterator { return &dedupSeriesIterator{ - a: a, - b: b, - lastT: math.MinInt64, - lastV: float64(math.MinInt64), - aval: a.Next(), - bval: b.Next(), + a: a, + b: b, + lastT: math.MinInt64, + lastIter: a, + aval: a.Next(), + bval: b.Next(), } } func (it *dedupSeriesIterator) Next() chunkenc.ValueType { - lastValue := it.lastV + lastFloatVal, isFloatVal := it.lastFloatVal() lastUseA := it.useA defer func() { - if it.useA != lastUseA { + if it.useA != lastUseA && isFloatVal { // We switched replicas. // Ensure values are correct bases on value before At. - it.adjustAtValue(lastValue) + // TODO(rabenhorst): Investigate if we also need to implement adjusting histograms here. + it.adjustAtValue(lastFloatVal) } }() @@ -421,14 +422,16 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { if it.aval == chunkenc.ValNone { it.useA = false if it.bval != chunkenc.ValNone { - it.lastT, it.lastV = it.b.At() + it.lastT = it.b.AtT() + it.lastIter = it.b it.penB = 0 } return it.bval } if it.bval == chunkenc.ValNone { it.useA = true - it.lastT, it.lastV = it.a.At() + it.lastT = it.a.AtT() + it.lastIter = it.a it.penA = 0 return it.aval } @@ -436,8 +439,8 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { // with the smaller timestamp. // The applied penalty potentially already skipped potential samples already // that would have resulted in exaggerated sampling frequency. - ta, va := it.a.At() - tb, vb := it.b.At() + ta := it.a.AtT() + tb := it.b.AtT() it.useA = ta <= tb @@ -458,7 +461,8 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { } it.penA = 0 it.lastT = ta - it.lastV = va + it.lastIter = it.a + return it.aval } if it.lastT != math.MinInt64 { @@ -468,26 +472,40 @@ func (it *dedupSeriesIterator) Next() chunkenc.ValueType { } it.penB = 0 it.lastT = tb - it.lastV = vb + it.lastIter = it.b return it.bval } -func (it *dedupSeriesIterator) adjustAtValue(lastValue float64) { - if it.aval != chunkenc.ValNone { - it.a.adjustAtValue(lastValue) +func (it *dedupSeriesIterator) lastFloatVal() (float64, bool) { + if it.useA && it.aval == chunkenc.ValFloat { + _, v := it.lastIter.At() + return v, true } - if it.bval != chunkenc.ValNone { - it.b.adjustAtValue(lastValue) + if !it.useA && it.bval == chunkenc.ValFloat { + _, v := it.lastIter.At() + return v, true + } + return 0, false +} + +func (it *dedupSeriesIterator) adjustAtValue(lastFloatValue float64) { + if it.aval == chunkenc.ValFloat { + it.a.adjustAtValue(lastFloatValue) + } + if it.bval == chunkenc.ValFloat { + it.b.adjustAtValue(lastFloatValue) } } -// TODO(rabenhorst): Native histogram support needs to be implemented, float type hardcoded. func (it *dedupSeriesIterator) Seek(t int64) chunkenc.ValueType { // Don't use underlying Seek, but iterate over next to not miss gaps. for { - ts, _ := it.At() + ts := it.AtT() if ts >= t { - return chunkenc.ValFloat + if it.useA { + return it.a.Seek(ts) + } + return it.b.Seek(ts) } if it.Next() == chunkenc.ValNone { return chunkenc.ValNone @@ -496,27 +514,23 @@ func (it *dedupSeriesIterator) Seek(t int64) chunkenc.ValueType { } func (it *dedupSeriesIterator) At() (int64, float64) { - if it.useA { - return it.a.At() - } - return it.b.At() + return it.lastIter.At() } -// TODO(rabenhorst): Needs to be implemented for native histogram support. 
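For context on the change above: the deduplicating iterator no longer remembers the last float value, only which replica iterator produced the last sample (lastIter), so At, AtHistogram and AtFloatHistogram can all delegate to it, and the replica choice itself is made purely on timestamps. The sketch below is a simplified illustration of that idea only; it merges two pre-sorted slices with a fixed tolerance instead of the adaptive penalty used by dedupSeriesIterator, and sample/dedupMerge are illustrative names.

package main

import "fmt"

// sample is a simplified stand-in for either a float or a histogram sample;
// only the timestamp matters for the dedup decision.
type sample struct {
	t int64
	v float64
}

// dedupMerge merges two replica streams sorted by timestamp. When both
// replicas carry a sample within tolerance of each other, only one is kept.
// The decision is made on timestamps alone, so the same logic works for float
// and histogram samples alike. (Illustrative sketch, not the Thanos code.)
func dedupMerge(a, b []sample, tolerance int64) []sample {
	var out []sample
	i, j := 0, 0
	for i < len(a) || j < len(b) {
		switch {
		case j >= len(b):
			out = append(out, a[i])
			i++
		case i >= len(a):
			out = append(out, b[j])
			j++
		case a[i].t <= b[j].t:
			out = append(out, a[i])
			// Drop the other replica's sample if it is (almost) the same timestamp.
			if b[j].t-a[i].t <= tolerance {
				j++
			}
			i++
		default:
			out = append(out, b[j])
			if a[i].t-b[j].t <= tolerance {
				i++
			}
			j++
		}
	}
	return out
}

func main() {
	a := []sample{{t: 0, v: 1}, {t: 30, v: 2}, {t: 60, v: 3}}
	b := []sample{{t: 1, v: 1}, {t: 31, v: 2}, {t: 61, v: 3}, {t: 91, v: 4}}
	fmt.Println(dedupMerge(a, b, 5)) // [{0 1} {30 2} {60 3} {91 4}]
}

The real iterator additionally applies a time-based penalty when it switches replicas, so that gaps in one replica do not lead to oversampling from the other.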
func (it *dedupSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - panic("not implemented") + return it.lastIter.AtHistogram() } func (it *dedupSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - panic("not implemented") + return it.lastIter.AtFloatHistogram() } func (it *dedupSeriesIterator) AtT() int64 { var t int64 if it.useA { - t, _ = it.a.At() + t = it.a.AtT() } else { - t, _ = it.b.At() + t = it.b.AtT() } return t } @@ -553,18 +567,16 @@ func (it *boundedSeriesIterator) At() (t int64, v float64) { return it.it.At() } -// TODO(rabenhorst): Needs to be implemented for native histogram support. func (it *boundedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - panic("not implemented") + return it.it.AtHistogram() } func (it *boundedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - panic("not implemented") + return it.it.AtFloatHistogram() } func (it *boundedSeriesIterator) AtT() int64 { - t, _ := it.it.At() - return t + return it.it.AtT() } func (it *boundedSeriesIterator) Next() chunkenc.ValueType { @@ -572,14 +584,14 @@ func (it *boundedSeriesIterator) Next() chunkenc.ValueType { if valueType == chunkenc.ValNone { return chunkenc.ValNone } - t, _ := it.it.At() + t := it.it.AtT() // Advance the iterator if we are before the valid interval. if t < it.mint { if it.Seek(it.mint) == chunkenc.ValNone { return chunkenc.ValNone } - t, _ = it.it.At() + t = it.it.AtT() } // Once we passed the valid interval, there is no going back. if t <= it.maxt { diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go index 00c970b9296..76f8958e79f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/pushdown_iter.go @@ -122,14 +122,13 @@ func (it *pushdownSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHis } func (it *pushdownSeriesIterator) AtT() int64 { - t, _ := it.a.At() + t := it.a.AtT() return t } -// TODO(rabenhorst): Native histogram support needs to be implemented, currently float type is hardcoded. 
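The pattern repeated across these iterator changes is to read timestamps via AtT() instead of destructuring At(), because At() is only defined for float samples while AtT() is valid for every sample type, including native histograms. Below is a small local sketch of why that matters; the iterator interface here is a cut-down stand-in and not the real chunkenc.Iterator.

package main

import "fmt"

// valueType is a stand-in for chunkenc.ValueType.
type valueType int

const (
	valNone valueType = iota
	valFloat
	valHistogram
)

// iterator is a cut-down, local stand-in for a sample iterator: At is only
// meaningful for float samples, while AtT is valid for any sample type.
type iterator interface {
	Next() valueType
	At() (int64, float64) // float samples only
	AtT() int64           // any sample type
}

// maxTimestamp drains the iterator and returns the last timestamp seen.
// Because it only needs timestamps it uses AtT, and therefore keeps working
// when the stream contains histogram samples.
func maxTimestamp(it iterator) (int64, bool) {
	var (
		last  int64
		found bool
	)
	for typ := it.Next(); typ != valNone; typ = it.Next() {
		last = it.AtT()
		found = true
	}
	return last, found
}

// sliceIter is a trivial in-memory implementation used for the example.
type sliceIter struct {
	ts  []int64
	typ []valueType
	i   int
}

func (s *sliceIter) Next() valueType {
	s.i++
	if s.i >= len(s.ts) {
		return valNone
	}
	return s.typ[s.i]
}
func (s *sliceIter) At() (int64, float64) { return s.ts[s.i], 0 }
func (s *sliceIter) AtT() int64           { return s.ts[s.i] }

func main() {
	it := &sliceIter{
		ts:  []int64{10, 20, 30},
		typ: []valueType{valFloat, valHistogram, valFloat},
		i:   -1,
	}
	fmt.Println(maxTimestamp(it)) // 30 true
}

Any helper that only needs timestamps (Seek loops, bounds checks) can then stay agnostic of the sample type, which is exactly what the boundedSeriesIterator and pushdownSeriesIterator changes do.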
func (it *pushdownSeriesIterator) Seek(t int64) chunkenc.ValueType { for { - ts, _ := it.At() + ts := it.AtT() if ts >= t { return chunkenc.ValFloat } diff --git a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go index 36a16a3112a..3df0fc69898 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go +++ b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go @@ -89,13 +89,20 @@ func (k *Keeper) NewGate(maxConcurrent int) Gate { func New(reg prometheus.Registerer, maxConcurrent int) Gate { promauto.With(reg).NewGauge(MaxGaugeOpts).Set(float64(maxConcurrent)) + var gate Gate + if maxConcurrent <= 0 { + gate = NewNoop() + } else { + gate = promgate.New(maxConcurrent) + } + return InstrumentGateDuration( promauto.With(reg).NewHistogram(DurationHistogramOpts), InstrumentGateTotal( promauto.With(reg).NewCounter(TotalCounterOpts), InstrumentGateInFlight( promauto.With(reg).NewGauge(InFlightGaugeOpts), - promgate.New(maxConcurrent), + gate, ), ), ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index b1fbd898a25..23517b8d91c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -495,6 +495,10 @@ func NewBucketStore( return nil, errors.Wrap(err, "validate config") } + if dir == "" { + return s, nil + } + if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create dir") } @@ -591,6 +595,10 @@ func (s *BucketStore) InitialSync(ctx context.Context) error { return errors.Wrap(err, "sync block") } + if s.dir == "" { + return nil + } + fis, err := os.ReadDir(s.dir) if err != nil { return errors.Wrap(err, "read dir") @@ -624,15 +632,20 @@ func (s *BucketStore) getBlock(id ulid.ULID) *bucketBlock { } func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err error) { - dir := filepath.Join(s.dir, meta.ULID.String()) + var dir string + if s.dir != "" { + dir = filepath.Join(s.dir, meta.ULID.String()) + } start := time.Now() level.Debug(s.logger).Log("msg", "loading new block", "id", meta.ULID) defer func() { if err != nil { s.metrics.blockLoadFailures.Inc() - if err2 := os.RemoveAll(dir); err2 != nil { - level.Warn(s.logger).Log("msg", "failed to remove block we cannot load", "err", err2) + if dir != "" { + if err2 := os.RemoveAll(dir); err2 != nil { + level.Warn(s.logger).Log("msg", "failed to remove block we cannot load", "err", err2) + } } level.Warn(s.logger).Log("msg", "loading block failed", "elapsed", time.Since(start), "id", meta.ULID, "err", err) } else { @@ -721,6 +734,11 @@ func (s *BucketStore) removeBlock(id ulid.ULID) error { if err := b.Close(); err != nil { return errors.Wrap(err, "close block") } + + if b.dir == "" { + return nil + } + return os.RemoveAll(b.dir) } @@ -1208,6 +1226,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } shardMatcher := req.ShardInfo.Matcher(&s.buffers) + blockClient := newBlockSeriesClient( srv.Context(), s.logger, @@ -1220,9 +1239,11 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie s.seriesBatchSize, s.metrics.chunkFetchDuration, ) + defer blockClient.Close() g.Go(func() error { + span, _ := tracing.StartSpan(gctx, "bucket_store_block_series", tracing.Tags{ "block.id": blk.meta.ULID, "block.mint": blk.meta.MinTime, @@ -1464,6 +1485,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq SkipChunks: true, 
} blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + defer blockClient.Close() if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, @@ -1638,6 +1660,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR SkipChunks: true, } blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + defer blockClient.Close() if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index d9245035f45..d969ddb64f6 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -91,9 +91,9 @@ func newProxyStoreMetrics(reg prometheus.Registerer) *proxyStoreMetrics { return &m } -func RegisterStoreServer(storeSrv storepb.StoreServer) func(*grpc.Server) { +func RegisterStoreServer(storeSrv storepb.StoreServer, logger log.Logger) func(*grpc.Server) { return func(s *grpc.Server) { - storepb.RegisterStoreServer(s, storeSrv) + storepb.RegisterStoreServer(s, NewRecoverableStoreServer(logger, storeSrv)) } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/recover.go b/vendor/github.com/thanos-io/thanos/pkg/store/recover.go new file mode 100644 index 00000000000..c453c3bec3c --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/recover.go @@ -0,0 +1,52 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package store + +import ( + "fmt" + "runtime" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +type recoverableStoreServer struct { + logger log.Logger + storepb.StoreServer +} + +func NewRecoverableStoreServer(logger log.Logger, storeServer storepb.StoreServer) *recoverableStoreServer { + return &recoverableStoreServer{logger: logger, StoreServer: storeServer} +} + +func (r *recoverableStoreServer) Series(request *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + defer r.recover(srv) + return r.StoreServer.Series(request, srv) +} + +func (r *recoverableStoreServer) recover(srv storepb.Store_SeriesServer) { + e := recover() + if e == nil { + return + } + + switch err := e.(type) { + case runtime.Error: + // Print the stack trace but do not inhibit the running application. 
+ buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + level.Error(r.logger).Log("msg", "runtime panic in TSDB Series server", "err", err.Error(), "stacktrace", string(buf)) + if err := srv.Send(storepb.NewWarnSeriesResponse(err)); err != nil { + level.Error(r.logger).Log("err", err) + } + default: + if err := srv.Send(storepb.NewWarnSeriesResponse(errors.New(fmt.Sprintf("unknown error while processing Series: %v", e)))); err != nil { + level.Error(r.logger).Log("err", err) + } + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index c1f4b9b8bfe..85f858562bb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -13,10 +13,12 @@ import ( "github.com/gogo/protobuf/types" "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "google.golang.org/grpc/codes" "github.com/thanos-io/thanos/pkg/store/labelpb" + prompb "github.com/thanos-io/thanos/pkg/store/storepb/prompb" ) var PartialResponseStrategyValues = func() []string { @@ -552,3 +554,29 @@ func (m *QueryHints) IsSafeToExecute() bool { return false } + +// HistogramProtoToHistogram extracts a (normal integer) Histogram from the +// provided proto message. The caller has to make sure that the proto message +// represents an interger histogram and not a float histogram. +func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { + return &histogram.Histogram{ + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: hp.GetZeroCountInt(), + Count: hp.GetCountInt(), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: hp.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: hp.GetNegativeDeltas(), + } +} + +func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go index aeb4a25aef2..7d2b8c7f619 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go @@ -55,6 +55,14 @@ type inProcessStream struct { err chan error } +func NewInProcessStream(ctx context.Context, bufferSize int) *inProcessStream { + return &inProcessStream{ + ctx: ctx, + recv: make(chan *SeriesResponse, bufferSize), + err: make(chan error), + } +} + func (s *inProcessStream) Context() context.Context { return s.ctx } func (s *inProcessStream) Send(r *SeriesResponse) error { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go index d8ef292dcfa..f5332f345ba 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go @@ -70,6 +70,37 @@ func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_166e07899dab7c14, []int{0, 0} } +type Histogram_ResetHint int32 + +const ( + Histogram_UNKNOWN Histogram_ResetHint = 0 + Histogram_YES Histogram_ResetHint = 1 + 
Histogram_NO Histogram_ResetHint = 2 + Histogram_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "UNKNOWN", + 1: "YES", + 2: "NO", + 3: "GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "UNKNOWN": 0, + "YES": 1, + "NO": 2, + "GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_166e07899dab7c14, []int{3, 0} +} + type LabelMatcher_Type int32 const ( @@ -98,25 +129,28 @@ func (x LabelMatcher_Type) String() string { } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{4, 0} + return fileDescriptor_166e07899dab7c14, []int{6, 0} } // We require this to match chunkenc.Encoding. type Chunk_Encoding int32 const ( - Chunk_UNKNOWN Chunk_Encoding = 0 - Chunk_XOR Chunk_Encoding = 1 + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 ) var Chunk_Encoding_name = map[int32]string{ 0: "UNKNOWN", 1: "XOR", + 2: "HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "UNKNOWN": 0, - "XOR": 1, + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, } func (x Chunk_Encoding) String() string { @@ -124,7 +158,7 @@ func (x Chunk_Encoding) String() string { } func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{6, 0} + return fileDescriptor_166e07899dab7c14, []int{8, 0} } type MetricMetadata struct { @@ -305,20 +339,315 @@ func (m *Exemplar) GetTimestamp() int64 { return 0 } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus_copy.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_166e07899dab7c14, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + 
return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []*BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []*BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). 
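To make the span encoding described just below more concrete: spans describe runs of consecutive bucket indexes (gaps are expressed as offsets), and the *_deltas fields store each bucket count relative to the previous one. The decoder here is purely illustrative (bucket index origin and schema handling are simplified) and is not code from this patch.

package main

import "fmt"

// span mirrors BucketSpan: offset is relative to the end of the previous
// span, length is the number of consecutive buckets it covers.
type span struct {
	offset int32
	length uint32
}

// expandDeltas turns spans plus delta-encoded counts (the "*_deltas" fields
// of the Histogram message) into absolute counts keyed by bucket index.
// (Illustrative sketch; the bucket index origin is simplified.)
func expandDeltas(spans []span, deltas []int64) map[int32]int64 {
	out := make(map[int32]int64)
	var (
		idx   int32
		count int64
		d     int
	)
	for _, s := range spans {
		idx += s.offset
		for i := uint32(0); i < s.length; i++ {
			count += deltas[d] // each delta is relative to the previous bucket's count
			d++
			out[idx] = count
			idx++
		}
	}
	return out
}

func main() {
	// Two spans: buckets 0..1 and, after a gap of 3, buckets 5..6.
	spans := []span{{offset: 0, length: 2}, {offset: 3, length: 2}}
	deltas := []int64{2, 1, -1, 2} // absolute counts: 2, 3, 2, 4
	fmt.Println(expandDeltas(spans, deltas)) // map[0:2 1:3 5:2 6:4]
}

Float histograms use the *_counts fields instead, which already hold absolute counts and need no delta decoding.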
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_166e07899dab7c14, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + // TimeSeries represents samples and labels for a single time series. type TimeSeries struct { // Labels have to be sorted by label names and without duplicated label names. // TODO(bwplotka): Don't use zero copy ZLabels, see https://github.com/thanos-io/thanos/pull/3279 for details. - Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` } func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{3} + return fileDescriptor_166e07899dab7c14, []int{5} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -361,6 +690,13 @@ func (m *TimeSeries) GetExemplars() []Exemplar { return nil } +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + // Matcher specifies a rule, which can match or set of labels or not. 
type LabelMatcher struct { Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus_copy.LabelMatcher_Type" json:"type,omitempty"` @@ -372,7 +708,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{4} + return fileDescriptor_166e07899dab7c14, []int{6} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -436,7 +772,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{5} + return fileDescriptor_166e07899dab7c14, []int{7} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +863,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{6} + return fileDescriptor_166e07899dab7c14, []int{8} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -596,7 +932,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_166e07899dab7c14, []int{7} + return fileDescriptor_166e07899dab7c14, []int{9} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,11 +970,14 @@ func (m *ChunkedSeries) GetChunks() []Chunk { func init() { proto.RegisterEnum("prometheus_copy.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus_copy.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) proto.RegisterEnum("prometheus_copy.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterEnum("prometheus_copy.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*MetricMetadata)(nil), "prometheus_copy.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus_copy.Sample") proto.RegisterType((*Exemplar)(nil), "prometheus_copy.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus_copy.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus_copy.BucketSpan") proto.RegisterType((*TimeSeries)(nil), "prometheus_copy.TimeSeries") proto.RegisterType((*LabelMatcher)(nil), "prometheus_copy.LabelMatcher") proto.RegisterType((*ReadHints)(nil), "prometheus_copy.ReadHints") @@ -649,57 +988,79 @@ func init() { func init() { proto.RegisterFile("store/storepb/prompb/types.proto", fileDescriptor_166e07899dab7c14) } var fileDescriptor_166e07899dab7c14 = []byte{ - // 796 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x8f, 0xdb, 0x44, - 0x14, 0xcf, 0xd8, 0x8e, 0x1d, 0xbf, 0xfd, 0x83, 0x35, 0x2a, 0xd4, 0xbb, 0x42, 0x59, 0xcb, 0xa7, - 0x08, 0x81, 0x23, 0xb5, 0x15, 0x5c, 0x0a, 0xd2, 0x6e, 0xe5, 0x6e, 0x2b, 0x70, 0xa2, 0x4e, 0xb2, - 0x02, 0x7a, 0x89, 0x26, 0xce, 0xd4, 0xb1, 0x1a, 0xff, 0x91, 0x67, 0x82, 0x36, 0xdf, 0x82, 0x33, - 0x37, 0xc4, 0x8d, 0x1b, 0x7c, 0x8a, 0x1e, 0x7b, 0x44, 
0x1c, 0x2a, 0xb4, 0x7b, 0xe2, 0x5b, 0xa0, - 0x19, 0x3b, 0xf5, 0xa6, 0x4b, 0xaf, 0xbd, 0xac, 0xde, 0xfb, 0xfd, 0xde, 0x3f, 0xbf, 0xf7, 0xdb, - 0x09, 0x78, 0x5c, 0x14, 0x15, 0x1b, 0xaa, 0xbf, 0xe5, 0x7c, 0x58, 0x56, 0x45, 0x56, 0xce, 0x87, - 0x62, 0x53, 0x32, 0x1e, 0x94, 0x55, 0x21, 0x0a, 0xfc, 0x91, 0xc4, 0x98, 0x58, 0xb2, 0x35, 0x9f, - 0xc5, 0x45, 0xb9, 0x39, 0xbe, 0x93, 0x14, 0x49, 0xa1, 0xb8, 0xa1, 0xb4, 0xea, 0xb0, 0xe3, 0xa3, - 0xba, 0xd0, 0x8a, 0xce, 0xd9, 0x6a, 0xb7, 0x82, 0xff, 0xab, 0x06, 0x87, 0x11, 0x13, 0x55, 0x1a, - 0x47, 0x4c, 0xd0, 0x05, 0x15, 0x14, 0x7f, 0x03, 0x86, 0x8c, 0x70, 0x91, 0x87, 0x06, 0x87, 0xf7, - 0x3e, 0x0b, 0xde, 0xe9, 0x11, 0xec, 0x86, 0x37, 0xee, 0x74, 0x53, 0x32, 0xa2, 0xf2, 0xf0, 0xe7, - 0x80, 0x33, 0x85, 0xcd, 0x5e, 0xd0, 0x2c, 0x5d, 0x6d, 0x66, 0x39, 0xcd, 0x98, 0xab, 0x79, 0x68, - 0x60, 0x13, 0xa7, 0x66, 0x1e, 0x2b, 0x62, 0x44, 0x33, 0x86, 0x31, 0x18, 0x4b, 0xb6, 0x2a, 0x5d, - 0x43, 0xf1, 0xca, 0x96, 0xd8, 0x3a, 0x4f, 0x85, 0xdb, 0xad, 0x31, 0x69, 0xfb, 0x1b, 0x80, 0xb6, - 0x13, 0xde, 0x03, 0xeb, 0x62, 0xf4, 0xed, 0x68, 0xfc, 0xfd, 0xc8, 0xe9, 0x48, 0xe7, 0xd1, 0xf8, - 0x62, 0x34, 0x0d, 0x89, 0x83, 0xb0, 0x0d, 0xdd, 0xf3, 0xd3, 0x8b, 0xf3, 0xd0, 0xd1, 0xf0, 0x01, - 0xd8, 0x4f, 0x9e, 0x4e, 0xa6, 0xe3, 0x73, 0x72, 0x1a, 0x39, 0x3a, 0xc6, 0x70, 0xa8, 0x98, 0x16, - 0x33, 0x64, 0xea, 0xe4, 0x22, 0x8a, 0x4e, 0xc9, 0x8f, 0x4e, 0x17, 0xf7, 0xc0, 0x78, 0x3a, 0x7a, - 0x3c, 0x76, 0x4c, 0xbc, 0x0f, 0xbd, 0xc9, 0xf4, 0x74, 0x1a, 0x4e, 0xc2, 0xa9, 0x63, 0xf9, 0x0f, - 0xc1, 0x9c, 0xd0, 0xac, 0x5c, 0x31, 0x7c, 0x07, 0xba, 0x3f, 0xd1, 0xd5, 0xba, 0xde, 0x0d, 0x22, - 0xb5, 0x83, 0x3f, 0x05, 0x5b, 0xa4, 0x19, 0xe3, 0x82, 0x66, 0xa5, 0xfa, 0x4e, 0x9d, 0xb4, 0x80, - 0xff, 0x1b, 0x82, 0x5e, 0x78, 0xc9, 0xb2, 0x72, 0x45, 0x2b, 0x1c, 0x83, 0xa9, 0xae, 0xc0, 0x5d, - 0xe4, 0xe9, 0x83, 0xbd, 0x7b, 0x07, 0x81, 0x58, 0xd2, 0xbc, 0xe0, 0xc1, 0x77, 0x12, 0x3d, 0x7b, - 0xf8, 0xea, 0xcd, 0x49, 0xe7, 0xef, 0x37, 0x27, 0x0f, 0x92, 0x54, 0x2c, 0xd7, 0xf3, 0x20, 0x2e, - 0xb2, 0x61, 0x1d, 0xf0, 0x45, 0x5a, 0x34, 0xd6, 0xb0, 0x7c, 0x99, 0x0c, 0x77, 0x0e, 0x1a, 0x3c, - 0x57, 0xd9, 0xa4, 0x29, 0xdd, 0x4e, 0xa9, 0xbd, 0x77, 0x4a, 0xfd, 0xdd, 0x29, 0xff, 0x45, 0x00, - 0xd3, 0x34, 0x63, 0x13, 0x56, 0xa5, 0x8c, 0x7f, 0x98, 0x39, 0xbf, 0x02, 0x8b, 0xab, 0xbd, 0x72, - 0x57, 0x53, 0x5d, 0xee, 0xde, 0xd2, 0x5a, 0xbd, 0xf7, 0x33, 0x43, 0xf6, 0x23, 0xdb, 0x68, 0xfc, - 0x35, 0xd8, 0xac, 0xd9, 0x28, 0x77, 0x75, 0x95, 0x7a, 0x74, 0x2b, 0x75, 0xbb, 0xf3, 0x26, 0xb9, - 0xcd, 0xf0, 0x7f, 0x41, 0xb0, 0xaf, 0x26, 0x89, 0xa8, 0x88, 0x97, 0xac, 0xc2, 0x5f, 0xee, 0x28, - 0xde, 0xbf, 0x55, 0xea, 0x66, 0x70, 0x70, 0x43, 0xe9, 0x18, 0x8c, 0x1b, 0xda, 0x56, 0x76, 0xbb, - 0x7c, 0x5d, 0x81, 0xb5, 0xe3, 0x0f, 0xc0, 0x50, 0xba, 0x35, 0x41, 0x0b, 0x9f, 0x39, 0x1d, 0x6c, - 0x81, 0x3e, 0x0a, 0x9f, 0x39, 0x48, 0x02, 0x44, 0x6a, 0x55, 0x02, 0x24, 0x74, 0x74, 0xff, 0x0f, - 0x04, 0x36, 0x61, 0x74, 0xf1, 0x24, 0xcd, 0x05, 0xc7, 0x77, 0xc1, 0xe2, 0x82, 0x95, 0xb3, 0x8c, - 0xab, 0xe1, 0x74, 0x62, 0x4a, 0x37, 0xe2, 0xb2, 0xf5, 0x8b, 0x75, 0x1e, 0x6f, 0x5b, 0x4b, 0x1b, - 0x1f, 0x41, 0x8f, 0x0b, 0x5a, 0x09, 0x19, 0x5d, 0x1f, 0xd8, 0x52, 0x7e, 0xc4, 0xf1, 0xc7, 0x60, - 0xb2, 0x7c, 0x21, 0x09, 0x43, 0x11, 0x5d, 0x96, 0x2f, 0x22, 0x8e, 0x8f, 0xa1, 0x97, 0x54, 0xc5, - 0xba, 0x4c, 0xf3, 0xc4, 0xed, 0x7a, 0xfa, 0xc0, 0x26, 0x6f, 0x7d, 0x7c, 0x08, 0xda, 0x7c, 0xe3, - 0x9a, 0x1e, 0x1a, 0xf4, 0x88, 0x36, 0xdf, 0xc8, 0xea, 0x15, 0xcd, 0x13, 0x26, 0x8b, 0x58, 0x75, - 0x75, 0xe5, 0x47, 0xdc, 0xff, 0x13, 0x41, 0xf7, 0xd1, 0x72, 0x9d, 0xbf, 0xc4, 
0x7d, 0xd8, 0xcb, - 0xd2, 0x7c, 0x26, 0x75, 0xd5, 0xce, 0x6c, 0x67, 0x69, 0x2e, 0xb5, 0x15, 0x71, 0xc5, 0xd3, 0xcb, - 0xb7, 0x7c, 0xf3, 0xcf, 0x92, 0xd1, 0xcb, 0x86, 0xbf, 0xdf, 0x5c, 0x42, 0x57, 0x97, 0x38, 0xb9, - 0x75, 0x09, 0xd5, 0x25, 0x08, 0xf3, 0xb8, 0x58, 0xa4, 0x79, 0xd2, 0x9e, 0x41, 0xbe, 0x44, 0xea, - 0xd3, 0xf6, 0x89, 0xb2, 0x7d, 0x0f, 0x7a, 0xdb, 0xa8, 0xdd, 0xc7, 0xc2, 0x02, 0xfd, 0x87, 0x31, - 0x71, 0x90, 0xff, 0x3b, 0x82, 0x03, 0x55, 0x8e, 0x2d, 0x3e, 0xa4, 0xe8, 0x1f, 0x80, 0x19, 0xcb, - 0xae, 0x5b, 0xcd, 0x7f, 0xf2, 0xff, 0xdf, 0xd8, 0xa8, 0xb6, 0x89, 0x3d, 0xf3, 0x5e, 0x5d, 0xf5, - 0xd1, 0xeb, 0xab, 0x3e, 0xfa, 0xe7, 0xaa, 0x8f, 0x7e, 0xbe, 0xee, 0x77, 0x5e, 0x5f, 0xf7, 0x3b, - 0x7f, 0x5d, 0xf7, 0x3b, 0xcf, 0xcd, 0xfa, 0x67, 0x61, 0x6e, 0xaa, 0xf7, 0xfc, 0xfe, 0x7f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x98, 0xc8, 0x2e, 0xfd, 0x35, 0x06, 0x00, 0x00, + // 1139 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x8f, 0x1a, 0xc7, + 0x13, 0x67, 0x66, 0x60, 0x60, 0x6a, 0x01, 0x8f, 0x5b, 0xfe, 0xdb, 0xe3, 0xfd, 0x27, 0x2c, 0x19, + 0xe5, 0x81, 0xac, 0x04, 0x24, 0x7b, 0x95, 0x5c, 0x36, 0x51, 0x96, 0x0d, 0xfb, 0x50, 0x02, 0xc8, + 0x0d, 0xab, 0xc4, 0xbe, 0xa0, 0x06, 0x7a, 0x99, 0xd1, 0x32, 0x0f, 0x4d, 0x37, 0xd6, 0x92, 0x4f, + 0x91, 0x73, 0x6e, 0x51, 0x6e, 0xc9, 0x29, 0x1f, 0x21, 0x37, 0x9f, 0x22, 0x1f, 0xa3, 0x1c, 0xac, + 0x68, 0xf7, 0x8b, 0x44, 0xdd, 0x33, 0xc3, 0xc0, 0x12, 0x4b, 0x39, 0xf9, 0x82, 0xaa, 0x7e, 0xf5, + 0xfa, 0x4d, 0x57, 0x75, 0x35, 0x50, 0x67, 0x3c, 0x88, 0x68, 0x4b, 0xfe, 0x86, 0xe3, 0x56, 0x18, + 0x05, 0x5e, 0x38, 0x6e, 0xf1, 0x65, 0x48, 0x59, 0x33, 0x8c, 0x02, 0x1e, 0xa0, 0x3b, 0x02, 0xa3, + 0xdc, 0xa1, 0x0b, 0x36, 0x9a, 0x04, 0xe1, 0x72, 0xf7, 0xde, 0x2c, 0x98, 0x05, 0xd2, 0xd6, 0x12, + 0x52, 0xec, 0xb6, 0xfb, 0x30, 0x4e, 0x34, 0x27, 0x63, 0x3a, 0xdf, 0xcc, 0x60, 0xff, 0xa4, 0x42, + 0xb5, 0x4b, 0x79, 0xe4, 0x4e, 0xba, 0x94, 0x93, 0x29, 0xe1, 0x04, 0x7d, 0x01, 0x79, 0xe1, 0x61, + 0x29, 0x75, 0xa5, 0x51, 0x7d, 0xfc, 0xa8, 0x79, 0xab, 0x46, 0x73, 0xd3, 0x3d, 0x51, 0x87, 0xcb, + 0x90, 0x62, 0x19, 0x87, 0x3e, 0x06, 0xe4, 0x49, 0x6c, 0x74, 0x41, 0x3c, 0x77, 0xbe, 0x1c, 0xf9, + 0xc4, 0xa3, 0x96, 0x5a, 0x57, 0x1a, 0x06, 0x36, 0x63, 0xcb, 0xb1, 0x34, 0xf4, 0x88, 0x47, 0x11, + 0x82, 0xbc, 0x43, 0xe7, 0xa1, 0x95, 0x97, 0x76, 0x29, 0x0b, 0x6c, 0xe1, 0xbb, 0xdc, 0x2a, 0xc4, + 0x98, 0x90, 0xed, 0x25, 0x40, 0x56, 0x09, 0xed, 0x40, 0xf1, 0xbc, 0xf7, 0x75, 0xaf, 0xff, 0x6d, + 0xcf, 0xcc, 0x09, 0xe5, 0xa8, 0x7f, 0xde, 0x1b, 0x76, 0xb0, 0xa9, 0x20, 0x03, 0x0a, 0x27, 0x87, + 0xe7, 0x27, 0x1d, 0x53, 0x45, 0x15, 0x30, 0x4e, 0xcf, 0x06, 0xc3, 0xfe, 0x09, 0x3e, 0xec, 0x9a, + 0x1a, 0x42, 0x50, 0x95, 0x96, 0x0c, 0xcb, 0x8b, 0xd0, 0xc1, 0x79, 0xb7, 0x7b, 0x88, 0x9f, 0x99, + 0x05, 0x54, 0x82, 0xfc, 0x59, 0xef, 0xb8, 0x6f, 0xea, 0xa8, 0x0c, 0xa5, 0xc1, 0xf0, 0x70, 0xd8, + 0x19, 0x74, 0x86, 0x66, 0xd1, 0x3e, 0x00, 0x7d, 0x40, 0xbc, 0x70, 0x4e, 0xd1, 0x3d, 0x28, 0xbc, + 0x20, 0xf3, 0x45, 0x7c, 0x36, 0x0a, 0x8e, 0x15, 0xf4, 0x0e, 0x18, 0xdc, 0xf5, 0x28, 0xe3, 0xc4, + 0x0b, 0xe5, 0x77, 0x6a, 0x38, 0x03, 0xec, 0x9f, 0x15, 0x28, 0x75, 0xae, 0xa8, 0x17, 0xce, 0x49, + 0x84, 0x26, 0xa0, 0xcb, 0x2e, 0x30, 0x4b, 0xa9, 0x6b, 0x8d, 0x9d, 0xc7, 0x95, 0x26, 0x77, 0x88, + 0x1f, 0xb0, 0xe6, 0x37, 0x02, 0x6d, 0x1f, 0xbc, 0x7c, 0xbd, 0x97, 0xfb, 0xeb, 0xf5, 0xde, 0xfe, + 0xcc, 0xe5, 0xce, 0x62, 0xdc, 0x9c, 0x04, 0x5e, 0x2b, 0x76, 0xf8, 0xc4, 0x0d, 0x12, 0xa9, 0x15, + 0x5e, 0xce, 0x5a, 0x1b, 0x0d, 0x6d, 0x3e, 0x97, 0xd1, 0x38, 0x49, 0x9d, 0xb1, 
0x54, 0xdf, 0xc8, + 0x52, 0xbb, 0xcd, 0xf2, 0x8f, 0x02, 0x18, 0xa7, 0x2e, 0xe3, 0xc1, 0x2c, 0x22, 0x1e, 0x7a, 0x17, + 0x8c, 0x49, 0xb0, 0xf0, 0xf9, 0xc8, 0xf5, 0xb9, 0xfc, 0xd6, 0xfc, 0x69, 0x0e, 0x97, 0x24, 0x74, + 0xe6, 0x73, 0xf4, 0x1e, 0xec, 0xc4, 0xe6, 0x8b, 0x79, 0x40, 0x78, 0x5c, 0xe6, 0x34, 0x87, 0x41, + 0x82, 0xc7, 0x02, 0x43, 0x26, 0x68, 0x6c, 0xe1, 0xc9, 0x3a, 0x0a, 0x16, 0x22, 0xba, 0x0f, 0x3a, + 0x9b, 0x38, 0xd4, 0x23, 0xb2, 0xd5, 0x77, 0x71, 0xa2, 0xa1, 0x0f, 0xa0, 0xfa, 0x3d, 0x8d, 0x82, + 0x11, 0x77, 0x22, 0xca, 0x9c, 0x60, 0x3e, 0x95, 0x6d, 0x57, 0x70, 0x45, 0xa0, 0xc3, 0x14, 0x44, + 0x1f, 0x26, 0x6e, 0x19, 0x2f, 0x5d, 0xf2, 0x52, 0x70, 0x59, 0xe0, 0x47, 0x29, 0xb7, 0x47, 0x60, + 0xae, 0xf9, 0xc5, 0x04, 0x8b, 0x92, 0xa0, 0x82, 0xab, 0x2b, 0xcf, 0x98, 0x64, 0x1b, 0xaa, 0x3e, + 0x9d, 0x11, 0xee, 0xbe, 0xa0, 0x23, 0x16, 0x12, 0x9f, 0x59, 0x25, 0xd9, 0x95, 0xff, 0x6f, 0xcd, + 0x7c, 0x7b, 0x31, 0xb9, 0xa4, 0x7c, 0x10, 0x12, 0x1f, 0x57, 0xd2, 0x10, 0xa1, 0x31, 0xf4, 0x11, + 0xdc, 0x59, 0xe5, 0x98, 0xd2, 0x39, 0x27, 0xcc, 0x32, 0xea, 0x5a, 0x03, 0xe1, 0x55, 0xea, 0xaf, + 0x24, 0xba, 0xe1, 0x28, 0xc9, 0x31, 0x0b, 0xea, 0x5a, 0x43, 0xc9, 0x1c, 0x25, 0x33, 0x26, 0x58, + 0x85, 0x01, 0x73, 0xd7, 0x58, 0xed, 0xfc, 0x07, 0x56, 0x69, 0xc8, 0x8a, 0xd5, 0x2a, 0x47, 0xc2, + 0xaa, 0x1c, 0xb3, 0x4a, 0xe1, 0x8c, 0xd5, 0xca, 0x31, 0x61, 0x55, 0x89, 0x59, 0xa5, 0x70, 0xc2, + 0xea, 0x08, 0x20, 0xa2, 0x8c, 0xf2, 0x91, 0x23, 0xce, 0xbe, 0x2a, 0x77, 0xc3, 0xfb, 0x5b, 0x8c, + 0x56, 0x23, 0xd4, 0xc4, 0xc2, 0xf9, 0xd4, 0xf5, 0x39, 0x36, 0xa2, 0x54, 0xdc, 0x9c, 0xc1, 0x3b, + 0xb7, 0x67, 0x70, 0x1f, 0x8c, 0x55, 0xd4, 0xe6, 0x0d, 0x2f, 0x82, 0xf6, 0xac, 0x33, 0x30, 0x15, + 0xa4, 0x83, 0xda, 0xeb, 0x9b, 0x6a, 0x76, 0xcb, 0xb5, 0x76, 0x11, 0x0a, 0x92, 0x78, 0xbb, 0x0c, + 0x90, 0x75, 0xde, 0x3e, 0x00, 0xc8, 0x8e, 0x47, 0x0c, 0x5f, 0x70, 0x71, 0xc1, 0x68, 0x3c, 0xcd, + 0x77, 0x71, 0xa2, 0x09, 0x7c, 0x4e, 0xfd, 0x19, 0x77, 0xe4, 0x10, 0x57, 0x70, 0xa2, 0xd9, 0xbf, + 0xaa, 0x00, 0x43, 0xd7, 0xa3, 0x03, 0x1a, 0xb9, 0x94, 0xbd, 0x9d, 0x6b, 0xfb, 0x19, 0x14, 0x99, + 0x5c, 0x33, 0xcc, 0x52, 0x65, 0x95, 0x07, 0x5b, 0xc7, 0x1b, 0xaf, 0xa1, 0x76, 0x5e, 0xd4, 0xc3, + 0xa9, 0x37, 0xfa, 0x1c, 0x0c, 0x9a, 0x2c, 0x18, 0x66, 0x69, 0x32, 0xf4, 0xe1, 0x56, 0x68, 0xba, + 0x82, 0x92, 0xe0, 0x2c, 0x02, 0x7d, 0x09, 0xe0, 0xa4, 0x6d, 0x63, 0x56, 0x5e, 0xc6, 0xef, 0xbe, + 0xb9, 0xb3, 0x49, 0x82, 0xb5, 0x18, 0xfb, 0x47, 0x05, 0xca, 0xf2, 0x5b, 0xba, 0x84, 0x4f, 0x1c, + 0x1a, 0xa1, 0x4f, 0x37, 0x9e, 0x10, 0x7b, 0x2b, 0xd9, 0xba, 0x73, 0x73, 0xed, 0xe9, 0x40, 0x90, + 0x5f, 0x7b, 0x2c, 0xa4, 0x9c, 0x6d, 0x33, 0x4d, 0x82, 0xb1, 0x62, 0x37, 0x20, 0x2f, 0x1f, 0x02, + 0x1d, 0xd4, 0xce, 0xd3, 0x78, 0x42, 0x7a, 0x9d, 0xa7, 0xf1, 0x84, 0x60, 0xb1, 0xfc, 0x05, 0x80, + 0x3b, 0xa6, 0x66, 0xff, 0xa6, 0x88, 0xb1, 0x22, 0x53, 0x31, 0x55, 0x0c, 0x3d, 0x80, 0x22, 0xe3, + 0x34, 0x1c, 0x79, 0x4c, 0x92, 0xd3, 0xb0, 0x2e, 0xd4, 0x2e, 0x13, 0xa5, 0x2f, 0x16, 0xfe, 0x24, + 0x2d, 0x2d, 0x64, 0xf4, 0x10, 0x4a, 0x8c, 0x93, 0x88, 0x0b, 0xef, 0x78, 0x63, 0x16, 0xa5, 0xde, + 0x65, 0xe8, 0x7f, 0xa0, 0x53, 0x7f, 0x3a, 0x92, 0x07, 0x26, 0x0c, 0x05, 0xea, 0x4f, 0xbb, 0x0c, + 0xed, 0x42, 0x69, 0x16, 0x05, 0x8b, 0xd0, 0xf5, 0x67, 0x56, 0xa1, 0xae, 0x35, 0x0c, 0xbc, 0xd2, + 0x51, 0x15, 0xd4, 0xf1, 0x52, 0x6e, 0xad, 0x12, 0x56, 0xc7, 0x4b, 0x91, 0x3d, 0x22, 0xfe, 0x8c, + 0x8a, 0x24, 0xc5, 0x38, 0xbb, 0xd4, 0xbb, 0xcc, 0xfe, 0x5d, 0x81, 0xc2, 0x91, 0xb3, 0xf0, 0x2f, + 0x51, 0x0d, 0x76, 0x3c, 0xd7, 0x1f, 0x89, 0x4b, 0x92, 0x71, 0x36, 0x3c, 0xd7, 0x17, 0xd3, 0xd9, + 0x65, 
0xd2, 0x4e, 0xae, 0x56, 0xf6, 0xe4, 0xf5, 0xf1, 0xc8, 0x55, 0x62, 0x7f, 0x92, 0x74, 0x42, + 0x93, 0x9d, 0xd8, 0xdb, 0xea, 0x84, 0xac, 0xd2, 0xec, 0xf8, 0x93, 0x60, 0xea, 0xfa, 0xb3, 0xac, + 0x0d, 0xe2, 0x69, 0x97, 0x9f, 0x56, 0xc6, 0x52, 0xb6, 0x5b, 0x50, 0x4a, 0xbd, 0xb6, 0xee, 0xe6, + 0x77, 0x7d, 0xf1, 0xf2, 0x6e, 0x3c, 0xb7, 0xaa, 0xfd, 0x8b, 0x02, 0x15, 0x99, 0x9d, 0x4e, 0xdf, + 0xe6, 0x2d, 0xda, 0x07, 0x7d, 0x22, 0xaa, 0xa6, 0x97, 0xe8, 0xfe, 0xbf, 0x7f, 0x72, 0x32, 0xc5, + 0x89, 0x6f, 0xbb, 0xfe, 0xf2, 0xba, 0xa6, 0xbc, 0xba, 0xae, 0x29, 0x7f, 0x5f, 0xd7, 0x94, 0x1f, + 0x6e, 0x6a, 0xb9, 0x57, 0x37, 0xb5, 0xdc, 0x9f, 0x37, 0xb5, 0xdc, 0x73, 0x3d, 0xfe, 0xdb, 0x35, + 0xd6, 0xe5, 0xff, 0xa5, 0x27, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x67, 0xfa, 0x2f, 0x95, + 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -833,7 +1194,7 @@ func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { +func (m *Histogram) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -843,20 +1204,59 @@ func (m *TimeSeries) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -864,13 +1264,42 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x5a } } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if 
len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -878,27 +1307,210 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x42 } } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err } - i-- - dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1176,6 +1788,119 @@ func (m *Exemplar) Size() (n int) { return n } +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if 
len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + return n +} + func (m *TimeSeries) Size() (n int) { if m == nil { return 0 @@ -1200,6 +1925,12 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } return n } @@ -1665,7 +2396,7 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error { } return nil } -func (m *TimeSeries) Unmarshal(dAtA []byte) error { +func (m *Histogram) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1688,17 +2419,17 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) } - var msglen int + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1708,31 +2439,39 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + 
var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF } - var msglen int + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1742,31 +2481,29 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Samples = append(m.Samples, Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) } - var msglen int + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1776,26 +2513,669 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Exemplars = append(m.Exemplars, Exemplar{}) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = 
append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } 
+ if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto index f62ff7552bf..0fea825508b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto @@ -62,6 +62,72 @@ message Exemplar { int64 timestamp = 3; } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. + NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. 
They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + + // TimeSeries represents samples and labels for a single time series. message TimeSeries { // Labels have to be sorted by label names and without duplicated label names. @@ -69,6 +135,7 @@ message TimeSeries { repeated thanos.Label labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; repeated Sample samples = 2 [(gogoproto.nullable) = false]; repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; } // Matcher specifies a rule, which can match or set of labels or not. 
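For reference, here is a minimal, self-contained Go sketch of the bucket arithmetic described by the schema, span and delta comments in the Histogram/BucketSpan messages above. The helper names (bucketUpperBound, expandDeltas, span) are hypothetical and exist only for illustration; they are not part of the vendored code or of this patch.

package main

import (
	"fmt"
	"math"
)

// bucketUpperBound returns the upper boundary of bucket index i for a given
// schema n. Per the comment above, each boundary is the previous boundary
// times 2^(2^-n), so bucket i ends at (2^(2^-n))^i and 1 is always a boundary.
func bucketUpperBound(schema int32, i int) float64 {
	base := math.Exp2(math.Exp2(-float64(schema)))
	return math.Pow(base, float64(i))
}

// span mirrors BucketSpan: offset is the gap to the previous span (or the
// starting bucket index for the first span) and length is the number of
// consecutive buckets.
type span struct {
	offset int32
	length uint32
}

// expandDeltas turns spans plus integer deltas into absolute counts keyed by
// bucket index, applying the "delta against the previous bucket (or zero for
// the first bucket)" rule from the proto comments.
func expandDeltas(spans []span, deltas []int64) map[int32]int64 {
	counts := map[int32]int64{}
	var idx int32
	var cur int64
	d := 0
	for _, s := range spans {
		idx += s.offset
		for j := uint32(0); j < s.length; j++ {
			cur += deltas[d]
			d++
			counts[idx] = cur
			idx++
		}
	}
	return counts
}

func main() {
	// schema 3 => base = 2^(1/8); bucket 8 therefore ends exactly at 2.
	fmt.Println(bucketUpperBound(3, 8)) // 2
	// One span starting at bucket index 1 with 3 buckets and deltas 2, +1, -1
	// expands to absolute counts {1:2, 2:3, 3:2}.
	fmt.Println(expandDeltas([]span{{offset: 1, length: 3}}, []int64{2, 1, -1}))
}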
@@ -104,6 +171,7 @@ message Chunk { enum Encoding { UNKNOWN = 0; XOR = 1; + HISTOGRAM = 2; } Encoding type = 3; bytes data = 4; diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go index 271a785bf51..a87a135ba0a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go @@ -63,15 +63,18 @@ func (PartialResponseStrategy) EnumDescriptor() ([]byte, []int) { type Chunk_Encoding int32 const ( - Chunk_XOR Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 0 + Chunk_HISTOGRAM Chunk_Encoding = 1 ) var Chunk_Encoding_name = map[int32]string{ 0: "XOR", + 1: "HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "XOR": 0, + "XOR": 0, + "HISTOGRAM": 1, } func (x Chunk_Encoding) String() string { @@ -287,41 +290,42 @@ func init() { func init() { proto.RegisterFile("store/storepb/types.proto", fileDescriptor_121fba57de02d8e0) } var fileDescriptor_121fba57de02d8e0 = []byte{ - // 536 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x4f, 0x6f, 0xd3, 0x4e, - 0x10, 0xf5, 0xda, 0x8e, 0x93, 0xcc, 0xaf, 0x3f, 0x64, 0x96, 0x0a, 0xdc, 0x1e, 0x9c, 0xc8, 0x08, - 0x11, 0x55, 0xaa, 0x2d, 0x15, 0x8e, 0x5c, 0x12, 0x94, 0x1b, 0xb4, 0x74, 0x1b, 0x09, 0xd4, 0x0b, - 0xda, 0xb8, 0x2b, 0xdb, 0x6a, 0xfc, 0x47, 0xf6, 0xba, 0x24, 0xdf, 0x02, 0xc4, 0x9d, 0xcf, 0x93, - 0x63, 0x8f, 0x88, 0x43, 0x04, 0xc9, 0x17, 0x41, 0x1e, 0x3b, 0x40, 0xa4, 0x5c, 0xac, 0xf1, 0x7b, - 0x6f, 0x66, 0x76, 0xde, 0xce, 0xc2, 0x51, 0x21, 0xd3, 0x5c, 0x78, 0xf8, 0xcd, 0xa6, 0x9e, 0x5c, - 0x64, 0xa2, 0x70, 0xb3, 0x3c, 0x95, 0x29, 0x35, 0x64, 0xc8, 0x93, 0xb4, 0x38, 0x3e, 0x0c, 0xd2, - 0x20, 0x45, 0xc8, 0xab, 0xa2, 0x9a, 0x3d, 0x6e, 0x12, 0x67, 0x7c, 0x2a, 0x66, 0xbb, 0x89, 0xce, - 0x1d, 0xb4, 0x5e, 0x87, 0x65, 0x72, 0x4b, 0x4f, 0x40, 0xaf, 0x70, 0x8b, 0xf4, 0xc9, 0xe0, 0xc1, - 0xd9, 0x63, 0xb7, 0x2e, 0xe8, 0x22, 0xe9, 0x8e, 0x13, 0x3f, 0xbd, 0x89, 0x92, 0x80, 0xa1, 0x86, - 0x52, 0xd0, 0x6f, 0xb8, 0xe4, 0x96, 0xda, 0x27, 0x83, 0x03, 0x86, 0x31, 0xb5, 0x40, 0x0f, 0x79, - 0x11, 0x5a, 0x5a, 0x9f, 0x0c, 0xf4, 0x91, 0xbe, 0x5c, 0xf5, 0x08, 0x43, 0xc4, 0x79, 0x04, 0x9d, - 0x6d, 0x3e, 0x6d, 0x83, 0xf6, 0xe1, 0x82, 0x99, 0x8a, 0xf3, 0x8d, 0x80, 0x71, 0x25, 0xf2, 0x48, - 0x14, 0xd4, 0x07, 0x03, 0x4f, 0x56, 0x58, 0xa4, 0xaf, 0x0d, 0xfe, 0x3b, 0xfb, 0x7f, 0xdb, 0xfb, - 0x4d, 0x85, 0x8e, 0x5e, 0x2d, 0x57, 0x3d, 0xe5, 0xc7, 0xaa, 0xf7, 0x32, 0x88, 0x64, 0x58, 0x4e, - 0x5d, 0x3f, 0x8d, 0xbd, 0x5a, 0x70, 0x1a, 0xa5, 0x4d, 0xe4, 0x65, 0xb7, 0x81, 0xb7, 0x33, 0xa4, - 0x7b, 0x8d, 0xd9, 0xac, 0x29, 0x4d, 0x3d, 0x30, 0xfc, 0x6a, 0x94, 0xc2, 0x52, 0xb1, 0xc9, 0xc3, - 0x6d, 0x93, 0x61, 0x10, 0xe4, 0x38, 0x24, 0x9e, 0x59, 0x61, 0x8d, 0xcc, 0xf9, 0xaa, 0x42, 0xf7, - 0x0f, 0x47, 0x8f, 0xa0, 0x13, 0x47, 0xc9, 0x47, 0x19, 0xc5, 0xb5, 0x43, 0x1a, 0x6b, 0xc7, 0x51, - 0x32, 0x89, 0x62, 0x81, 0x14, 0x9f, 0xd7, 0x94, 0xda, 0x50, 0x7c, 0x8e, 0x54, 0x0f, 0xb4, 0x9c, - 0x7f, 0x42, 0x4b, 0xfe, 0x19, 0x0b, 0x2b, 0xb2, 0x8a, 0xa1, 0x4f, 0xa1, 0xe5, 0xa7, 0x65, 0x22, - 0x2d, 0x7d, 0x9f, 0xa4, 0xe6, 0xaa, 0x2a, 0x45, 0x19, 0x5b, 0xad, 0xbd, 0x55, 0x8a, 0x32, 0xae, - 0x04, 0x71, 0x94, 0x58, 0xc6, 0x5e, 0x41, 0x1c, 0x25, 0x28, 0xe0, 0x73, 0xab, 0xbd, 0x5f, 0xc0, - 0xe7, 0xf4, 0x39, 0xb4, 0xb1, 0x97, 0xc8, 0xad, 0xce, 0x3e, 0xd1, 0x96, 0x75, 0xbe, 0x10, 0x38, - 0x40, 0x63, 0xdf, 0x72, 0xe9, 0x87, 0x22, 0xa7, 0xa7, 0x3b, 0x6b, 0x73, 0xb4, 0x73, 0x75, 
0x8d, - 0xc6, 0x9d, 0x2c, 0x32, 0xf1, 0x77, 0x73, 0x12, 0xde, 0x18, 0xd5, 0x65, 0x18, 0xd3, 0x43, 0x68, - 0xdd, 0xf1, 0x59, 0x29, 0xd0, 0xa7, 0x2e, 0xab, 0x7f, 0x9c, 0x01, 0xe8, 0x55, 0x1e, 0x35, 0x40, - 0x1d, 0x5f, 0x9a, 0x4a, 0xb5, 0x39, 0xe7, 0xe3, 0x4b, 0x93, 0x54, 0x00, 0x1b, 0x9b, 0x2a, 0x02, - 0x6c, 0x6c, 0x6a, 0x27, 0x2e, 0x3c, 0x79, 0xc7, 0x73, 0x19, 0xf1, 0x19, 0x13, 0x45, 0x96, 0x26, - 0x85, 0xb8, 0x92, 0x39, 0x97, 0x22, 0x58, 0xd0, 0x0e, 0xe8, 0xef, 0x87, 0xec, 0xdc, 0x54, 0x68, - 0x17, 0x5a, 0xc3, 0xd1, 0x05, 0x9b, 0x98, 0x64, 0xf4, 0x6c, 0xf9, 0xcb, 0x56, 0x96, 0x6b, 0x9b, - 0xdc, 0xaf, 0x6d, 0xf2, 0x73, 0x6d, 0x93, 0xcf, 0x1b, 0x5b, 0xb9, 0xdf, 0xd8, 0xca, 0xf7, 0x8d, - 0xad, 0x5c, 0xb7, 0x9b, 0xe7, 0x35, 0x35, 0xf0, 0x81, 0xbc, 0xf8, 0x1d, 0x00, 0x00, 0xff, 0xff, - 0x13, 0xe7, 0xb3, 0x25, 0x76, 0x03, 0x00, 0x00, + // 553 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xc7, 0xbd, 0xb6, 0xe3, 0x24, 0xf3, 0x6b, 0x7f, 0x32, 0xab, 0x0a, 0xdc, 0x1e, 0x9c, 0xc8, + 0x08, 0x11, 0x55, 0xaa, 0x2d, 0x15, 0x8e, 0x5c, 0x12, 0x14, 0x01, 0x12, 0x6d, 0xe8, 0x26, 0x12, + 0xa8, 0x17, 0xb4, 0x71, 0x57, 0xb6, 0xd5, 0xf8, 0x8f, 0xbc, 0x6b, 0x48, 0x1e, 0x80, 0x3b, 0x88, + 0x3b, 0xcf, 0x93, 0x63, 0x8f, 0x88, 0x43, 0x04, 0xc9, 0x8b, 0x20, 0xaf, 0x1d, 0x20, 0x52, 0x2e, + 0xd6, 0x78, 0x3e, 0xdf, 0x99, 0xd9, 0x99, 0x9d, 0x85, 0x63, 0x2e, 0xd2, 0x9c, 0x79, 0xf2, 0x9b, + 0x4d, 0x3d, 0xb1, 0xc8, 0x18, 0x77, 0xb3, 0x3c, 0x15, 0x29, 0x36, 0x44, 0x48, 0x93, 0x94, 0x9f, + 0x1c, 0x05, 0x69, 0x90, 0x4a, 0x97, 0x57, 0x5a, 0x15, 0x3d, 0xa9, 0x03, 0x67, 0x74, 0xca, 0x66, + 0xbb, 0x81, 0xce, 0x27, 0x04, 0x8d, 0xe7, 0x61, 0x91, 0xdc, 0xe2, 0x53, 0xd0, 0x4b, 0x60, 0xa1, + 0x2e, 0xea, 0xfd, 0x7f, 0x7e, 0xdf, 0xad, 0x32, 0xba, 0x12, 0xba, 0xc3, 0xc4, 0x4f, 0x6f, 0xa2, + 0x24, 0x20, 0x52, 0x83, 0x31, 0xe8, 0x37, 0x54, 0x50, 0x4b, 0xed, 0xa2, 0xde, 0x01, 0x91, 0x36, + 0xb6, 0x40, 0x0f, 0x29, 0x0f, 0x2d, 0xad, 0x8b, 0x7a, 0xfa, 0x40, 0x5f, 0xae, 0x3a, 0x88, 0x48, + 0x8f, 0xe3, 0x40, 0x6b, 0x1b, 0x8f, 0x9b, 0xa0, 0xbd, 0x1b, 0x11, 0x53, 0xc1, 0x87, 0xd0, 0x7e, + 0xf9, 0x6a, 0x3c, 0x19, 0xbd, 0x20, 0xfd, 0x0b, 0x13, 0x39, 0xdf, 0x10, 0x18, 0x63, 0x96, 0x47, + 0x8c, 0x63, 0x1f, 0x0c, 0x79, 0x52, 0x6e, 0xa1, 0xae, 0xd6, 0xfb, 0xef, 0xfc, 0x70, 0x7b, 0x94, + 0xd7, 0xa5, 0x77, 0xf0, 0x6c, 0xb9, 0xea, 0x28, 0x3f, 0x56, 0x9d, 0xa7, 0x41, 0x24, 0xc2, 0x62, + 0xea, 0xfa, 0x69, 0xec, 0x55, 0x82, 0xb3, 0x28, 0xad, 0x2d, 0x2f, 0xbb, 0x0d, 0xbc, 0x9d, 0xa6, + 0xdd, 0x6b, 0x19, 0x4d, 0xea, 0xd4, 0xd8, 0x03, 0xc3, 0x2f, 0x3b, 0xe3, 0x96, 0x2a, 0x8b, 0xdc, + 0xdb, 0x16, 0xe9, 0x07, 0x41, 0x2e, 0x7b, 0x96, 0x2d, 0x28, 0xa4, 0x96, 0x39, 0x5f, 0x55, 0x68, + 0xff, 0x61, 0xf8, 0x18, 0x5a, 0x71, 0x94, 0xbc, 0x17, 0x51, 0x5c, 0x0d, 0x4c, 0x23, 0xcd, 0x38, + 0x4a, 0x26, 0x51, 0xcc, 0x24, 0xa2, 0xf3, 0x0a, 0xa9, 0x35, 0xa2, 0x73, 0x89, 0x3a, 0xa0, 0xe5, + 0xf4, 0xa3, 0x9c, 0xd0, 0x3f, 0x6d, 0xc9, 0x8c, 0xa4, 0x24, 0xf8, 0x21, 0x34, 0xfc, 0xb4, 0x48, + 0x84, 0xa5, 0xef, 0x93, 0x54, 0xac, 0xcc, 0xc2, 0x8b, 0xd8, 0x6a, 0xec, 0xcd, 0xc2, 0x8b, 0xb8, + 0x14, 0xc4, 0x51, 0x62, 0x19, 0x7b, 0x05, 0x71, 0x94, 0x48, 0x01, 0x9d, 0x5b, 0xcd, 0xfd, 0x02, + 0x3a, 0xc7, 0x8f, 0xa1, 0x29, 0x6b, 0xb1, 0xdc, 0x6a, 0xed, 0x13, 0x6d, 0xa9, 0xf3, 0x05, 0xc1, + 0x81, 0x1c, 0xec, 0x05, 0x15, 0x7e, 0xc8, 0x72, 0x7c, 0xb6, 0xb3, 0x45, 0xc7, 0x3b, 0x57, 0x57, + 0x6b, 0xdc, 0xc9, 0x22, 0x63, 0x7f, 0x17, 0x29, 0xa1, 0xf5, 0xa0, 0xda, 0x44, 0xda, 0xf8, 0x08, + 0x1a, 0x1f, 0xe8, 
0xac, 0x60, 0x72, 0x4e, 0x6d, 0x52, 0xfd, 0x38, 0x3d, 0xd0, 0xcb, 0x38, 0x6c, + 0x80, 0x3a, 0xbc, 0x32, 0x95, 0x72, 0x91, 0x2e, 0x87, 0x57, 0x26, 0x2a, 0x1d, 0x64, 0x68, 0xaa, + 0xd2, 0x41, 0x86, 0xa6, 0x76, 0xea, 0xc2, 0x83, 0x37, 0x34, 0x17, 0x11, 0x9d, 0x11, 0xc6, 0xb3, + 0x34, 0xe1, 0x6c, 0x2c, 0x72, 0x2a, 0x58, 0xb0, 0xc0, 0x2d, 0xd0, 0xdf, 0xf6, 0xc9, 0xa5, 0xa9, + 0xe0, 0x36, 0x34, 0xfa, 0x83, 0x11, 0x99, 0x98, 0x68, 0xf0, 0x68, 0xf9, 0xcb, 0x56, 0x96, 0x6b, + 0x1b, 0xdd, 0xad, 0x6d, 0xf4, 0x73, 0x6d, 0xa3, 0xcf, 0x1b, 0x5b, 0xb9, 0xdb, 0xd8, 0xca, 0xf7, + 0x8d, 0xad, 0x5c, 0x37, 0xeb, 0xe7, 0x36, 0x35, 0xe4, 0x83, 0x79, 0xf2, 0x3b, 0x00, 0x00, 0xff, + 0xff, 0x44, 0x9d, 0x95, 0xa2, 0x86, 0x03, 0x00, 0x00, } func (m *Chunk) Marshal() (dAtA []byte, err error) { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto index 3739e23b8f4..67c93fa52ed 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto @@ -23,6 +23,7 @@ option (gogoproto.goproto_sizecache_all) = false; message Chunk { enum Encoding { XOR = 0; + HISTOGRAM = 1; } Encoding type = 1; bytes data = 2; diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df3032..d896edc9968 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368c1..11e31f421c5 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index 49fde3d8c82..fb3c19d6b64 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -28,6 +28,7 @@ var ( ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) @@ -70,6 +71,14 @@ var ( Aggregation: view.Count(), } + ClientStartedRPCsView = &view.View{ + Measure: ClientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of started client RPCs.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: view.Count(), + } + ClientSentMessagesPerRPCView = &view.View{ Measure: ClientSentMessagesPerRPC, Name: "grpc.io/client/sent_messages_per_rpc", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index b2059824a85..fe0e971086e 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -27,6 +27,7 @@ var ( ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) @@ -73,6 +74,14 @@ var ( Aggregation: view.Count(), } + ServerStartedRPCsView = &view.View{ + Measure: ServerStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of started server RPCs.", + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: view.Count(), + } + ServerReceivedMessagesPerRPCView = &view.View{ Name: "grpc.io/server/received_messages_per_rpc", Description: "Distribution of messages received count per RPC, by method.", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 89cac9c4ec0..9cb27320ca1 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -82,8 +82,10 @@ func methodName(fullname string) string { // statsHandleRPC processes the RPC events. func statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { - case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client + case *stats.Begin: + handleRPCBegin(ctx, st) case *stats.OutPayload: handleRPCOutPayload(ctx, st) case *stats.InPayload: @@ -95,6 +97,25 @@ func statsHandleRPC(ctx context.Context, s stats.RPCStats) { } } +func handleRPCBegin(ctx context.Context, s *stats.Begin) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + } + + if s.IsClient() { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ClientStartedRPCs.M(1))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ServerStartedRPCs.M(1))) + } +} + func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea6423572..f7c8434be06 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. 
type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee029..31477a464fd 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629b6..436dc791f83 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 2b97283462e..8b5b99803ce 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. 
// If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568cda..61f72d20da3 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2b5..bcd6e08c748 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1ff2..60bf0e39254 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. 
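As a concrete illustration of the Distribution bounds documented in the aggregation.go hunk above, the following short Go sketch (not part of the patch) uses the public go.opencensus.io/stats/view API; three bounds yield four buckets.

package main

import (
	"fmt"

	"go.opencensus.io/stats/view"
)

func main() {
	// Distribution(0, 10, 100) defines the buckets
	//   (-inf, 0), [0, 10), [10, 100), [100, +inf)
	// i.e. len(bounds)+1 buckets when len(bounds) >= 2, as described above.
	agg := view.Distribution(0, 10, 100)
	fmt.Println(agg.Buckets) // [0 10 100]
}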
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f6d..6a79cd8a34c 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34a2..8fb17226fe3 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56b7..e28cf13cde9 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f38e..7a1616a55c5 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. 
- trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497ed5..80095a5f6c0 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf2847..b8fc1e495a9 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859c0..da488fc8740 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 4c037f1d8e0..fd91e4162da 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -164,10 +164,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } var bw bodyWrapper - // if request body is nil we don't want to mutate the body as it will affect - // the identity of it in an unforeseeable way because we assert ReadCloser - // fulfills a certain interface and it is indeed nil. - if r.Body != nil { + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ if r.Body != nil && r.Body != http.NoBody { bw.ReadCloser = r.Body bw.record = readRecordFunc r.Body = &bw @@ -180,7 +180,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators} + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: 200, // default status code in case the Handler doesn't write anything + } // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -230,10 +236,9 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, if wrote > 0 { attributes = append(attributes, WroteBytesKey.Int64(wrote)) } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) - span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)) - } + attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) + span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)) + if werr != nil && werr != io.EOF { attributes = append(attributes, WriteErrorKey.String(werr.Error())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 6ef983af4cb..822491db06e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.36.4" + return "0.37.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go index 5c8260ceb6f..330a14c2f64 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -34,9 +36,11 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact Counter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -47,9 +51,11 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. 
+// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact UpDownCounter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -60,6 +66,8 @@ type UpDownCounter interface { } // Gauge is an instrument that records independent readings. +// +// Warning: methods may be added to this interface in minor releases. type Gauge interface { // Observe records the state of the instrument to be x. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go index b07409c7931..4fce9963c3b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -34,9 +36,11 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact Counter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -47,9 +51,11 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { - // Observe records the state of the instrument to be x. The value of x is - // assumed to be the exact UpDownCounter value to record. + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. // // It is only valid to call this within a callback. If called outside of the // registered callback it should have no effect on the instrument, and an @@ -60,6 +66,8 @@ type UpDownCounter interface { } // Gauge is an instrument that records independent readings. +// +// Warning: methods may be added to this interface in minor releases. type Gauge interface { // Observe records the state of the instrument to be x. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go index 435db1127bc..2ec192f70d8 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. 
+// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -32,6 +34,8 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { // Add records a change to the counter. Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) @@ -40,6 +44,8 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { // Add records a change to the counter. Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) @@ -48,6 +54,8 @@ type UpDownCounter interface { } // Histogram is an instrument that records a distribution of values. +// +// Warning: methods may be added to this interface in minor releases. type Histogram interface { // Record adds an additional value to the distribution. Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go index c77a4672860..03b5d53e636 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go @@ -22,6 +22,8 @@ import ( ) // InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. type InstrumentProvider interface { // Counter creates an instrument for recording increasing values. Counter(name string, opts ...instrument.Option) (Counter, error) @@ -32,6 +34,8 @@ type InstrumentProvider interface { } // Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. type Counter interface { // Add records a change to the counter. Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) @@ -40,6 +44,8 @@ type Counter interface { } // UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. type UpDownCounter interface { // Add records a change to the counter. Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) @@ -48,6 +54,8 @@ type UpDownCounter interface { } // Histogram is an instrument that records a distribution of values. +// +// Warning: methods may be added to this interface in minor releases. type Histogram interface { // Record adds an additional value to the distribution. Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 21fc1c499fb..23e6853afbb 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -26,6 +26,8 @@ import ( // MeterProvider provides access to named Meter instances, for instrumenting // an application or library. +// +// Warning: methods may be added to this interface in minor releases. type MeterProvider interface { // Meter creates an instance of a `Meter` interface. 
The instrumentationName // must be the name of the library providing instrumentation. This name may @@ -36,6 +38,8 @@ type MeterProvider interface { } // Meter provides access to instrument instances for recording metrics. +// +// Warning: methods may be added to this interface in minor releases. type Meter interface { // AsyncInt64 is the namespace for the Asynchronous Integer instruments. // diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 0c756c46cc8..01bed6c6814 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -128,6 +128,12 @@ func Contains[E comparable](s []E, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. // In the returned slice r, r[i] == v[0]. @@ -165,6 +171,7 @@ func Delete[S ~[]E, E any](s S, i, j int) S { // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice tot := len(s[:i]) + len(v) + len(s[j:]) if tot <= cap(s) { s2 := s[:tot] @@ -193,7 +200,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s; it does not create a new slice. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 @@ -210,7 +217,7 @@ func Compact[S ~[]E, E comparable](s S) S { // CompactFunc is like Compact but uses a comparison function. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd102..35a5d8f06d4 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -30,7 +30,7 @@ func SortFunc[E any](x []E, less func(a, b E) bool) { pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) } -// SortStable sorts the slice x while keeping the original order of equal +// SortStableFunc sorts the slice x while keeping the original order of equal // elements, using less to compare elements. func SortStableFunc[E any](x []E, less func(a, b E) bool) { stableLessFunc(x, len(x), less) diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 75248fd16ec..65b125abd2c 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -79,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -122,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. 
+func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index e450fba31cb..63e5ef64c0d 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -598,17 +598,17 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GenerateAccessTokenResponse{ ServerResponse: googleapi.ServerResponse{ @@ -745,17 +745,17 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GenerateIdTokenResponse{ ServerResponse: googleapi.ServerResponse{ @@ -892,17 +892,17 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SignBlobResponse{ ServerResponse: googleapi.ServerResponse{ @@ -1039,17 +1039,17 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SignJwtResponse{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 00000000000..886c6532b15 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. 
+func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 9e416e92ba1..e96a3316453 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.102.0" +const Version = "0.104.0" diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index f56a8c1d906..b2085a1949a 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -96,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 35cea853eb1..4613ebdfa72 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2560,7 +2560,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -2708,17 +2708,17 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -2865,17 +2865,17 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3025,17 +3025,17 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -3181,17 +3181,17 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke if 
res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3347,17 +3347,17 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3511,7 +3511,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -3688,17 +3688,17 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -3883,17 +3883,17 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -4107,17 +4107,17 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4352,17 +4352,17 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ @@ -4553,17 +4553,17 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if 
err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4801,17 +4801,17 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5019,17 +5019,17 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5182,17 +5182,17 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5432,17 +5432,17 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5631,7 +5631,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5744,7 +5744,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5892,17 +5892,17 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6050,17 +6050,17 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6227,17 +6227,17 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -6395,17 +6395,17 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6561,17 +6561,17 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6713,7 +6713,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -6859,17 +6859,17 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7019,17 +7019,17 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7181,17 +7181,17 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } 
defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notifications{ ServerResponse: googleapi.ServerResponse{ @@ -7342,7 +7342,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -7516,17 +7516,17 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7699,17 +7699,17 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7885,17 +7885,17 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -8067,17 +8067,17 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8259,17 +8259,17 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8500,17 +8500,17 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return 
nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -8858,17 +8858,17 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9165,7 +9165,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -9395,7 +9395,7 @@ func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() - return nil, err + return nil, gensupport.WrapError(err) } return res, nil } @@ -9414,17 +9414,17 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9639,17 +9639,17 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -10001,17 +10001,17 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) if rx != nil { @@ -10028,7 +10028,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } } ret := &Object{ @@ -10352,17 +10352,17 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Objects{ ServerResponse: googleapi.ServerResponse{ @@ -10674,17 +10674,17 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) 
(*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -11081,17 +11081,17 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RewriteResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11373,17 +11373,17 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -11563,17 +11563,17 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11828,17 +11828,17 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -12135,17 +12135,17 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Channel{ ServerResponse: googleapi.ServerResponse{ @@ -12344,17 +12344,17 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKey{ 
ServerResponse: googleapi.ServerResponse{ @@ -12493,7 +12493,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -12640,17 +12640,17 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -12841,17 +12841,17 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeysMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13043,17 +13043,17 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13211,17 +13211,17 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAccount{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index c86f56507f5..efcc8e6c641 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" // Install grpclb, which is required for direct path. @@ -126,10 +127,26 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - var grpcOpts []grpc.DialOption + + var transportCreds credentials.TransportCredentials if insecure { - grpcOpts = []grpc.DialOption{grpc.WithInsecure()} - } else if !o.NoAuth { + transportCreds = grpcinsecure.NewCredentials() + } else { + transportCreds = credentials.NewTLS(&tls.Config{ + GetClientCertificate: clientCertSource, + }) + } + + // Initialize gRPC dial options with transport-level security options. 
+ grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { if o.APIKey != "" { log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") } @@ -142,8 +159,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C o.QuotaProject = internal.QuotaProjectFromCreds(creds) } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + ) + // Attempt Direct Path: if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} if timeoutDialerOption != nil { @@ -153,9 +179,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { - endpoint = "google-c2p-experimental:///" + addr + endpoint = "google-c2p:///" + addr } else { - endpoint = "google-c2p-experimental:///" + endpoint + endpoint = "google-c2p:///" + endpoint } } else { if !strings.HasPrefix(endpoint, "dns:///") { @@ -169,18 +195,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. - } else { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - } } } diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go index 4bf9e821723..507cd3ec63a 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -12,7 +12,6 @@ import ( "net" "syscall" - "golang.org/x/sys/unix" "google.golang.org/grpc" ) @@ -20,6 +19,9 @@ const ( // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By // default is 20 seconds. tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. 
+ tcpUserTimeoutOp = 0x12 ) func init() { @@ -33,7 +35,7 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { var syscallErr error controlErr := c.Control(func(fd uintptr) { syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) }) if syscallErr != nil { return syscallErr diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 66fdb650f4b..ec7c602ecf9 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,17 +15,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.18.1 // source: google/api/client.proto package annotations import ( reflect "reflect" + sync "sync" + api "google.golang.org/genproto/googleapis/api" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -35,6 +38,1047 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 +) + +// Enum value maps for ClientLibraryOrganization. +var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. 
+ ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. 
+func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. + DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. 
+func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a place that API users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. 
+ DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. +func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. 
Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
+func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. 
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), @@ -78,26 +1122,26 @@ var ( // // For example, the proto RPC and annotation: // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } // // Would add the following Java overload (in addition to the method accepting // the request object): // - // public final Subscription createSubscription(String name, String topic) + // public final Subscription createSubscription(String name, String topic) // // The following backwards-compatibility guidelines apply: // - // * Adding this annotation to an unannotated method is backwards + // - Adding this annotation to an unannotated method is backwards // compatible. - // * Adding this annotation to a method which already has existing + // - Adding this annotation to a method which already has existing // method signature annotations is backwards compatible if and only if // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is + // - Modifying or removing an existing method signature annotation is // a breaking change. - // * Re-ordering existing method signature annotations is a breaking + // - Re-ordering existing method signature annotations is a breaking // change. // // repeated string method_signature = 1051; @@ -111,10 +1155,10 @@ var ( // // Example: // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } // // optional string default_host = 1049; E_DefaultHost = &file_google_api_client_proto_extTypes[1] @@ -122,22 +1166,22 @@ var ( // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } // // If there is more than one scope, use a comma-separated string: // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } // // optional string oauth_scopes = 1050; E_OauthScopes = &file_google_api_client_proto_extTypes[2] @@ -148,44 +1192,278 @@ var File_google_api_client_proto protoreflect.FileDescriptor var file_google_api_client_proto_rawDesc = []byte{ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 
0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, 0x55, 0x72, 0x69, 0x12, + 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, + 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, + 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, + 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 
0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, + 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, + 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, + 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, + 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, + 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, + 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, + 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 
0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, + 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, + 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x94, 0x02, + 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, + 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, + 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 
0x6c, 0x61, 0x79, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, + 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, + 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, + 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x2a, + 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, + 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, + 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, + 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, + 0x09, 
0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData } +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 16: google.api.LaunchStage + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ - 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 0, // [0:3] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 16, // 1: 
google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 26, // [26:29] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -193,18 +1471,178 @@ func file_google_api_client_proto_init() { if File_google_api_client_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, + NumEnums: 2, + NumMessages: 14, NumExtensions: 3, NumServices: 0, }, GoTypes: file_google_api_client_proto_goTypes, DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, ExtensionInfos: file_google_api_client_proto_extTypes, }.Build() 
File_google_api_client_proto = out.File diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 00000000000..71075313773 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.18.1 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. 
+ LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. +var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 
0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go new file mode 100644 index 00000000000..9fb745926a5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go @@ -0,0 +1,208 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package iam aliases all exported identifiers in package +// "cloud.google.com/go/iam/apiv1/iampb". +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
+package iam + +import ( + src "cloud.google.com/go/iam/apiv1/iampb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb +const ( + AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED + AuditConfigDelta_ADD = src.AuditConfigDelta_ADD + AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE + AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ + AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ + AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE + AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED + BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED + BindingDelta_ADD = src.BindingDelta_ADD + BindingDelta_REMOVE = src.BindingDelta_REMOVE +) + +// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb +var ( + AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name + AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value + AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name + AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value + BindingDelta_Action_name = src.BindingDelta_Action_name + BindingDelta_Action_value = src.BindingDelta_Action_value + File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto + File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto + File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto +) + +// Specifies the audit configuration for a service. The configuration +// determines which permission types are logged, and what identities, if any, +// are exempted from logging. An AuditConfig must have one or more +// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a +// specific service, the union of the two AuditConfigs is used for that +// service: the log_types specified in each AuditConfig are enabled, and the +// exempted_members in each AuditLogConfig are exempted. Example Policy with +// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": +// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": +// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For +// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfig = src.AuditConfig + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta = src.AuditConfigDelta + +// The type of action performed on an audit configuration in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta_Action = src.AuditConfigDelta_Action + +// Provides the configuration for logging a type of permissions. 
Example: { +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables +// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from +// DATA_READ logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig = src.AuditLogConfig + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig_LogType = src.AuditLogConfig_LogType + +// Associates `members`, or principals, with a `role`. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Binding = src.Binding + +// One delta entry for Binding. Each individual change (only one member in +// each entry) to a binding will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta = src.BindingDelta + +// The type of action performed on a Binding in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta_Action = src.BindingDelta_Action + +// Request message for `GetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetIamPolicyRequest = src.GetIamPolicyRequest + +// Encapsulates settings provided to GetIamPolicy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetPolicyOptions = src.GetPolicyOptions + +// IAMPolicyClient is the client API for IAMPolicy service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyClient = src.IAMPolicyClient + +// IAMPolicyServer is the server API for IAMPolicy service. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyServer = src.IAMPolicyServer + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. A `Policy` is a collection of +// `bindings`. A `binding` binds one or more `members`, or principals, to a +// single `role`. Principals can be user accounts, service accounts, Google +// groups, and domains (such as G Suite). A `role` is a named list of +// permissions; each `role` can be an IAM predefined role or a user-created +// custom role. For some types of Google Cloud resources, a `binding` can also +// specify a `condition`, which is a logical expression that allows access to a +// resource only if the expression evaluates to `true`. A condition can add +// constraints based on attributes of the request, the resource, or both. To +// learn which resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": +// "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": +// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - +// user:mike@example.com - group:admins@example.com - domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com +// role: roles/resourcemanager.organizationViewer condition: title: expirable +// access description: Does not grant access after Sep 2020 expression: +// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= +// version: 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Policy = src.Policy + +// The difference delta between two policies. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type PolicyDelta = src.PolicyDelta + +// Request message for `SetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type SetIamPolicyRequest = src.SetIamPolicyRequest + +// Request message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsRequest = src.TestIamPermissionsRequest + +// Response message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsResponse = src.TestIamPermissionsResponse + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible +// implementations. 
+// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return src.NewIAMPolicyClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + src.RegisterIAMPolicyServer(s, srv) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 49df6fed85c..749e561a8c0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,15 +4,16 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.12.1 +# cloud.google.com/go/compute v1.13.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.1 +# cloud.google.com/go/compute/metadata v0.2.2 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.6.0 +# cloud.google.com/go/iam v0.8.0 ## explicit; go 1.19 cloud.google.com/go/iam +cloud.google.com/go/iam/apiv1/iampb # cloud.google.com/go/storage v1.27.0 ## explicit; go 1.17 cloud.google.com/go/storage @@ -415,7 +416,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e +# github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 ## explicit; go 1.18 github.com/google/pprof/profile # github.com/google/uuid v1.3.0 @@ -425,7 +426,7 @@ github.com/google/uuid ## explicit; go 1.18 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.6.0 +# github.com/googleapis/gax-go/v2 v2.7.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror @@ -434,7 +435,7 @@ github.com/googleapis/gax-go/v2/internal # github.com/gorilla/mux v1.8.0 ## explicit; go 1.12 github.com/gorilla/mux -# github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 +# github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd ## explicit; go 1.17 github.com/grafana/regexp github.com/grafana/regexp/syntax @@ -677,7 +678,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.39.0 +# github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 ## explicit; go 1.17 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -696,7 +697,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.40.7 +# github.com/prometheus/prometheus v0.41.0 ## explicit; go 1.18 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -795,7 +796,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-community/promql-engine v0.0.0-20230123143605-0ad3f3b2e4b4 +# github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 ## explicit; go 1.19 
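Because alias.go above only re-exports identifiers from cloud.google.com/go/iam/apiv1/iampb (type Policy = src.Policy, and so on), code that still imports the old genproto path keeps compiling against the new types. A minimal sketch of that interchangeability, assuming the vendored packages listed in this patch are on the module path; the role and member values are taken from the doc comment example above:

package main

import (
	"fmt"

	// Deprecated alias package kept for compatibility; every identifier
	// forwards to cloud.google.com/go/iam/apiv1/iampb.
	iam "google.golang.org/genproto/googleapis/iam/v1"
)

func main() {
	// Built through the alias package, but the concrete type is the new
	// iampb.Policy, so values can be passed to APIs using either import path.
	p := &iam.Policy{
		Bindings: []*iam.Binding{{
			Role:    "roles/resourcemanager.organizationViewer",
			Members: []string{"user:eve@example.com"},
		}},
	}
	fmt.Println(len(p.Bindings), p.Bindings[0].Role)
}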
github.com/thanos-community/promql-engine/api github.com/thanos-community/promql-engine/engine @@ -824,7 +825,7 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing -# github.com/thanos-io/thanos v0.29.1-0.20230103123855-3327c510076a +# github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -952,7 +953,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -971,7 +972,7 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 ## explicit; go 1.18 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp # go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 @@ -1020,7 +1021,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform # go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc -# go.opentelemetry.io/otel/metric v0.33.0 +# go.opentelemetry.io/otel/metric v0.34.0 ## explicit; go 1.18 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/global @@ -1075,7 +1076,7 @@ golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 +# golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices @@ -1160,7 +1161,7 @@ golang.org/x/xerrors/internal gonum.org/v1/gonum/floats gonum.org/v1/gonum/floats/scalar gonum.org/v1/gonum/internal/asm/f64 -# google.golang.org/api v0.102.0 +# google.golang.org/api v0.104.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1193,8 +1194,9 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c +# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 ## explicit; go 1.19 +google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/iam/v1 From 61438ce56fe6685515b9a4804fa62077fdf6ef58 Mon Sep 17 00:00:00 2001 From: Kama Huang <121007071+kama910@users.noreply.github.com> Date: Wed, 1 Feb 2023 18:51:01 -0800 Subject: [PATCH 061/133] fix response status code panic (#5117) Signed-off-by: Kama Huang Signed-off-by: Alex Le --- pkg/frontend/transport/handler.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 7e6c8298a92..47c61c83ea8 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -147,9 +147,11 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 
f.reportSlowQuery(r, queryString, queryResponseTime) } if f.cfg.QueryStatsEnabled { - statusCode := resp.StatusCode + var statusCode int if err != nil { statusCode = getStatusCodeFromError(err) + } else if resp != nil { + statusCode = resp.StatusCode } f.reportQueryStats(r, queryString, queryResponseTime, stats, err, statusCode) From 1bfd0764e42ab3c193bb69df37f9ac2f4ee25a36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Feb 2023 13:54:29 -0800 Subject: [PATCH 062/133] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc (#5109) Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) from 1.11.2 to 1.12.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...v1.12.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 8 +- go.sum | 16 ++-- .../exporters/otlp/internal/retry/retry.go | 7 +- .../otel/exporters/otlp/otlptrace/exporter.go | 7 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 7 +- .../grpc/balancer/grpclb/grpclb.go | 2 +- .../grpclb/{state => grpclbstate}/state.go | 8 +- .../grpc_binarylog_v1/binarylog.pb.go | 7 +- vendor/google.golang.org/grpc/clientconn.go | 9 ++ .../internal/proto/grpc_gcp/altscontext.pb.go | 7 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 7 +- .../grpc_gcp/transport_security_common.pb.go | 7 +- vendor/google.golang.org/grpc/dialoptions.go | 10 +- .../grpc/health/grpc_health_v1/health.pb.go | 7 +- .../grpc/internal/binarylog/method_logger.go | 2 +- .../internal/resolver/dns/dns_resolver.go | 6 +- .../resolver/passthrough/passthrough.go | 9 +- .../grpc/internal/resolver/unix/unix.go | 4 +- .../grpc/internal/transport/controlbuf.go | 46 +++++---- .../grpc/internal/transport/handler_server.go | 45 ++++++--- .../grpc/internal/transport/http2_client.go | 46 ++++++--- .../grpc/internal/transport/http2_server.go | 96 +++++++++---------- .../grpc/internal/transport/transport.go | 2 +- vendor/google.golang.org/grpc/pickfirst.go | 4 +- vendor/google.golang.org/grpc/regenerate.sh | 7 +- vendor/google.golang.org/grpc/server.go | 77 +++++++-------- vendor/google.golang.org/grpc/stream.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 3 +- vendor/modules.txt | 10 +- 30 files changed, 246 insertions(+), 224 deletions(-) rename vendor/google.golang.org/grpc/balancer/grpclb/{state => grpclbstate}/state.go (87%) diff --git a/go.mod b/go.mod index 2d7a6a3d430..2244fd5f5cc 100644 --- a/go.mod +++ b/go.mod @@ -61,15 +61,15 @@ require ( go.opentelemetry.io/contrib/propagators/aws v1.12.0 go.opentelemetry.io/otel v1.12.0 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 go.opentelemetry.io/otel/sdk v1.12.0 
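The handler fix above only reads resp.StatusCode when a response actually exists; before the change the field was dereferenced unconditionally, which panicked whenever the downstream round trip returned a nil response. A standalone sketch of the guard, where a plain 500 stands in for the real getStatusCodeFromError mapping:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// statusCodeFor mirrors the nil-safe derivation from the patch: prefer the
// error mapping, and fall back to the response only when it is non-nil.
func statusCodeFor(resp *http.Response, err error) int {
	var statusCode int
	if err != nil {
		statusCode = http.StatusInternalServerError // stand-in for getStatusCodeFromError(err)
	} else if resp != nil {
		statusCode = resp.StatusCode
	}
	return statusCode
}

func main() {
	fmt.Println(statusCodeFor(nil, errors.New("query failed")))      // 500, no nil dereference
	fmt.Println(statusCodeFor(&http.Response{StatusCode: 200}, nil)) // 200
	fmt.Println(statusCodeFor(nil, nil))                             // 0, also safe now
}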
go.opentelemetry.io/otel/trace v1.12.0 go.uber.org/atomic v1.10.0 golang.org/x/net v0.5.0 golang.org/x/sync v0.1.0 golang.org/x/time v0.3.0 - google.golang.org/grpc v1.51.0 + google.golang.org/grpc v1.52.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 sigs.k8s.io/yaml v1.3.0 @@ -196,7 +196,7 @@ require ( go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 // indirect go.opentelemetry.io/otel/metric v0.34.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect diff --git a/go.sum b/go.sum index ddd7827eeb4..058d7c5562a 100644 --- a/go.sum +++ b/go.sum @@ -1636,14 +1636,14 @@ go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 h1:UfDENi+LTcLjQ/JhaXimjlIgn7wWjwbEMmdREm2Gyng= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 h1:ZVqtSAxrR4+ofzayuww0/EKamCjjnwnXTMRZzMudJoU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0/go.mod h1:IlaGLENJkAl9+Xoo3J0unkdOwtL+rmqZ3ryMjUtYA94= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 h1:+tsVdWosoqDfX6cdHAeacZozjQS94ySBd+aUXFwnNKA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0/go.mod h1:jSqjV+Knu1Jyvh+l3fx7V210Ev3HHgNQAi8YqpXaQP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= @@ -2374,8 +2374,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go index 3d43f7aea97..2d14d248336 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go @@ -119,8 +119,8 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { delay = throttle } - if err := waitFunc(ctx, delay); err != nil { - return err + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) } } } @@ -129,6 +129,9 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { // Allow override for testing. var waitFunc = wait +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index c5ee6c098cc..b65802edbbd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -19,6 +19,7 @@ import ( "errors" "sync" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" tracesdk "go.opentelemetry.io/otel/sdk/trace" ) @@ -45,7 +46,11 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) return nil } - return e.client.UploadTraces(ctx, protoSpans) + err := e.client.UploadTraces(ctx, protoSpans) + if err != nil { + return internal.WrapTracesError(err) + } + return nil } // Start establishes a connection to the receiving endpoint. diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index bf4c3cb4449..1205aff23f7 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
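The retry.go change above keeps the last request error visible when the backoff wait is cut short: instead of returning the bare context error, it wraps that error around the request error with %w. A minimal sketch of the same pattern with hypothetical names (waitOrWrap is not part of the exporter API):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitOrWrap sleeps for delay unless ctx ends first; on cancellation it wraps
// the context error around the request error, so errors.Is still matches
// context.DeadlineExceeded while the original failure stays in the message.
func waitOrWrap(ctx context.Context, delay time.Duration, reqErr error) error {
	timer := time.NewTimer(delay)
	defer timer.Stop()
	select {
	case <-ctx.Done():
		return fmt.Errorf("%w: %s", ctx.Err(), reqErr)
	case <-timer.C:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	err := waitOrWrap(ctx, time.Second, errors.New("rpc error: unavailable"))
	fmt.Println(errors.Is(err, context.DeadlineExceeded), err)
}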
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -42,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type LoadBalanceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index dd15810d0ae..b0dd72fce14 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go similarity index 87% rename from vendor/google.golang.org/grpc/balancer/grpclb/state/state.go rename to vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go index 4ecfa1c2151..cece046be34 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go @@ -16,9 +16,9 @@ * */ -// Package state declares grpclb types to be set by resolvers wishing to pass -// information to grpclb via resolver.State Attributes. -package state +// Package grpclbstate declares grpclb types to be set by resolvers wishing to +// pass information to grpclb via resolver.State Attributes. +package grpclbstate import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.state") +const key = keyType("grpc.grpclb.grpclbstate") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f2811..66d141fce70 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 422639c79db..04566890451 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -1268,6 +1274,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 703b48da753..1a40e17e8d3 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/altscontext.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type AltsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 383c5fb97a7..50eefa53830 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/handshaker.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type HandshakeProtocol int32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 4fc3c79d6a3..b07412f1858 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The security level of the created channel. The list is sorted in increasing // level of security. This order must always be maintained. type SecurityLevel int32 diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9372dc322e8..8f5b536f11e 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -116,8 +116,9 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +128,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index a66024d23e3..8e29a62f164 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
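The dialoptions.go doc update above clarifies that zero or negative sizes now disable the transport buffers entirely: writes are issued on the underlying connection and the framer reads from the conn directly. A minimal sketch of passing those options; the target address and insecure credentials are placeholders for illustration:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Zero (or any negative value) disables the write buffer so each write
	// goes to the underlying connection, and disables the read buffer so the
	// framer reads from the conn directly.
	conn, err := grpc.Dial(
		"localhost:9095", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithWriteBufferSize(0),
		grpc.WithReadBufferSize(0),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}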
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26d13..85e3ff2816a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c51491..d51302e65c6 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1e..c6e08221ff6 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702cacb..16091168773 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f48fd..aaa9c859a34 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,6 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. 
%v", err) - } - err = nil - } - }() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +570,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -662,11 +657,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err + if err == errStreamDrain { // errStreamDrain need not close transport + return nil } - // Other errors(errStreamDrain) need not close transport. - return nil + return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -764,7 +758,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +793,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +811,14 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + l.framer.writer.Flush() + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +847,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fb272235d81..ebe8bfe330a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed time-out: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d518b07e16f..3e582a2853c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. 
+ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -238,8 +242,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } conn.Close() } }(conn) @@ -314,6 +321,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -440,10 +448,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -702,6 +708,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -934,6 +952,9 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. 
t.onClose() @@ -986,11 +1007,14 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647bc8..37e089bc843 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -453,7 +452,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } if !isGRPC || headerError { @@ -463,7 +462,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -503,7 +502,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +513,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +529,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +549,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +596,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +629,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +839,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +882,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1146,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1162,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1189,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1288,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1312,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1347,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 2e615ee20cc..6cff20c8e02 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -701,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. 
RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0a27..b3a55481b94 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 99db79fafcf..a6f26c8ab0f 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f4dde72b41f..2808b7c83e8 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. 
Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
- var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 960c3e33dfd..0f8e6c0149d 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 2198e7098d7..243e06e8d3e 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.51.0" +const Version = "1.52.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bd8e0cdb33c..1d03c091481 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -121,8 +121,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 749e561a8c0..0e97f0bd176 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1010,15 +1010,15 @@ go.opentelemetry.io/otel/semconv/v1.17.0 ## explicit; go 1.18 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 +# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc # go.opentelemetry.io/otel/metric v0.34.0 @@ -1206,7 +1206,7 @@ google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.51.0 +# google.golang.org/grpc v1.52.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1215,7 +1215,7 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 -google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/grpclb/grpclbstate google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz From ba5a005d31ee4945b54f5dbd57e418d36a07d9d9 Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Fri, 3 Feb 2023 00:36:49 +0100 Subject: [PATCH 063/133] Support enabled_tenants and disabled_tenants in alertmanager (#5116) * Support enabled_tenants and disabled_tenants in alertmanager Signed-off-by: Friedrich Gonzalez * Add PR number Signed-off-by: Friedrich Gonzalez * Disable alertmanager UI and API for tenants disabled Signed-off-by: Friedrich Gonzalez --------- Signed-off-by: Friedrich Gonzalez Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/configuration/config-file-reference.md | 12 ++++++ pkg/alertmanager/distributor.go | 7 +++- pkg/alertmanager/distributor_test.go | 18 +++++++- pkg/alertmanager/multitenant.go | 29 ++++++++++++- pkg/alertmanager/multitenant_test.go | 46 +++++++++++++++++++++ 6 files changed, 110 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc6bb3a17f3..9898ee5eb1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ * [ENHANCEMENT] Ingester: The metadata APIs should honour `querier.query-ingesters-within` when `querier.query-store-for-labels-enabled` is true. #5027 * [ENHANCEMENT] Query Frontend: Skip instant query roundtripper if sharding is not applicable. #5062 * [ENHANCEMENT] Push reduce one hash operation of Labels. #4945 #5114 +* [ENHANCEMENT] Alertmanager: Added `-alertmanager.enabled-tenants` and `-alertmanager.disabled-tenants` to explicitly enable or disable alertmanager for specific tenants. #5116 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. 
#4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . #5000 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 18e56571ba0..259e80a530c 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -1886,6 +1886,18 @@ alertmanager_client: # result in potentially fewer lost silences, and fewer duplicate notifications. # CLI flag: -alertmanager.persist-interval [persist_interval: | default = 15m] + +# Comma separated list of tenants whose alerts this alertmanager can process. If +# specified, only these tenants will be handled by alertmanager, otherwise this +# alertmanager can process alerts from all tenants. +# CLI flag: -alertmanager.enabled-tenants +[enabled_tenants: | default = ""] + +# Comma separated list of tenants whose alerts this alertmanager cannot process. +# If specified, a alertmanager that would normally pick the specified tenant(s) +# for processing will ignore them instead. +# CLI flag: -alertmanager.disabled-tenants +[disabled_tenants: | default = ""] ``` ### `alertmanager_storage_config` diff --git a/pkg/alertmanager/distributor.go b/pkg/alertmanager/distributor.go index df16f9702cc..9f815edb48e 100644 --- a/pkg/alertmanager/distributor.go +++ b/pkg/alertmanager/distributor.go @@ -118,7 +118,7 @@ func (d *Distributor) isUnaryReadPath(p string) bool { // In case of reads, it proxies the request to one of the alertmanagers. // DistributeRequest assumes that the caller has verified IsPathSupported returns // true for the route. -func (d *Distributor) DistributeRequest(w http.ResponseWriter, r *http.Request) { +func (d *Distributor) DistributeRequest(w http.ResponseWriter, r *http.Request, allowedTenants *util.AllowedTenants) { d.requestsInFlight.Add(1) defer d.requestsInFlight.Done() @@ -128,6 +128,11 @@ func (d *Distributor) DistributeRequest(w http.ResponseWriter, r *http.Request) return } + if !allowedTenants.IsAllowed(userID) { + http.Error(w, "Tenant is not allowed", http.StatusUnauthorized) + return + } + logger := util_log.WithContext(r.Context(), d.logger) if r.Method == http.MethodPost { diff --git a/pkg/alertmanager/distributor_test.go b/pkg/alertmanager/distributor_test.go index 98245eaa53a..40ef486cfaa 100644 --- a/pkg/alertmanager/distributor_test.go +++ b/pkg/alertmanager/distributor_test.go @@ -27,6 +27,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/consul" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" @@ -40,6 +41,7 @@ func TestDistributor_DistributeRequest(t *testing.T) { replicationFactor int isRead bool isDelete bool + isTenantDisabled bool expStatusCode int expectedTotalCalls int headersNotPreserved bool @@ -56,6 +58,16 @@ func TestDistributor_DistributeRequest(t *testing.T) { expStatusCode: http.StatusOK, expectedTotalCalls: 3, route: "/alerts", + }, { + name: "Write /alerts, Simple AM request, all AM healthy, not allowed", + numAM: 4, + numHappyAM: 4, + replicationFactor: 3, + expStatusCode: http.StatusUnauthorized, + expectedTotalCalls: 0, + route: "/alerts", + headersNotPreserved: true, + 
isTenantDisabled: true, }, { name: "Write /alerts, Less than quorum AM available", numAM: 1, @@ -262,9 +274,13 @@ func TestDistributor_DistributeRequest(t *testing.T) { req.Method = http.MethodDelete } req.RequestURI = url + var allowedTenants *util.AllowedTenants + if c.isTenantDisabled { + allowedTenants = util.NewAllowedTenants(nil, []string{"1"}) + } w := httptest.NewRecorder() - d.DistributeRequest(w, req) + d.DistributeRequest(w, req, allowedTenants) resp := w.Result() require.Equal(t, c.expStatusCode, resp.StatusCode) diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 4a5c2b7ab55..e868c0cd51a 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -85,6 +85,9 @@ type MultitenantAlertmanagerConfig struct { // For the state persister. Persister PersisterConfig `yaml:",inline"` + + EnabledTenants flagext.StringSliceCSV `yaml:"enabled_tenants"` + DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` } type ClusterConfig struct { @@ -116,6 +119,8 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.EnableAPI, "experimental.alertmanager.enable-api", false, "Enable the experimental alertmanager config api.") f.BoolVar(&cfg.ShardingEnabled, "alertmanager.sharding-enabled", false, "Shard tenants across multiple alertmanager instances.") + f.Var(&cfg.EnabledTenants, "alertmanager.enabled-tenants", "Comma separated list of tenants whose alerts this alertmanager can process. If specified, only these tenants will be handled by alertmanager, otherwise this alertmanager can process alerts from all tenants.") + f.Var(&cfg.DisabledTenants, "alertmanager.disabled-tenants", "Comma separated list of tenants whose alerts this alertmanager cannot process. If specified, a alertmanager that would normally pick the specified tenant(s) for processing will ignore them instead.") cfg.AlertmanagerClient.RegisterFlagsWithPrefix("alertmanager.alertmanager-client", f) cfg.Persister.RegisterFlagsWithPrefix("alertmanager", f) @@ -269,6 +274,8 @@ type MultitenantAlertmanager struct { limits Limits + allowedTenants *util.AllowedTenants + registry prometheus.Registerer ringCheckErrors prometheus.Counter tenantsOwned prometheus.Gauge @@ -359,6 +366,7 @@ func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackC logger: log.With(logger, "component", "MultiTenantAlertmanager"), registry: registerer, limits: limits, + allowedTenants: util.NewAllowedTenants(cfg.EnabledTenants, cfg.DisabledTenants), ringCheckErrors: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_alertmanager_ring_check_errors_total", Help: "Number of errors that have occurred when checking the ring for ownership.", @@ -418,6 +426,13 @@ func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackC } } + if len(cfg.EnabledTenants) > 0 { + level.Info(am.logger).Log("msg", "alertmanager using enabled users", "enabled", strings.Join(cfg.EnabledTenants, ", ")) + } + if len(cfg.DisabledTenants) > 0 { + level.Info(am.logger).Log("msg", "alertmanager using disabled users", "disabled", strings.Join(cfg.DisabledTenants, ", ")) + } + if registerer != nil { registerer.MustRegister(am.alertmanagerMetrics) } @@ -735,6 +750,10 @@ func (am *MultitenantAlertmanager) loadAlertmanagerConfigs(ctx context.Context) // Filter out users not owned by this shard. 
for _, userID := range allUserIDs { + if !am.allowedTenants.IsAllowed(userID) { + level.Debug(am.logger).Log("msg", "ignoring alertmanager for user, not allowed", "user", userID) + continue + } if am.isUserOwned(userID) { ownedUserIDs = append(ownedUserIDs, userID) } @@ -993,7 +1012,7 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re } if am.cfg.ShardingEnabled && am.distributor.IsPathSupported(req.URL.Path) { - am.distributor.DistributeRequest(w, req) + am.distributor.DistributeRequest(w, req, am.allowedTenants) return } @@ -1014,6 +1033,10 @@ func (am *MultitenantAlertmanager) serveRequest(w http.ResponseWriter, req *http http.Error(w, err.Error(), http.StatusUnauthorized) return } + if !am.allowedTenants.IsAllowed(userID) { + http.Error(w, "Tenant is not allowed", http.StatusUnauthorized) + return + } am.alertmanagersMtx.Lock() userAM, ok := am.alertmanagers[userID] am.alertmanagersMtx.Unlock() @@ -1197,6 +1220,10 @@ func (am *MultitenantAlertmanager) deleteUnusedRemoteUserState(ctx context.Conte } for _, userID := range usersWithState { + if !am.allowedTenants.IsAllowed(userID) { + level.Debug(am.logger).Log("msg", "not deleting remote state for user, not allowed", "user", userID) + continue + } if _, ok := users[userID]; ok { continue } diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index 8c953d054f6..bd52526068f 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -1110,6 +1110,8 @@ func TestMultitenantAlertmanager_PerTenantSharding(t *testing.T) { configs int expectedTenants int withSharding bool + enabledTenants []string + disabledTenants []string }{ { name: "sharding disabled, 1 instance", @@ -1117,6 +1119,20 @@ func TestMultitenantAlertmanager_PerTenantSharding(t *testing.T) { configs: 10, expectedTenants: 10, }, + { + name: "sharding disabled, 1 instance, single user allowed", + instances: 1, + configs: 10, + expectedTenants: 1, + enabledTenants: []string{"u-1"}, + }, + { + name: "sharding disabled, 1 instance, single user disabled", + instances: 1, + configs: 10, + expectedTenants: 9, + disabledTenants: []string{"u-2"}, + }, { name: "sharding disabled, 2 instances", instances: 2, @@ -1131,6 +1147,24 @@ func TestMultitenantAlertmanager_PerTenantSharding(t *testing.T) { configs: 10, expectedTenants: 10, // same as no sharding and 1 instance }, + { + name: "sharding enabled, 1 instance, enabled tenants, single user allowed", + withSharding: true, + instances: 1, + replicationFactor: 1, + configs: 10, + expectedTenants: 1, + enabledTenants: []string{"u-3"}, + }, + { + name: "sharding enabled, 1 instance, enabled tenants, single user disabled", + withSharding: true, + instances: 1, + replicationFactor: 1, + configs: 10, + expectedTenants: 9, + disabledTenants: []string{"u-4"}, + }, { name: "sharding enabled, 2 instances, RF = 1", withSharding: true, @@ -1155,6 +1189,15 @@ func TestMultitenantAlertmanager_PerTenantSharding(t *testing.T) { configs: 10, expectedTenants: 30, // configs * replication factor }, + { + name: "sharding enabled, 5 instances, RF = 3, two users disabled", + withSharding: true, + instances: 5, + replicationFactor: 3, + configs: 10, + expectedTenants: 24, // (configs - disabled-tenants) * replication factor + disabledTenants: []string{"u-1", "u-2"}, + }, } for _, tt := range tc { @@ -1192,6 +1235,9 @@ func TestMultitenantAlertmanager_PerTenantSharding(t *testing.T) { amConfig.PollInterval = time.Hour amConfig.ShardingRing.RingCheckPeriod = 
time.Hour + amConfig.EnabledTenants = tt.enabledTenants + amConfig.DisabledTenants = tt.disabledTenants + if tt.withSharding { amConfig.ShardingEnabled = true } From b16789410fa1efa88671acb867dbc024826886ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Feb 2023 15:51:20 -0800 Subject: [PATCH 064/133] Bump github.com/klauspost/compress from 1.15.14 to 1.15.15 (#5108) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.15.14 to 1.15.15. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.15.14...v1.15.15) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 4 +- .../github.com/klauspost/compress/README.md | 7 + .../klauspost/compress/fse/compress.go | 31 +- .../klauspost/compress/huff0/bitreader.go | 8 +- .../klauspost/compress/huff0/compress.go | 90 +-- .../compress/huff0/decompress_amd64.s | 584 +++++++++--------- .../klauspost/compress/zstd/blockdec.go | 9 +- .../klauspost/compress/zstd/decoder.go | 54 +- .../compress/zstd/decoder_options.go | 26 +- .../klauspost/compress/zstd/dict.go | 3 + .../compress/zstd/encoder_options.go | 21 + .../klauspost/compress/zstd/framedec.go | 10 +- .../klauspost/compress/zstd/zstd.go | 1 - vendor/modules.txt | 2 +- 15 files changed, 448 insertions(+), 404 deletions(-) diff --git a/go.mod b/go.mod index 2244fd5f5cc..71ed8c788a7 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.14 + github.com/klauspost/compress v1.15.15 github.com/lib/pq v1.10.7 github.com/minio/minio-go/v7 v7.0.47 github.com/mitchellh/go-wordwrap v1.0.1 diff --git a/go.sum b/go.sum index 058d7c5562a..e0a2c8b1b27 100644 --- a/go.sum +++ b/go.sum @@ -1039,8 +1039,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= -github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index d73fb86e4fb..63f2cd5b25f 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,13 @@ This 
package provides various compression algorithms. # changelog +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + * Dec 11, 2022 (v1.15.13) * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c67..dac97e58a2d 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9dae..e36d9742f94 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index d9223a91efc..cdc94856f2c 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. 
huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2ce6..c4c7ab2d1fe 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // 
exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ 
R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ 
$0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) 
MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ 
loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6b9929ddf5e..2445bb4fe5b 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -192,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -210,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30459cd3fb8..7113e69ee3a 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -40,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -341,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -495,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -865,13 +851,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -953,3 +934,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69c9..07a90dd7af3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. 
+// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) } return nil } } +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + // WithDecoderMaxWindow allows to set a maximum window size for decodes. // This allows rejecting packets that will cause big memory usage. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b2725f77b5b..66a95c18ef3 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -21,6 +21,9 @@ type dict struct { const dictMagic = "\x37\xa4\x30\xec" +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { if d == nil { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 6015f498afa..8e15be2f7f8 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "math/bits" "runtime" "strings" ) @@ -305,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -316,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 65984bf07ca..d8e8a05bd73 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index b1886f7c742..5ffa82f5ac1 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -72,7 +72,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. 
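The two raw-dictionary options introduced above are symmetric: content registered with WithEncoderDictRaw as initial history on the encode side can be registered with WithDecoderDictRaw on the decode side, and with the relaxed setDict handling a zero dictionary id is no longer rejected as unknown. The Go sketch below illustrates the intended pairing; the import path is the klauspost/compress module referenced in vendor/modules.txt, and the round trip shown is an assumption about typical usage, not code taken from this patch.

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Arbitrary bytes used as shared initial history. Id 0 means the encoded
	// frame carries no explicit dictionary id, which the new Decoder.setDict
	// treats as "dictionary 0 or no dictionary" instead of an error.
	history := []byte("shared prefix acting as raw dictionary content")

	// NewWriter with a nil io.Writer is valid when only EncodeAll is used.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(0, history))
	if err != nil {
		panic(err)
	}
	payload := []byte("shared prefix acting as raw dictionary content, plus new data")
	compressed := enc.EncodeAll(payload, nil)

	// The decoder registers the same (id, content) pair so setDict can find it.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(0, history))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %d bytes, matches original: %v\n", len(out), string(out) == string(payload))
}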
diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e97f0bd176..7bcba77d46d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -509,7 +509,7 @@ github.com/json-iterator/go # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.15.14 +# github.com/klauspost/compress v1.15.15 ## explicit; go 1.17 github.com/klauspost/compress github.com/klauspost/compress/fse From 530f96144b970ce1b8b8aef6d9b5ca7f2b6e6f4e Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 2 Feb 2023 15:52:53 -0800 Subject: [PATCH 065/133] query frontend: get error from response body (#5118) Signed-off-by: Ben Ye Signed-off-by: Alex Le --- pkg/frontend/transport/handler.go | 32 ++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 47c61c83ea8..1f183572e57 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/status" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" @@ -152,6 +153,14 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { statusCode = getStatusCodeFromError(err) } else if resp != nil { statusCode = resp.StatusCode + // If the response status code is not 2xx, try to get the + // error message from response body. + if resp.StatusCode/100 != 2 { + body, err2 := tripperware.BodyBuffer(resp, f.log) + if err2 == nil { + err = httpgrpc.Errorf(resp.StatusCode, string(body)) + } + } } f.reportQueryStats(r, queryString, queryResponseTime, stats, err, statusCode) @@ -180,19 +189,14 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func formatGrafanaStatsFields(r *http.Request) []interface{} { - grafanaDashboardUID := "-" + fields := make([]interface{}, 0, 2) if dashboardUID := r.Header.Get("X-Dashboard-Uid"); dashboardUID != "" { - grafanaDashboardUID = dashboardUID + fields = append(fields, dashboardUID) } - grafanaPanelID := "-" if panelID := r.Header.Get("X-Panel-Id"); panelID != "" { - grafanaPanelID = panelID - } - - return []interface{}{ - "grafana_dashboard_uid", grafanaDashboardUID, - "grafana_panel_id", grafanaPanelID, + fields = append(fields, panelID) } + return fields } // reportSlowQuery reports slow queries. @@ -206,7 +210,10 @@ func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, query "path", r.URL.Path, "time_taken", queryResponseTime.String(), }, formatQueryString(queryString)...) - logMessage = append(logMessage, formatGrafanaStatsFields(r)...) + grafanaFields := formatGrafanaStatsFields(r) + if len(grafanaFields) > 0 { + logMessage = append(logMessage, grafanaFields...) + } level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) } @@ -244,7 +251,10 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer }, stats.LoadExtraFields()...) logMessage = append(logMessage, formatQueryString(queryString)...) - logMessage = append(logMessage, formatGrafanaStatsFields(r)...) + grafanaFields := formatGrafanaStatsFields(r) + if len(grafanaFields) > 0 { + logMessage = append(logMessage, grafanaFields...) 
+ } if error != nil { s, ok := status.FromError(error) From 35a09ee4d037e1b3de401c08573242d3b1fa4791 Mon Sep 17 00:00:00 2001 From: Bhuwan Sahni Date: Thu, 2 Feb 2023 15:53:37 -0800 Subject: [PATCH 066/133] Make TSDB max exemplars config per tenant (#5080) * Make TSDB max exemplars config per tenant - https://github.com/cortexproject/cortex/issues/5016 Signed-off-by: sahnib * Fix documentation as per new flags Signed-off-by: sahnib * Enable hot reload of max exemplars value. Signed-off-by: sahnib * Fix goimports in ingester. Signed-off-by: sahnib * Rename config flag, and method to be more generic towards updating tsdb config. Signed-off-by: sahnib * Remove un-intended single process config change. Signed-off-by: sahnib * Remove un-necessary logging. Signed-off-by: sahnib * Add fallback on previous limit in block storage config Signed-off-by: sahnib * Added unit-test cases for fallback logic, and updated docs. Signed-off-by: sahnib --------- Signed-off-by: sahnib Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/blocks-storage/querier.md | 4 +- docs/blocks-storage/store-gateway.md | 4 +- docs/configuration/config-file-reference.md | 14 ++++- docs/configuration/runtime-config.yaml | 15 ++++++ pkg/ingester/ingester.go | 57 +++++++++++++++++++-- pkg/ingester/ingester_test.go | 29 ++++++++++- pkg/storage/tsdb/config.go | 2 +- pkg/util/validation/limits.go | 8 +++ pkg/util/validation/limits_test.go | 32 ++++++++++++ 10 files changed, 155 insertions(+), 11 deletions(-) create mode 100644 docs/configuration/runtime-config.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 9898ee5eb1d..b11c01223a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # Changelog ## master / unreleased +* [CHANGE] Storage: Make Max exemplars config per tenant instead of global configuration. #5016 * [CHANGE] Alertmanager: Local file disclosure vulnerability in OpsGenie configuration has been fixed. #5045 * [CHANGE] Rename oltp_endpoint to otlp_endpoint to match opentelemetry spec and lib name. #5067 * [CHANGE] Distributor/Ingester: Log warn level on push requests when they have status code 4xx. Do not log if status is 429. #5103 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 9387292364d..96215459656 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -885,7 +885,9 @@ blocks_storage: # CLI flag: -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup [max_tsdb_opening_concurrency_on_startup: | default = 10] - # Enables support for exemplars in TSDB and sets the maximum number that + # Deprecated, use maxExemplars in limits instead. If the MaxExemplars value + # in limits is set to zero, cortex will fallback on this value. This setting + # enables support for exemplars in TSDB and sets the maximum number that # will be stored. 0 or less means disabled. # CLI flag: -blocks-storage.tsdb.max-exemplars [max_exemplars: | default = 0] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 0c54882bc0b..26f645bb775 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -955,7 +955,9 @@ blocks_storage: # CLI flag: -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup [max_tsdb_opening_concurrency_on_startup: | default = 10] - # Enables support for exemplars in TSDB and sets the maximum number that + # Deprecated, use maxExemplars in limits instead. If the MaxExemplars value + # in limits is set to zero, cortex will fallback on this value. 
This setting + # enables support for exemplars in TSDB and sets the maximum number that # will be stored. 0 or less means disabled. # CLI flag: -blocks-storage.tsdb.max-exemplars [max_exemplars: | default = 0] diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 259e80a530c..f00669bf7c9 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -774,6 +774,10 @@ lifecycler: # CLI flag: -ingester.rate-update-period [rate_update_period: | default = 15s] +# Period with which to update the per-user tsdb config. +# CLI flag: -ingester.user-tsdb-configs-update-period +[user_tsdb_configs_update_period: | default = 15s] + # Enable tracking of active series and export them as metrics. # CLI flag: -ingester.active-series-metrics-enabled [active_series_metrics_enabled: | default = true] @@ -2715,6 +2719,12 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s # e.g. remote_write.write_relabel_configs. [metric_relabel_configs: | default = ] +# Enables support for exemplars in TSDB and sets the maximum number that will be +# stored. less than zero means disabled. If the value is set to zero, cortex +# will fallback to blocks-storage.tsdb.max-exemplars value. +# CLI flag: -block-storage.tsdb.max-exemplars +[max_exemplars: | default = 0] + # The maximum number of series for which a query can fetch samples from each # ingester. This limit is enforced only in the ingesters (when querying samples # not flushed to the storage yet) and it's a per-instance limit. This limit is @@ -3814,7 +3824,9 @@ tsdb: # CLI flag: -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup [max_tsdb_opening_concurrency_on_startup: | default = 10] - # Enables support for exemplars in TSDB and sets the maximum number that will + # Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in + # limits is set to zero, cortex will fallback on this value. This setting + # enables support for exemplars in TSDB and sets the maximum number that will # be stored. 0 or less means disabled. # CLI flag: -blocks-storage.tsdb.max-exemplars [max_exemplars: | default = 0] diff --git a/docs/configuration/runtime-config.yaml b/docs/configuration/runtime-config.yaml new file mode 100644 index 00000000000..31fbaa25ea8 --- /dev/null +++ b/docs/configuration/runtime-config.yaml @@ -0,0 +1,15 @@ +overrides: + tenant1: + ingestion_rate: 10000 + max_exemplars: 1 + tenant2: + ingestion_rate: 10000 + max_exemplars: 0 + +multi_kv_config: + mirror_enabled: false + primary: memberlist + +ingester_limits: + max_ingestion_rate: 42000 + max_inflight_push_requests: 10000 diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index de512672f9b..7314a8587fb 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -13,6 +13,8 @@ import ( "sync" "time" + "github.com/prometheus/prometheus/config" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gogo/status" @@ -92,7 +94,8 @@ type Config struct { // Config for metadata purging. 
MetadataRetainPeriod time.Duration `yaml:"metadata_retain_period"` - RateUpdatePeriod time.Duration `yaml:"rate_update_period"` + RateUpdatePeriod time.Duration `yaml:"rate_update_period"` + UserTSDBConfigsUpdatePeriod time.Duration `yaml:"user_tsdb_configs_update_period"` ActiveSeriesMetricsEnabled bool `yaml:"active_series_metrics_enabled"` ActiveSeriesMetricsUpdatePeriod time.Duration `yaml:"active_series_metrics_update_period"` @@ -126,6 +129,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.MetadataRetainPeriod, "ingester.metadata-retain-period", 10*time.Minute, "Period at which metadata we have not seen will remain in memory before being deleted.") f.DurationVar(&cfg.RateUpdatePeriod, "ingester.rate-update-period", 15*time.Second, "Period with which to update the per-user ingestion rates.") + f.DurationVar(&cfg.UserTSDBConfigsUpdatePeriod, "ingester.user-tsdb-configs-update-period", 15*time.Second, "Period with which to update the per-user tsdb config.") f.BoolVar(&cfg.ActiveSeriesMetricsEnabled, "ingester.active-series-metrics-enabled", true, "Enable tracking of active series and export them as metrics.") f.DurationVar(&cfg.ActiveSeriesMetricsUpdatePeriod, "ingester.active-series-metrics-update-period", 1*time.Minute, "How often to update active series metrics.") f.DurationVar(&cfg.ActiveSeriesMetricsIdleTimeout, "ingester.active-series-metrics-idle-timeout", 10*time.Minute, "After what time a series is considered to be inactive.") @@ -782,6 +786,9 @@ func (i *Ingester) updateLoop(ctx context.Context) error { rateUpdateTicker := time.NewTicker(i.cfg.RateUpdatePeriod) defer rateUpdateTicker.Stop() + userTSDBConfigTicker := time.NewTicker(i.cfg.UserTSDBConfigsUpdatePeriod) + defer userTSDBConfigTicker.Stop() + ingestionRateTicker := time.NewTicker(instanceIngestionRateTickInterval) defer ingestionRateTicker.Stop() @@ -812,7 +819,8 @@ func (i *Ingester) updateLoop(ctx context.Context) error { case <-activeSeriesTickerChan: i.updateActiveSeries() - + case <-userTSDBConfigTicker.C: + i.updateUserTSDBConfigs() case <-ctx.Done(): return nil case err := <-i.subservicesWatcher.Chan(): @@ -821,6 +829,43 @@ func (i *Ingester) updateLoop(ctx context.Context) error { } } +func (i *Ingester) updateUserTSDBConfigs() { + for _, userID := range i.getTSDBUsers() { + userDB := i.getTSDB(userID) + if userDB == nil { + continue + } + + cfg := &config.Config{ + StorageConfig: config.StorageConfig{ + ExemplarsConfig: &config.ExemplarsConfig{ + MaxExemplars: i.getMaxExemplars(userID), + }, + }, + } + + // This method currently updates the MaxExemplars and OutOfOrderTimeWindow. Invoking this method + // with a 0 value of OutOfOrderTimeWindow simply updates Max Exemplars. + err := userDB.db.ApplyConfig(cfg) + if err != nil { + level.Error(logutil.WithUserID(userID, i.logger)).Log("msg", "failed to update user tsdb configuration.") + } + } +} + +// getMaxExemplars returns the maxExemplars value set in limits config. +// If limits value is set to zero, it falls back to old configuration +// in block storage config. 
+func (i *Ingester) getMaxExemplars(userID string) int64 { + maxExemplarsFromLimits := i.limits.MaxExemplars(userID) + + if maxExemplarsFromLimits == 0 { + return int64(i.cfg.BlocksStorageConfig.TSDB.MaxExemplars) + } + + return int64(maxExemplarsFromLimits) +} + func (i *Ingester) updateActiveSeries() { purgeTime := time.Now().Add(-i.cfg.ActiveSeriesMetricsIdleTimeout) @@ -1045,7 +1090,8 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte }) } - if i.cfg.BlocksStorageConfig.TSDB.MaxExemplars > 0 { + maxExemplarsForUser := i.getMaxExemplars(userID) + if maxExemplarsForUser > 0 { // app.AppendExemplar currently doesn't create the series, it must // already exist. If it does not then drop. if ref == 0 && len(ts.Exemplars) > 0 { @@ -1834,7 +1880,8 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } enableExemplars := false - if i.cfg.BlocksStorageConfig.TSDB.MaxExemplars > 0 { + maxExemplarsForUser := i.getMaxExemplars(userID) + if maxExemplarsForUser > 0 { enableExemplars = true } // Create a new user database @@ -1851,7 +1898,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { BlocksToDelete: userDB.blocksToDelete, EnableExemplarStorage: enableExemplars, IsolationDisabled: true, - MaxExemplars: int64(i.cfg.BlocksStorageConfig.TSDB.MaxExemplars), + MaxExemplars: maxExemplarsForUser, HeadChunksWriteQueueSize: i.cfg.BlocksStorageConfig.TSDB.HeadChunksWriteQueueSize, EnableMemorySnapshotOnShutdown: i.cfg.BlocksStorageConfig.TSDB.MemorySnapshotOnShutdown, }, nil) diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 8ae139d9a98..287b14133a9 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -774,9 +774,10 @@ func TestIngester_Push(t *testing.T) { cfg := defaultIngesterTestConfig(t) cfg.LifecyclerConfig.JoinAfter = 0 cfg.ActiveSeriesMetricsEnabled = !testData.disableActiveSeries - cfg.BlocksStorageConfig.TSDB.MaxExemplars = testData.maxExemplars - i, err := prepareIngesterWithBlocksStorage(t, cfg, registry) + limits := defaultLimitsTestConfig() + limits.MaxExemplars = testData.maxExemplars + i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, "", registry) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -4015,6 +4016,30 @@ func TestIngester_inflightPushRequests(t *testing.T) { require.NoError(t, g.Wait()) } +func TestIngester_MaxExemplarsFallBack(t *testing.T) { + // Create ingester. 
+ cfg := defaultIngesterTestConfig(t) + cfg.BlocksStorageConfig.TSDB.MaxExemplars = 2 + + dir := t.TempDir() + blocksDir := filepath.Join(dir, "blocks") + limits := defaultLimitsTestConfig() + i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, blocksDir, nil) + require.NoError(t, err) + + maxExemplars := i.getMaxExemplars("someTenant") + require.Equal(t, maxExemplars, int64(2)) + + // set max exemplars value in limits, and re-initialize the ingester + limits.MaxExemplars = 5 + i, err = prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, blocksDir, nil) + require.NoError(t, err) + + // validate this value is picked up now + maxExemplars = i.getMaxExemplars("someTenant") + require.Equal(t, maxExemplars, int64(5)) +} + func generateSamplesForLabel(l labels.Labels, count int) *cortexpb.WriteRequest { var lbls = make([]labels.Labels, 0, count) var samples = make([]cortexpb.Sample, 0, count) diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 1709bf12f1f..ed57dc5ba30 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -173,8 +173,8 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).") f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 0, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.") - f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.") f.IntVar(&cfg.HeadChunksWriteQueueSize, "blocks-storage.tsdb.head-chunks-write-queue-size", chunks.DefaultWriteQueueSize, "The size of the in-memory queue used before flushing chunks to the disk.") + f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, cortex will fallback on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.") f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.") } diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 3f4e67dc36d..0339e6a4e57 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -55,6 +55,7 @@ type Limits struct { EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"` IngestionTenantShardSize int `yaml:"ingestion_tenant_shard_size" json:"ingestion_tenant_shard_size"` MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty" json:"metric_relabel_configs,omitempty" doc:"nocli|description=List of metric relabel configurations. 
Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. remote_write.write_relabel_configs."` + MaxExemplars int `yaml:"max_exemplars" json:"max_exemplars"` // Ingester enforced limits. // Series @@ -63,6 +64,7 @@ type Limits struct { MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"` MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"` MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"` + // Metadata MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"` MaxLocalMetadataPerMetric int `yaml:"max_metadata_per_metric" json:"max_metadata_per_metric"` @@ -147,6 +149,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.") f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.") + f.IntVar(&l.MaxExemplars, "block-storage.tsdb.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.") f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.") f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.") @@ -564,6 +567,11 @@ func (o *Overrides) AlertmanagerReceiversBlockPrivateAddresses(user string) bool return o.GetOverridesForUser(user).AlertmanagerReceiversBlockPrivateAddresses } +// MaxExemplars gets the maximum number of exemplars that will be stored per user. 0 or less means disabled. +func (o *Overrides) MaxExemplars(userID string) int { + return o.GetOverridesForUser(userID).MaxExemplars +} + // Notification limits are special. Limits are returned in following order: // 1. per-tenant limits for given integration // 2. 
default limits for given integration diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index d47d029eb99..62b56c6cb2e 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -532,3 +532,35 @@ testuser: }) } } + +func TestMaxExemplarsOverridesPerTenant(t *testing.T) { + SetDefaultLimitsForYAMLUnmarshalling(Limits{ + MaxLabelNameLength: 100, + }) + + baseYAML := ` +max_exemplars: 5` + overridesYAML := ` +tenant1: + max_exemplars: 1 +tenant2: + max_exemplars: 3 +` + + l := Limits{} + err := yaml.UnmarshalStrict([]byte(baseYAML), &l) + require.NoError(t, err) + + overrides := map[string]*Limits{} + err = yaml.Unmarshal([]byte(overridesYAML), &overrides) + require.NoError(t, err, "parsing overrides") + + tl := newMockTenantLimits(overrides) + + ov, err := NewOverrides(l, tl) + require.NoError(t, err) + + require.Equal(t, 1, ov.MaxExemplars("tenant1")) + require.Equal(t, 3, ov.MaxExemplars("tenant2")) + require.Equal(t, 5, ov.MaxExemplars("tenant3")) +} From 599f8f8a9b0a971d746dfc9d3466cfe91e5452b8 Mon Sep 17 00:00:00 2001 From: Bhuwan Sahni Date: Fri, 3 Feb 2023 12:35:20 -0800 Subject: [PATCH 067/133] Rename max exemplars limit flag to ingester.max-exemplars. (#5122) Signed-off-by: sahnib Signed-off-by: Alex Le --- docs/configuration/config-file-reference.md | 2 +- pkg/util/validation/limits.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index f00669bf7c9..4659d4b4784 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -2722,7 +2722,7 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s # Enables support for exemplars in TSDB and sets the maximum number that will be # stored. less than zero means disabled. If the value is set to zero, cortex # will fallback to blocks-storage.tsdb.max-exemplars value. -# CLI flag: -block-storage.tsdb.max-exemplars +# CLI flag: -ingester.max-exemplars [max_exemplars: | default = 0] # The maximum number of series for which a query can fetch samples from each diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 0339e6a4e57..3be42b02dcf 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -149,7 +149,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.") f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.") - f.IntVar(&l.MaxExemplars, "block-storage.tsdb.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.") + f.IntVar(&l.MaxExemplars, "ingester.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. 
If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.") f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.") f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.") From 64e4b16d7d2befe136698e450ed44ebaac827e5d Mon Sep 17 00:00:00 2001 From: alexk <42849762+alexku7@users.noreply.github.com> Date: Thu, 9 Feb 2023 11:53:44 +0200 Subject: [PATCH 068/133] Update Dockerfiles to use Alpine 3.17 (#5132) Signed-off-by: Kurtser Alex Signed-off-by: Alex Le --- CHANGELOG.md | 1 + cmd/cortex/Dockerfile | 2 +- cmd/query-tee/Dockerfile | 2 +- cmd/test-exporter/Dockerfile | 2 +- cmd/thanosconvert/Dockerfile | 2 +- development/tsdb-blocks-storage-s3-gossip/dev.dockerfile | 2 +- development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile | 2 +- development/tsdb-blocks-storage-s3/dev.dockerfile | 2 +- .../tsdb-blocks-storage-swift-single-binary/dev.dockerfile | 2 +- packaging/fpm/Dockerfile | 2 +- 10 files changed, 10 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b11c01223a9..95ee1f45481 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ * [ENHANCEMENT] Query Frontend: Skip instant query roundtripper if sharding is not applicable. #5062 * [ENHANCEMENT] Push reduce one hash operation of Labels. #4945 #5114 * [ENHANCEMENT] Alertmanager: Added `-alertmanager.enabled-tenants` and `-alertmanager.disabled-tenants` to explicitly enable or disable alertmanager for specific tenants. #5116 +* [ENHANCEMENT] Upgraded Docker base images to `alpine:3.17`. #5132 * [FEATURE] Querier/Query Frontend: support Prometheus /api/v1/status/buildinfo API. #4978 * [FEATURE] Ingester: Add active series to all_user_stats page. #4972 * [FEATURE] Ingester: Added `-blocks-storage.tsdb.head-chunks-write-queue-size` allowing to configure the size of the in-memory queue used before flushing chunks to the disk . 
#5000 diff --git a/cmd/cortex/Dockerfile b/cmd/cortex/Dockerfile index 52fbb0c8d21..e6604a38d4d 100644 --- a/cmd/cortex/Dockerfile +++ b/cmd/cortex/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.14 +FROM alpine:3.17 ARG TARGETARCH RUN apk add --no-cache ca-certificates diff --git a/cmd/query-tee/Dockerfile b/cmd/query-tee/Dockerfile index 8974e5bd272..53242577b32 100644 --- a/cmd/query-tee/Dockerfile +++ b/cmd/query-tee/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.14 +FROM alpine:3.17 ARG TARGETARCH RUN apk add --no-cache ca-certificates diff --git a/cmd/test-exporter/Dockerfile b/cmd/test-exporter/Dockerfile index 0ec8f1ffef5..52897f0b862 100644 --- a/cmd/test-exporter/Dockerfile +++ b/cmd/test-exporter/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.14 +FROM alpine:3.17 ARG TARGETARCH RUN apk add --no-cache ca-certificates COPY test-exporter-$TARGETARCH /test-exporter diff --git a/cmd/thanosconvert/Dockerfile b/cmd/thanosconvert/Dockerfile index 9e4faa4a1aa..80ba01201e3 100644 --- a/cmd/thanosconvert/Dockerfile +++ b/cmd/thanosconvert/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.14 +FROM alpine:3.17 ARG TARGETARCH RUN apk add --no-cache ca-certificates COPY thanosconvert-$TARGETARCH /thanosconvert diff --git a/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile b/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile index fa9d47ce2d6..abd818fd3a8 100644 --- a/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile +++ b/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile @@ -2,7 +2,7 @@ FROM golang:1.18 ENV CGO_ENABLED=0 RUN go get github.com/go-delve/delve/cmd/dlv -FROM alpine:3.14 +FROM alpine:3.17 RUN mkdir /cortex WORKDIR /cortex diff --git a/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile b/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile index 98cc805a613..f66add7f19f 100644 --- a/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile +++ b/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.14 +FROM alpine:3.17 RUN mkdir /cortex WORKDIR /cortex diff --git a/development/tsdb-blocks-storage-s3/dev.dockerfile b/development/tsdb-blocks-storage-s3/dev.dockerfile index fa9d47ce2d6..abd818fd3a8 100644 --- a/development/tsdb-blocks-storage-s3/dev.dockerfile +++ b/development/tsdb-blocks-storage-s3/dev.dockerfile @@ -2,7 +2,7 @@ FROM golang:1.18 ENV CGO_ENABLED=0 RUN go get github.com/go-delve/delve/cmd/dlv -FROM alpine:3.14 +FROM alpine:3.17 RUN mkdir /cortex WORKDIR /cortex diff --git a/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile b/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile index 98cc805a613..f66add7f19f 100644 --- a/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile +++ b/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.14 +FROM alpine:3.17 RUN mkdir /cortex WORKDIR /cortex diff --git a/packaging/fpm/Dockerfile b/packaging/fpm/Dockerfile index b348f88ca9e..2796d540bf7 100644 --- a/packaging/fpm/Dockerfile +++ b/packaging/fpm/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.13 +FROM alpine:3.17 RUN apk add --no-cache \ ruby \ From d2fc6c9b2d60c895fc63639c93d0144fa34ae9bc Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 10 Feb 2023 15:02:41 -0800 Subject: [PATCH 069/133] fix conflict schema url when initializing resources (#5140) Signed-off-by: Ben Ye Signed-off-by: Alex Le --- pkg/tracing/tracing.go | 17 ++++++----------- pkg/tracing/tracing_test.go | 21 +++++++++++++++++++++ 2 files changed, 27 
insertions(+), 11 deletions(-) create mode 100644 pkg/tracing/tracing_test.go diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index bfa97e6a06f..b9cb069062b 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -19,7 +19,7 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "github.com/cortexproject/cortex/pkg/tracing/migration" "github.com/cortexproject/cortex/pkg/tracing/sampler" @@ -119,7 +119,6 @@ func SetupTracing(ctx context.Context, name string, c Config) (func(context.Cont } r, err := newResource(ctx, name, c.Otel.ExtraDetectors) - if err != nil { return nil, fmt.Errorf("creating tracing resource: %w", err) } @@ -158,14 +157,10 @@ func newTraceProvider(r *resource.Resource, c Config, exporter *otlptrace.Export } func newResource(ctx context.Context, target string, detectors []resource.Detector) (*resource.Resource, error) { - r, err := resource.New(ctx, resource.WithHost(), resource.WithDetectors(detectors...)) - - if err != nil { - return nil, err + opts := []resource.Option{ + resource.WithHost(), + resource.WithDetectors(detectors...), + resource.WithAttributes(semconv.ServiceNameKey.String(target)), } - - return resource.Merge(r, resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNameKey.String(target), - )) + return resource.New(ctx, opts...) } diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go new file mode 100644 index 00000000000..3739e20af24 --- /dev/null +++ b/pkg/tracing/tracing_test.go @@ -0,0 +1,21 @@ +package tracing + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +func TestNewResource(t *testing.T) { + ctx := context.Background() + target := "ingester" + r, err := newResource(ctx, target, nil) + require.NoError(t, err) + + set := r.Set() + name, ok := set.Value(semconv.ServiceNameKey) + require.True(t, ok) + require.Equal(t, name.AsString(), target) +} From e41a88c3bf8771015a80a0021ba1d7436d2bfc8e Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 10 Feb 2023 16:40:16 -0800 Subject: [PATCH 070/133] Using otel tracer sampler (#5141) * Using otel tracer sampler Signed-off-by: Alan Protasio * Changelog Signed-off-by: Alan Protasio --------- Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- CHANGELOG.md | 1 + pkg/tracing/sampler/sampling.go | 55 ---------------------- pkg/tracing/sampler/sampling_test.go | 69 ---------------------------- pkg/tracing/tracing.go | 5 +- 4 files changed, 3 insertions(+), 127 deletions(-) delete mode 100644 pkg/tracing/sampler/sampling.go delete mode 100644 pkg/tracing/sampler/sampling_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 95ee1f45481..5173b8b5a8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ * [CHANGE] Alertmanager: Local file disclosure vulnerability in OpsGenie configuration has been fixed. #5045 * [CHANGE] Rename oltp_endpoint to otlp_endpoint to match opentelemetry spec and lib name. #5067 * [CHANGE] Distributor/Ingester: Log warn level on push requests when they have status code 4xx. Do not log if status is 429. #5103 +* [CHANGE] Tracing: Use the default OTEL trace sampler when `-tracing.otel.exporter-type` is set to `awsxray`. #5141 * [ENHANCEMENT] Update Go version to 1.19.3. 
#4988 * [ENHANCEMENT] Querier: limit series query to only ingesters if `start` param is not specified. #4976 * [ENHANCEMENT] Query-frontend/scheduler: add a new limit `frontend.max-outstanding-requests-per-tenant` for configuring queue size per tenant. Started deprecating two flags `-query-scheduler.max-outstanding-requests-per-tenant` and `-querier.max-outstanding-requests-per-tenant`, and change their value default to 0. Now if both the old flag and new flag are specified, the old flag's queue size will be picked. #5005 diff --git a/pkg/tracing/sampler/sampling.go b/pkg/tracing/sampler/sampling.go deleted file mode 100644 index 068f48a1e83..00000000000 --- a/pkg/tracing/sampler/sampling.go +++ /dev/null @@ -1,55 +0,0 @@ -package sampler - -import ( - "encoding/binary" - "fmt" - "math" - - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" -) - -type xrayTraceIDRatioBased struct { - sdktrace.Sampler - max uint64 -} - -// NewXrayTraceIDRatioBased creates a sampler based on random number. -// fraction parameter should be between 0 and 1 where: -// fraction >= 1 it will always sample -// fraction <= 0 it will never sample -func NewXrayTraceIDRatioBased(fraction float64) sdktrace.Sampler { - if fraction >= 1 { - return sdktrace.AlwaysSample() - } else if fraction <= 0 { - return sdktrace.NeverSample() - } - - return &xrayTraceIDRatioBased{max: uint64(fraction * math.MaxUint64)} -} - -func (s *xrayTraceIDRatioBased) ShouldSample(p sdktrace.SamplingParameters) sdktrace.SamplingResult { - // The default otel sampler pick the first 8 bytes to make the sampling decision and this is a problem to - // xray case as the first 4 bytes on the xray traceId is the time of the original request and the random part are - // the 12 last bytes, and so, this sampler pick the last 8 bytes to make the sampling decision. 
- // Xray Trace format: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html - // Xray Id Generator: https://github.com/open-telemetry/opentelemetry-go-contrib/blob/54f0bc5c0fd347cd6db9b7bc14c9f0c00dfcb36b/propagators/aws/xray/idgenerator.go#L58-L63 - // Ref: https://github.com/open-telemetry/opentelemetry-go/blob/7a60bc785d669fa6ad26ba70e88151d4df631d90/sdk/trace/sampling.go#L82-L95 - val := binary.BigEndian.Uint64(p.TraceID[8:16]) - psc := trace.SpanContextFromContext(p.ParentContext) - shouldSample := val < s.max - if shouldSample { - return sdktrace.SamplingResult{ - Decision: sdktrace.RecordAndSample, - Tracestate: psc.TraceState(), - } - } - return sdktrace.SamplingResult{ - Decision: sdktrace.Drop, - Tracestate: psc.TraceState(), - } -} - -func (s *xrayTraceIDRatioBased) Description() string { - return fmt.Sprintf("xrayTraceIDRatioBased{%v}", s.max) -} diff --git a/pkg/tracing/sampler/sampling_test.go b/pkg/tracing/sampler/sampling_test.go deleted file mode 100644 index 892392ab2f6..00000000000 --- a/pkg/tracing/sampler/sampling_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package sampler - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - "go.opentelemetry.io/contrib/propagators/aws/xray" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" -) - -func Test_ShouldSample(t *testing.T) { - parentCtx := trace.ContextWithSpanContext( - context.Background(), - trace.NewSpanContext(trace.SpanContextConfig{ - TraceState: trace.TraceState{}, - }), - ) - - generator := xray.NewIDGenerator() - - tests := []struct { - name string - fraction float64 - }{ - { - name: "should always sample", - fraction: 1, - }, - { - name: "should nerver sample", - fraction: 0, - }, - { - name: "should sample 50%", - fraction: 0.5, - }, - { - name: "should sample 10%", - fraction: 0.1, - }, - } - - totalIterations := 10000 - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - totalSampled := 0 - s := NewXrayTraceIDRatioBased(tt.fraction) - for i := 0; i < totalIterations; i++ { - traceID, _ := generator.NewIDs(context.Background()) - r := s.ShouldSample( - sdktrace.SamplingParameters{ - ParentContext: parentCtx, - TraceID: traceID, - Name: "test", - Kind: trace.SpanKindServer, - }) - if r.Decision == sdktrace.RecordAndSample { - totalSampled++ - } - } - - tolerance := 0.1 - expected := tt.fraction * float64(totalIterations) - require.InDelta(t, expected, totalSampled, expected*tolerance) - }) - } -} diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index b9cb069062b..33f90ee5bd2 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -22,7 +22,6 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "github.com/cortexproject/cortex/pkg/tracing/migration" - "github.com/cortexproject/cortex/pkg/tracing/sampler" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/tls" ) @@ -147,12 +146,12 @@ func newTraceProvider(r *resource.Resource, c Config, exporter *otlptrace.Export switch strings.ToLower(c.Otel.ExporterType) { case "awsxray": options = append(options, sdktrace.WithIDGenerator(xray.NewIDGenerator())) - options = append(options, sdktrace.WithSampler(sdktrace.ParentBased(sampler.NewXrayTraceIDRatioBased(c.Otel.SampleRatio)))) propagator = xray.Propagator{} default: - options = append(options, sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(c.Otel.SampleRatio)))) } + options = append(options, 
sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(c.Otel.SampleRatio)))) + return propagator, sdktrace.NewTracerProvider(options...) } From 414e9651e5f410d1dc97c09fc2a7f0aee00a34bc Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Sat, 11 Feb 2023 14:31:24 +0100 Subject: [PATCH 071/133] Remove Weave cloud and Grafana Cloud Reference from docs (#5138) Signed-off-by: Friedrich Gonzalez Signed-off-by: Alex Le --- docs/_index.md | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/docs/_index.md b/docs/_index.md index b92b677ed18..2796e983ff6 100644 --- a/docs/_index.md +++ b/docs/_index.md @@ -21,7 +21,7 @@ Cortex provides horizontally scalable, highly available, multi-tenant, long term Prometheus sources in a single cluster, allowing untrusted parties to share the same cluster. - **Long term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long term storage of metric data. This allows you to durably store data for longer than the lifetime of any single machine, and use this data for long term capacity planning. -Cortex is a [CNCF](https://cncf.io) incubation project used in several production systems including [Weave Cloud](https://cloud.weave.works) and [Grafana Cloud](https://grafana.com/cloud). +Cortex is a [CNCF](https://cncf.io) incubation project used in several production systems. Cortex is primarily used as a [remote write](https://prometheus.io/docs/operating/configuration/#remote_write) destination for Prometheus, exposing a Prometheus-compatible query API. ## Documentation @@ -139,19 +139,9 @@ Your feedback is always welcome. ## Hosted Cortex (Prometheus as a service) -There are several commercial services where you can use Cortex +These are commercial services where you can use Cortex on-demand: -### Weave Cloud - -[Weave Cloud](https://cloud.weave.works) from -[Weaveworks](https://weave.works) lets you deploy, manage, and monitor -container-based applications. Sign up at https://cloud.weave.works -and follow the instructions there. Additional help can also be found -in the [Weave Cloud documentation](https://www.weave.works/docs/cloud/latest/overview/). - -[Instrumenting Your App: Best Practices](https://www.weave.works/docs/cloud/latest/tasks/monitor/best-instrumenting/) - ### Amazon Managed Service for Prometheus (AMP) [Amazon Managed Service for Prometheus (AMP)](https://aws.amazon.com/prometheus/) is a Prometheus-compatible monitoring service that makes it easy to monitor containerized applications at scale. It is a highly available, secure, and managed monitoring for your containers. Get started [here](https://console.aws.amazon.com/prometheus/home). To learn more about the AMP, reference our [documentation](https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) and [Getting Started with AMP blog](https://aws.amazon.com/blogs/mt/getting-started-amazon-managed-service-for-prometheus/). 
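A minimal Go sketch of the sampler wiring that PATCH 070 above arrives at, under stated assumptions: the helper name buildTracerProviderOptions is hypothetical and not part of the Cortex code; it only illustrates that the ParentBased(TraceIDRatioBased) sampler is now appended for every exporter type, while the awsxray case keeps only its X-Ray trace ID generator.

package tracing

import (
	"strings"

	"go.opentelemetry.io/contrib/propagators/aws/xray"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// buildTracerProviderOptions is an illustrative (hypothetical) helper showing
// the sampler setup after the custom X-Ray ratio sampler was removed.
func buildTracerProviderOptions(exporterType string, sampleRatio float64) []sdktrace.TracerProviderOption {
	var options []sdktrace.TracerProviderOption
	if strings.ToLower(exporterType) == "awsxray" {
		// Only the trace ID generation stays exporter-specific.
		options = append(options, sdktrace.WithIDGenerator(xray.NewIDGenerator()))
	}
	// The sampler is appended unconditionally, outside the exporter-type switch.
	options = append(options, sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(sampleRatio))))
	return options
}

With this shape, sampling-ratio handling is identical across exporter types; only ID generation and propagation differ for awsxray.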
From 869b69e2c5783f002912be5a09d18e44706515ea Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 13 Feb 2023 18:03:56 -0800 Subject: [PATCH 072/133] initialize querier stats in QFE only if it is not enabled (#5142) * initialize querier stats in QFE only if it is not already enabled Signed-off-by: Ben Ye * fix lint Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye Signed-off-by: Alex Le --- pkg/frontend/transport/handler.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 1f183572e57..7763f79ea98 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -120,9 +120,13 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Initialise the stats in the context and make sure it's propagated // down the request chain. if f.cfg.QueryStatsEnabled { - var ctx context.Context - stats, ctx = querier_stats.ContextWithEmptyStats(r.Context()) - r = r.WithContext(ctx) + // Check if querier stats is enabled in the context. + stats = querier_stats.FromContext(r.Context()) + if stats == nil { + var ctx context.Context + stats, ctx = querier_stats.ContextWithEmptyStats(r.Context()) + r = r.WithContext(ctx) + } } defer func() { From c273e676f8054007a0b61396262f69c276190cbb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Feb 2023 10:40:02 -0800 Subject: [PATCH 073/133] Bump go.etcd.io/etcd/client/pkg/v3 from 3.5.6 to 3.5.7 (#5125) Bumps [go.etcd.io/etcd/client/pkg/v3](https://github.com/etcd-io/etcd) from 3.5.6 to 3.5.7. - [Release notes](https://github.com/etcd-io/etcd/releases) - [Changelog](https://github.com/etcd-io/etcd/blob/main/Dockerfile-release.ppc64le) - [Commits](https://github.com/etcd-io/etcd/compare/v3.5.6...v3.5.7) --- updated-dependencies: - dependency-name: go.etcd.io/etcd/client/pkg/v3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 3 +- .../pkg/v3/transport/sockopt_solaris.go | 35 +++++++++++++++++++ .../client/pkg/v3/transport/sockopt_unix.go | 18 ++++++++-- vendor/modules.txt | 4 +-- 5 files changed, 56 insertions(+), 6 deletions(-) create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go diff --git a/go.mod b/go.mod index 71ed8c788a7..86da3c3af7b 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,7 @@ require ( github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.6 - go.etcd.io/etcd/client/pkg/v3 v3.5.6 + go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.6 go.opentelemetry.io/contrib/propagators/aws v1.12.0 go.opentelemetry.io/otel v1.12.0 diff --git a/go.sum b/go.sum index e0a2c8b1b27..c81a0eacda8 100644 --- a/go.sum +++ b/go.sum @@ -1585,8 +1585,9 @@ go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQc go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A= go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E= diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go new file mode 100644 index 00000000000..495c736365e --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go @@ -0,0 +1,35 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build solaris +// +build solaris + +package transport + +import ( + "fmt" + "syscall" + + "golang.org/x/sys/unix" +) + +func setReusePort(network, address string, c syscall.RawConn) error { + return fmt.Errorf("port reuse is not supported on Solaris") +} + +func setReuseAddress(network, address string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) + }) +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go index 432b52e0fce..e2cc6f48286 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go @@ -1,5 +1,19 @@ -//go:build !windows -// +build !windows +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !solaris +// +build !windows,!solaris package transport diff --git a/vendor/modules.txt b/vendor/modules.txt index 7bcba77d46d..689523ddb60 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -930,8 +930,8 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.6 -## explicit; go 1.16 +# go.etcd.io/etcd/client/pkg/v3 v3.5.7 +## explicit; go 1.17 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd From 36fced71ddd72e91c7dee702d5d4acebaca15cdf Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 14 Feb 2023 10:48:50 -0800 Subject: [PATCH 074/133] Add redis as index cache and caching bucket backend (#5057) * add redis as index cache and caching bucket backend Signed-off-by: Ben Ye * add e2e test for redis index cache Signed-off-by: Ben Ye * fix lint and integration test Signed-off-by: Ben Ye * fix redis address Signed-off-by: Ben Ye * update doc Signed-off-by: Ben Ye * use cache backend to create cache Signed-off-by: Ben Ye * update store gateway docs Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye Signed-off-by: Alex Le --- .github/workflows/test-build-deploy.yml | 1 + CHANGELOG.md | 1 + docs/blocks-storage/querier.md | 324 +++++++++++++++++- docs/blocks-storage/store-gateway.md | 341 ++++++++++++++++++- docs/blocks-storage/store-gateway.template | 17 +- docs/configuration/config-file-reference.md | 324 +++++++++++++++++- integration/e2e/cache/cache.go | 11 + integration/e2e/images/images.go | 1 + integration/querier_test.go | 86 +++-- pkg/cortexpb/compat_test.go | 3 +- pkg/storage/tsdb/caching_bucket.go | 38 ++- pkg/storage/tsdb/index_cache.go | 22 +- pkg/storage/tsdb/redis_client_config.go | 107 ++++++ pkg/storage/tsdb/redis_client_config_test.go | 73 ++++ 14 files changed, 1296 insertions(+), 53 deletions(-) create mode 100644 pkg/storage/tsdb/redis_client_config.go create mode 100644 pkg/storage/tsdb/redis_client_config_test.go diff --git 
a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 469f98d4f0e..faa04fa59fb 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -140,6 +140,7 @@ jobs: docker pull quay.io/cortexproject/cortex:v1.14.0 fi docker pull memcached:1.6.1 + docker pull redis:7.0.4-alpine env: TEST_TAGS: ${{ matrix.tags }} - name: Integration Tests diff --git a/CHANGELOG.md b/CHANGELOG.md index 5173b8b5a8b..f9b308e95a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ * [FEATURE] Querier/Ruler: Support the new thanos promql engine. This is an experimental feature and might change in the future. #5093 * [FEATURE] Added zstd as an option for grpc compression #5092 * [FEATURE] Ring: Add new kv store option `dynamodb`. #5026 +* [FEATURE] Cache: Support redis as backend for caching bucket and index cache. #5057 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. #5044 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 96215459656..7dee1d32fa7 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -519,7 +519,8 @@ blocks_storage: [consistency_delay: | default = 0s] index_cache: - # The index cache backend type. Supported values: inmemory, memcached. + # The index cache backend type. Supported values: inmemory, memcached, + # redis. # CLI flag: -blocks-storage.bucket-store.index-cache.backend [backend: | default = "inmemory"] @@ -576,6 +577,113 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. 
+ # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. + # Default 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.cache-size + [cache_size: | default = 0] + chunks_cache: # Backend for chunks cache, if not empty. Supported values: memcached. # CLI flag: -blocks-storage.bucket-store.chunks-cache.backend @@ -628,6 +736,113 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.chunks-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.username + [username: | default = ""] + + # Redis password. 
+ # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. + # Default 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. 
+ # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.cache-size + [cache_size: | default = 0] + # Size of each subrange that bucket object is split into for better # caching. # CLI flag: -blocks-storage.bucket-store.chunks-cache.subrange-size @@ -699,6 +914,113 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. 
Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. + # Default 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.cache-size + [cache_size: | default = 0] + # How long to cache list of tenants in the bucket. # CLI flag: -blocks-storage.bucket-store.metadata-cache.tenants-list-ttl [tenants_list_ttl: | default = 15m] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 26f645bb775..23b8295f35a 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -117,6 +117,7 @@ The store-gateway can use a cache to speed up lookups of postings and series fro - `inmemory` - `memcached` +- `redis` #### In-memory index cache @@ -139,20 +140,26 @@ The Memcached client uses a jump hash algorithm to shard cached entries across a For example, if you're running Memcached in Kubernetes, you may: 1. Deploy your Memcached cluster using a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) -2. Create an [headless service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) for Memcached StatefulSet +2. Create a [headless service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) for Memcached StatefulSet 3. Configure the Cortex's Memcached client address using the `dnssrvnoa+` [service discovery](../configuration/arguments.md#dns-service-discovery) +#### Redis index cache + +The `redis` index cache allows to use [Redis](https://memcached.org/) as cache backend. 
This cache backend is configured using `-blocks-storage.bucket-store.index-cache.backend=redis` and requires the Redis server(s) addresses via `-blocks-storage.bucket-store.index-cache.redis.addresses` (or config file). + +Using `redis` as the cache backend has similar trade-offs as using `memcached` cache backend. However, client side caching can be enabled when using `redis` backend to avoid Store Gateway fetching data from cache each time. See [here](https://redis.io/docs/manual/client-side-caching/) for more info and it can be enabled by setting flag `-blocks-storage.bucket-store.index-cache.redis.cache-size` > 0. + ### Chunks cache Store-gateway can also use a cache for storing chunks fetched from the storage. Chunks contain actual samples, and can be reused if user query hits the same series for the same time range. -To enable chunks cache, please set `-blocks-storage.bucket-store.chunks-cache.backend`. Chunks can currently only be stored into Memcached cache. Memcached client can be configured via flags with `-blocks-storage.bucket-store.chunks-cache.memcached.*` prefix. +To enable chunks cache, please set `-blocks-storage.bucket-store.chunks-cache.backend`. Chunks can be stored into Memcached or Redis cache. Memcached client can be configured via flags with `-blocks-storage.bucket-store.chunks-cache.memcached.*` prefix. Redis client can be configured via flags with `-blocks-storage.bucket-store.chunks-cache.redis.*` prefix. There are additional low-level options for configuring chunks cache. Please refer to other flags with `-blocks-storage.bucket-store.chunks-cache.*` prefix. ### Metadata cache -Store-gateway and [querier](./querier.md) can use memcached for caching bucket metadata: +Store-gateway and [querier](./querier.md) can use memcached or redis for caching bucket metadata: - List of tenants - List of blocks per tenant @@ -162,11 +169,11 @@ Store-gateway and [querier](./querier.md) can use memcached for caching bucket m Using the metadata cache can significantly reduce the number of API calls to object storage and protects from linearly scale the number of these API calls with the number of querier and store-gateway instances (because the bucket is periodically scanned and synched by each querier and store-gateway). -To enable metadata cache, please set `-blocks-storage.bucket-store.metadata-cache.backend`. Only `memcached` backend is supported currently. Memcached client has additional configuration available via flags with `-blocks-storage.bucket-store.metadata-cache.memcached.*` prefix. +To enable metadata cache, please set `-blocks-storage.bucket-store.metadata-cache.backend`. `memcached` and `redis` backend are supported currently. Memcached client has additional configuration available via flags with `-blocks-storage.bucket-store.metadata-cache.memcached.*` prefix. Redis client has additional configuration available via flags with `-blocks-storage.bucket-store.metadata-cache.redis.*` prefix. Additional options for configuring metadata cache have `-blocks-storage.bucket-store.metadata-cache.*` prefix. By configuring TTL to zero or negative value, caching of given item type is disabled. -_The same memcached backend cluster should be shared between store-gateways and queriers._ +_The same cache backend deployment should be shared between store-gateways and queriers._ ## Store-gateway HTTP endpoints @@ -589,7 +596,8 @@ blocks_storage: [consistency_delay: | default = 0s] index_cache: - # The index cache backend type. Supported values: inmemory, memcached. 
+ # The index cache backend type. Supported values: inmemory, memcached, + # redis. # CLI flag: -blocks-storage.bucket-store.index-cache.backend [backend: | default = "inmemory"] @@ -646,6 +654,113 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. + # Default 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. 
+ # CLI flag: -blocks-storage.bucket-store.index-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.cache-size + [cache_size: | default = 0] + chunks_cache: # Backend for chunks cache, if not empty. Supported values: memcached. # CLI flag: -blocks-storage.bucket-store.chunks-cache.backend @@ -698,6 +813,113 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.chunks-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. 
+ # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. + # Default 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.cache-size + [cache_size: | default = 0] + # Size of each subrange that bucket object is split into for better # caching. # CLI flag: -blocks-storage.bucket-store.chunks-cache.subrange-size @@ -769,6 +991,113 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). 
+ # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. + # Default 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. 
If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.cache-size + [cache_size: | default = 0] + # How long to cache list of tenants in the bucket. # CLI flag: -blocks-storage.bucket-store.metadata-cache.tenants-list-ttl [tenants_list_ttl: | default = 15m] diff --git a/docs/blocks-storage/store-gateway.template b/docs/blocks-storage/store-gateway.template index 68b787f1ab4..b4cb6a1fe03 100644 --- a/docs/blocks-storage/store-gateway.template +++ b/docs/blocks-storage/store-gateway.template @@ -117,6 +117,7 @@ The store-gateway can use a cache to speed up lookups of postings and series fro - `inmemory` - `memcached` +- `redis` #### In-memory index cache @@ -139,20 +140,26 @@ The Memcached client uses a jump hash algorithm to shard cached entries across a For example, if you're running Memcached in Kubernetes, you may: 1. Deploy your Memcached cluster using a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) -2. Create an [headless service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) for Memcached StatefulSet +2. Create a [headless service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) for Memcached StatefulSet 3. Configure the Cortex's Memcached client address using the `dnssrvnoa+` [service discovery](../configuration/arguments.md#dns-service-discovery) +#### Redis index cache + +The `redis` index cache allows to use [Redis](https://memcached.org/) as cache backend. This cache backend is configured using `-blocks-storage.bucket-store.index-cache.backend=redis` and requires the Redis server(s) addresses via `-blocks-storage.bucket-store.index-cache.redis.addresses` (or config file). + +Using `redis` as the cache backend has similar trade-offs as using `memcached` cache backend. However, client side caching can be enabled when using `redis` backend to avoid Store Gateway fetching data from cache each time. See [here](https://redis.io/docs/manual/client-side-caching/) for more info and it can be enabled by setting flag `-blocks-storage.bucket-store.index-cache.redis.cache-size` > 0. + ### Chunks cache Store-gateway can also use a cache for storing chunks fetched from the storage. Chunks contain actual samples, and can be reused if user query hits the same series for the same time range. -To enable chunks cache, please set `-blocks-storage.bucket-store.chunks-cache.backend`. Chunks can currently only be stored into Memcached cache. Memcached client can be configured via flags with `-blocks-storage.bucket-store.chunks-cache.memcached.*` prefix. +To enable chunks cache, please set `-blocks-storage.bucket-store.chunks-cache.backend`. Chunks can be stored into Memcached or Redis cache. 
Memcached client can be configured via flags with `-blocks-storage.bucket-store.chunks-cache.memcached.*` prefix. Redis client can be configured via flags with `-blocks-storage.bucket-store.chunks-cache.redis.*` prefix. There are additional low-level options for configuring chunks cache. Please refer to other flags with `-blocks-storage.bucket-store.chunks-cache.*` prefix. ### Metadata cache -Store-gateway and [querier](./querier.md) can use memcached for caching bucket metadata: +Store-gateway and [querier](./querier.md) can use memcached or redis for caching bucket metadata: - List of tenants - List of blocks per tenant @@ -162,11 +169,11 @@ Store-gateway and [querier](./querier.md) can use memcached for caching bucket m Using the metadata cache can significantly reduce the number of API calls to object storage and protects from linearly scale the number of these API calls with the number of querier and store-gateway instances (because the bucket is periodically scanned and synched by each querier and store-gateway). -To enable metadata cache, please set `-blocks-storage.bucket-store.metadata-cache.backend`. Only `memcached` backend is supported currently. Memcached client has additional configuration available via flags with `-blocks-storage.bucket-store.metadata-cache.memcached.*` prefix. +To enable metadata cache, please set `-blocks-storage.bucket-store.metadata-cache.backend`. `memcached` and `redis` backend are supported currently. Memcached client has additional configuration available via flags with `-blocks-storage.bucket-store.metadata-cache.memcached.*` prefix. Redis client has additional configuration available via flags with `-blocks-storage.bucket-store.metadata-cache.redis.*` prefix. Additional options for configuring metadata cache have `-blocks-storage.bucket-store.metadata-cache.*` prefix. By configuring TTL to zero or negative value, caching of given item type is disabled. -_The same memcached backend cluster should be shared between store-gateways and queriers._ +_The same cache backend deployment should be shared between store-gateways and queriers._ ## Store-gateway HTTP endpoints diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 4659d4b4784..a85cc9957fe 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -3460,7 +3460,8 @@ bucket_store: [consistency_delay: | default = 0s] index_cache: - # The index cache backend type. Supported values: inmemory, memcached. + # The index cache backend type. Supported values: inmemory, memcached, + # redis. # CLI flag: -blocks-storage.bucket-store.index-cache.backend [backend: | default = "inmemory"] @@ -3517,6 +3518,113 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. 
+ # CLI flag: -blocks-storage.bucket-store.index-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. Default + # 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate against. + # If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. 
+ # CLI flag: -blocks-storage.bucket-store.index-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching is + # when data is stored in memory instead of fetching data each time. See + # https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.cache-size + [cache_size: | default = 0] + chunks_cache: # Backend for chunks cache, if not empty. Supported values: memcached. # CLI flag: -blocks-storage.bucket-store.chunks-cache.backend @@ -3569,6 +3677,113 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.chunks-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. 
+ # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. Default + # 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate against. + # If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching is + # when data is stored in memory instead of fetching data each time. See + # https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.chunks-cache.redis.cache-size + [cache_size: | default = 0] + # Size of each subrange that bucket object is split into for better caching. # CLI flag: -blocks-storage.bucket-store.chunks-cache.subrange-size [subrange_size: | default = 16000] @@ -3639,6 +3854,113 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.metadata-cache.memcached.auto-discovery [auto_discovery: | default = false] + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.master-name + [master_name: | default = ""] + + # Maximum number of socket connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.pool-size + [pool_size: | default = 100] + + # Specifies the minimum number of idle connections, which is useful when + # it is slow to establish new connections. 
+ # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.min-idle-conns + [min_idle_conns: | default = 10] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Amount of time after which client closes idle connections. Should be + # less than server's timeout. -1 disables idle timeout check. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.idle-timeout + [idle_timeout: | default = 5m] + + # Connection age at which client retires (closes) the connection. Default + # 0 is to not close aged connections. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.max-conn-age + [max_conn_age: | default = 0s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate against. + # If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis..tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching is + # when data is stored in memory instead of fetching data each time. See + # https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.metadata-cache.redis.cache-size + [cache_size: | default = 0] + # How long to cache list of tenants in the bucket. 
# CLI flag: -blocks-storage.bucket-store.metadata-cache.tenants-list-ttl [tenants_list_ttl: | default = 15m] diff --git a/integration/e2e/cache/cache.go b/integration/e2e/cache/cache.go index 7598c307701..1a165a0a91d 100644 --- a/integration/e2e/cache/cache.go +++ b/integration/e2e/cache/cache.go @@ -7,6 +7,7 @@ import ( const ( MemcachedPort = 11211 + RedisPort = 6379 ) func NewMemcached() *e2e.ConcreteService { @@ -18,3 +19,13 @@ func NewMemcached() *e2e.ConcreteService { MemcachedPort, ) } + +func NewRedis() *e2e.ConcreteService { + return e2e.NewConcreteService( + "redis", + images.Redis, + e2e.NewCommand("redis-server", "*:6379"), + e2e.NewTCPReadinessProbe(RedisPort), + RedisPort, + ) +} diff --git a/integration/e2e/images/images.go b/integration/e2e/images/images.go index 63deb2fc733..c7993192854 100644 --- a/integration/e2e/images/images.go +++ b/integration/e2e/images/images.go @@ -7,6 +7,7 @@ package images var ( Memcached = "memcached:1.6.1" + Redis = "docker.io/redis:7.0.4-alpine" Minio = "minio/minio:RELEASE.2021-10-13T00-23-17Z" KES = "minio/kes:v0.17.1" Consul = "consul:1.8.4" diff --git a/integration/querier_test.go b/integration/querier_test.go index 551a15ac465..651003a9ecd 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -35,10 +35,12 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { "blocks sharding disabled, ingester gRPC streaming disabled, memcached index cache": { blocksShardingStrategy: "", ingesterStreamingEnabled: false, - // Memcached index cache is required to avoid flaky tests when the blocks sharding is disabled - // because two different requests may hit two different store-gateways, so if the cache is not - // shared there's no guarantee we'll have a cache hit. - indexCacheBackend: tsdb.IndexCacheBackendMemcached, + indexCacheBackend: tsdb.IndexCacheBackendMemcached, + }, + "blocks sharding disabled, ingester gRPC streaming disabled, redis index cache": { + blocksShardingStrategy: "", + ingesterStreamingEnabled: false, + indexCacheBackend: tsdb.IndexCacheBackendRedis, }, "blocks default sharding, ingester gRPC streaming disabled, inmemory index cache": { blocksShardingStrategy: "default", @@ -74,6 +76,19 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { indexCacheBackend: tsdb.IndexCacheBackendInMemory, bucketIndexEnabled: true, }, + "blocks default sharding, ingester gRPC streaming enabled, redis index cache, bucket index enabled": { + blocksShardingStrategy: "default", + ingesterStreamingEnabled: true, + indexCacheBackend: tsdb.IndexCacheBackendRedis, + bucketIndexEnabled: true, + }, + "blocks shuffle sharding, ingester gRPC streaming enabled, redis index cache, bucket index enabled": { + blocksShardingStrategy: "shuffle-sharding", + tenantShardSize: 1, + ingesterStreamingEnabled: true, + indexCacheBackend: tsdb.IndexCacheBackendRedis, + bucketIndexEnabled: true, + }, } for testName, testCfg := range tests { @@ -106,10 +121,15 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { consul := e2edb.NewConsul() minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) memcached := e2ecache.NewMemcached() - require.NoError(t, s.StartAndWaitReady(consul, minio, memcached)) - - // Add the memcached address to the flags. 
- flags["-blocks-storage.bucket-store.index-cache.memcached.addresses"] = "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort) + redis := e2ecache.NewRedis() + require.NoError(t, s.StartAndWaitReady(consul, minio, memcached, redis)) + + // Add the cache address to the flags. + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + flags["-blocks-storage.bucket-store.index-cache.memcached.addresses"] = "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort) + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendRedis { + flags["-blocks-storage.bucket-store.index-cache.redis.addresses"] = redis.NetworkEndpoint(e2ecache.RedisPort) + } // Start Cortex components. distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") @@ -280,10 +300,7 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { "blocks sharding disabled, ingester gRPC streaming disabled, memcached index cache": { blocksShardingEnabled: false, ingesterStreamingEnabled: false, - // Memcached index cache is required to avoid flaky tests when the blocks sharding is disabled - // because two different requests may hit two different store-gateways, so if the cache is not - // shared there's no guarantee we'll have a cache hit. - indexCacheBackend: tsdb.IndexCacheBackendMemcached, + indexCacheBackend: tsdb.IndexCacheBackendMemcached, }, "blocks sharding enabled, ingester gRPC streaming enabled, memcached index cache": { blocksShardingEnabled: true, @@ -296,6 +313,22 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { indexCacheBackend: tsdb.IndexCacheBackendMemcached, bucketIndexEnabled: true, }, + "blocks sharding disabled, ingester gRPC streaming disabled, redis index cache": { + blocksShardingEnabled: false, + ingesterStreamingEnabled: false, + indexCacheBackend: tsdb.IndexCacheBackendRedis, + }, + "blocks sharding enabled, ingester gRPC streaming enabled, redis index cache": { + blocksShardingEnabled: true, + ingesterStreamingEnabled: true, + indexCacheBackend: tsdb.IndexCacheBackendRedis, + }, + "blocks sharding enabled, ingester gRPC streaming enabled, redis index cache, bucket index enabled": { + blocksShardingEnabled: true, + ingesterStreamingEnabled: true, + indexCacheBackend: tsdb.IndexCacheBackendRedis, + bucketIndexEnabled: true, + }, } for testName, testCfg := range tests { @@ -311,7 +344,8 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { consul := e2edb.NewConsul() minio := e2edb.NewMinio(9000, bucketName) memcached := e2ecache.NewMemcached() - require.NoError(t, s.StartAndWaitReady(consul, minio, memcached)) + redis := e2ecache.NewRedis() + require.NoError(t, s.StartAndWaitReady(consul, minio, memcached, redis)) // Setting the replication factor equal to the number of Cortex replicas // make sure each replica creates the same blocks, so the total number of @@ -321,16 +355,15 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { // Configure the blocks storage to frequently compact TSDB head // and ship blocks to the storage. 
flags := mergeFlags(BlocksStorageFlags(), map[string]string{ - "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), - "-blocks-storage.tsdb.ship-interval": "1s", - "-blocks-storage.bucket-store.sync-interval": "1s", - "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), - "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, - "-blocks-storage.bucket-store.index-cache.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), - "-blocks-storage.bucket-store.bucket-index.enabled": strconv.FormatBool(testCfg.bucketIndexEnabled), - "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), - "-querier.query-store-for-labels-enabled": "true", - "-querier.thanos-engine": strconv.FormatBool(thanosEngine), + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, + "-blocks-storage.bucket-store.bucket-index.enabled": strconv.FormatBool(testCfg.bucketIndexEnabled), + "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), + "-querier.query-store-for-labels-enabled": "true", + "-querier.thanos-engine": strconv.FormatBool(thanosEngine), // Ingester. "-ring.store": "consul", "-consul.hostname": consul.NetworkHTTPEndpoint(), @@ -343,6 +376,13 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { "-store-gateway.sharding-ring.replication-factor": "1", }) + // Add the cache address to the flags. + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + flags["-blocks-storage.bucket-store.index-cache.memcached.addresses"] = "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort) + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendRedis { + flags["-blocks-storage.bucket-store.index-cache.redis.addresses"] = redis.NetworkEndpoint(e2ecache.RedisPort) + } + // Start Cortex replicas. cortex1 := e2ecortex.NewSingleBinary("cortex-1", flags, "") cortex2 := e2ecortex.NewSingleBinary("cortex-2", flags, "") diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index 662ad0800ce..63200d6ae1f 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -7,7 +7,6 @@ import ( "testing" "unsafe" - "github.com/efficientgo/core/testutil" jsoniter "github.com/json-iterator/go" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" @@ -99,7 +98,7 @@ func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) { for _, tt := range tc { t.Run(tt.desc, func(t *testing.T) { m := MetricMetadataMetricTypeToMetricType(tt.input) - testutil.Equals(t, tt.expected, m) + assert.Equal(t, tt.expected, m) }) } } diff --git a/pkg/storage/tsdb/caching_bucket.go b/pkg/storage/tsdb/caching_bucket.go index a0454adc19c..b468e31d533 100644 --- a/pkg/storage/tsdb/caching_bucket.go +++ b/pkg/storage/tsdb/caching_bucket.go @@ -23,25 +23,26 @@ import ( const ( CacheBackendMemcached = "memcached" + CacheBackendRedis = "redis" ) type CacheBackend struct { Backend string `yaml:"backend"` Memcached MemcachedClientConfig `yaml:"memcached"` + Redis RedisClientConfig `yaml:"redis"` } // Validate the config. 
func (cfg *CacheBackend) Validate() error { - if cfg.Backend != "" && cfg.Backend != CacheBackendMemcached { + switch cfg.Backend { + case CacheBackendMemcached: + return cfg.Memcached.Validate() + case CacheBackendRedis: + return cfg.Redis.Validate() + case "": + default: return fmt.Errorf("unsupported cache backend: %s", cfg.Backend) } - - if cfg.Backend == CacheBackendMemcached { - if err := cfg.Memcached.Validate(); err != nil { - return err - } - } - return nil } @@ -58,6 +59,7 @@ func (cfg *ChunksCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("Backend for chunks cache, if not empty. Supported values: %s.", CacheBackendMemcached)) cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") + cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.") f.Int64Var(&cfg.SubrangeSize, prefix+"subrange-size", 16000, "Size of each subrange that bucket object is split into for better caching.") f.IntVar(&cfg.MaxGetRangeRequests, prefix+"max-get-range-requests", 3, "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching chunks. Zero or negative value = unlimited number of sub-requests.") @@ -89,6 +91,7 @@ func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("Backend for metadata cache, if not empty. Supported values: %s.", CacheBackendMemcached)) cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") + cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.") f.DurationVar(&cfg.TenantsListTTL, prefix+"tenants-list-ttl", 15*time.Minute, "How long to cache list of tenants in the bucket.") f.DurationVar(&cfg.TenantBlocksListTTL, prefix+"tenant-blocks-list-ttl", 5*time.Minute, "How long to cache list of blocks for each tenant.") @@ -111,7 +114,7 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata cfg := cache.NewCachingBucketConfig() cachingConfigured := false - chunksCache, err := createCache("chunks-cache", chunksConfig.Backend, chunksConfig.Memcached, logger, reg) + chunksCache, err := createCache("chunks-cache", &chunksConfig.CacheBackend, logger, reg) if err != nil { return nil, errors.Wrapf(err, "chunks-cache") } @@ -121,7 +124,7 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata cfg.CacheGetRange("chunks", chunksCache, isTSDBChunkFile, chunksConfig.SubrangeSize, chunksConfig.AttributesTTL, chunksConfig.SubrangeTTL, chunksConfig.MaxGetRangeRequests) } - metadataCache, err := createCache("metadata-cache", metadataConfig.Backend, metadataConfig.Memcached, logger, reg) + metadataCache, err := createCache("metadata-cache", &metadataConfig.CacheBackend, logger, reg) if err != nil { return nil, errors.Wrapf(err, "metadata-cache") } @@ -149,22 +152,29 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata return storecache.NewCachingBucket(bkt, cfg, logger, reg) } -func createCache(cacheName string, backend string, memcached MemcachedClientConfig, logger log.Logger, reg prometheus.Registerer) (cache.Cache, error) { - switch backend { +func createCache(cacheName string, cacheBackend *CacheBackend, logger log.Logger, reg prometheus.Registerer) (cache.Cache, error) { + switch cacheBackend.Backend { case "": // No caching. 
return nil, nil case CacheBackendMemcached: var client cacheutil.MemcachedClient - client, err := cacheutil.NewMemcachedClientWithConfig(logger, cacheName, memcached.ToMemcachedClientConfig(), reg) + client, err := cacheutil.NewMemcachedClientWithConfig(logger, cacheName, cacheBackend.Memcached.ToMemcachedClientConfig(), reg) if err != nil { return nil, errors.Wrapf(err, "failed to create memcached client") } return cache.NewMemcachedCache(cacheName, logger, client, reg), nil + case CacheBackendRedis: + redisCache, err := cacheutil.NewRedisClientWithConfig(logger, cacheName, cacheBackend.Redis.ToRedisClientConfig(), reg) + if err != nil { + return nil, errors.Wrapf(err, "failed to create redis client") + } + return cache.NewRedisCache(cacheName, logger, redisCache, reg), nil + default: - return nil, errors.Errorf("unsupported cache type for cache %s: %s", cacheName, backend) + return nil, errors.Errorf("unsupported cache type for cache %s: %s", cacheName, cacheBackend.Backend) } } diff --git a/pkg/storage/tsdb/index_cache.go b/pkg/storage/tsdb/index_cache.go index 60b347d142f..ca4a5484208 100644 --- a/pkg/storage/tsdb/index_cache.go +++ b/pkg/storage/tsdb/index_cache.go @@ -23,6 +23,9 @@ const ( // IndexCacheBackendMemcached is the value for the memcached index cache backend. IndexCacheBackendMemcached = "memcached" + // IndexCacheBackendRedis is the value for the redis index cache backend. + IndexCacheBackendRedis = "redis" + // IndexCacheBackendDefault is the value for the default index cache backend. IndexCacheBackendDefault = IndexCacheBackendInMemory @@ -30,7 +33,7 @@ const ( ) var ( - supportedIndexCacheBackends = []string{IndexCacheBackendInMemory, IndexCacheBackendMemcached} + supportedIndexCacheBackends = []string{IndexCacheBackendInMemory, IndexCacheBackendMemcached, IndexCacheBackendRedis} errUnsupportedIndexCacheBackend = errors.New("unsupported index cache backend") errNoIndexCacheAddresses = errors.New("no index cache backend addresses") @@ -40,6 +43,7 @@ type IndexCacheConfig struct { Backend string `yaml:"backend"` InMemory InMemoryIndexCacheConfig `yaml:"inmemory"` Memcached MemcachedClientConfig `yaml:"memcached"` + Redis RedisClientConfig `yaml:"redis"` } func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { @@ -51,6 +55,7 @@ func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix str cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.") cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") + cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.") } // Validate the config. 
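
A minimal sketch (not part of this patch) of how the new backend is expected to be wired through the index-cache factory shown above, assuming the `IndexCacheConfig`, `IndexCacheBackendRedis` and `RedisClientConfig` definitions added in this change; the Redis address and the nop logger/registry are purely illustrative:

package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/storage/tsdb"
)

func main() {
	cfg := tsdb.IndexCacheConfig{
		Backend: tsdb.IndexCacheBackendRedis,
		Redis: tsdb.RedisClientConfig{
			// Illustrative address; Validate() rejects an empty address list
			// for the redis backend.
			Addresses: "redis-master.cache.svc:6379",
		},
	}
	if err := cfg.Validate(); err != nil {
		panic(err)
	}
	// NewIndexCache dispatches on cfg.Backend and, for redis, builds a
	// cacheutil redis client wrapped in a remote index cache.
	cache, err := tsdb.NewIndexCache(cfg, log.NewNopLogger(), prometheus.NewRegistry())
	if err != nil {
		panic(err)
	}
	_ = cache
}
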
@@ -63,6 +68,10 @@ func (cfg *IndexCacheConfig) Validate() error { if err := cfg.Memcached.Validate(); err != nil { return err } + } else if cfg.Backend == IndexCacheBackendRedis { + if err := cfg.Redis.Validate(); err != nil { + return err + } } return nil @@ -83,6 +92,8 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu return newInMemoryIndexCache(cfg.InMemory, logger, registerer) case IndexCacheBackendMemcached: return newMemcachedIndexCache(cfg.Memcached, logger, registerer) + case IndexCacheBackendRedis: + return newRedisIndexCache(cfg.Redis, logger, registerer) default: return nil, errUnsupportedIndexCacheBackend } @@ -111,3 +122,12 @@ func newMemcachedIndexCache(cfg MemcachedClientConfig, logger log.Logger, regist return storecache.NewMemcachedIndexCache(logger, client, registerer) } + +func newRedisIndexCache(cfg RedisClientConfig, logger log.Logger, registerer prometheus.Registerer) (storecache.IndexCache, error) { + client, err := cacheutil.NewRedisClientWithConfig(logger, "index-cache", cfg.ToRedisClientConfig(), registerer) + if err != nil { + return nil, errors.Wrapf(err, "create index cache redis client") + } + + return storecache.NewRemoteIndexCache(logger, client, registerer) +} diff --git a/pkg/storage/tsdb/redis_client_config.go b/pkg/storage/tsdb/redis_client_config.go new file mode 100644 index 00000000000..9c0937d431a --- /dev/null +++ b/pkg/storage/tsdb/redis_client_config.go @@ -0,0 +1,107 @@ +package tsdb + +import ( + "flag" + "time" + + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/cacheutil" + + "github.com/cortexproject/cortex/pkg/util/tls" +) + +type RedisClientConfig struct { + Addresses string `yaml:"addresses"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DB int `yaml:"db"` + MasterName string `yaml:"master_name"` + + PoolSize int `yaml:"pool_size"` + MinIdleConns int `yaml:"min_idle_conns"` + MaxGetMultiConcurrency int `yaml:"max_get_multi_concurrency"` + GetMultiBatchSize int `yaml:"get_multi_batch_size"` + MaxSetMultiConcurrency int `yaml:"max_set_multi_concurrency"` + SetMultiBatchSize int `yaml:"set_multi_batch_size"` + + DialTimeout time.Duration `yaml:"dial_timeout"` + ReadTimeout time.Duration `yaml:"read_timeout"` + WriteTimeout time.Duration `yaml:"write_timeout"` + IdleTimeout time.Duration `yaml:"idle_timeout"` + MaxConnAge time.Duration `yaml:"max_conn_age"` + + TLSEnabled bool `yaml:"tls_enabled"` + TLS tls.ClientConfig `yaml:",inline"` + + // If not zero then client-side caching is enabled. + // Client-side caching is when data is stored in memory + // instead of fetching data each time. + // See https://redis.io/docs/manual/client-side-caching/ for info. + CacheSize int `yaml:"cache_size"` +} + +func (cfg *RedisClientConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Addresses, prefix+"addresses", "", "Comma separated list of redis addresses. 
Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).") + f.StringVar(&cfg.Username, prefix+"username", "", "Redis username.") + f.StringVar(&cfg.Password, prefix+"password", "", "Redis password.") + f.IntVar(&cfg.DB, prefix+"db", 0, "Database to be selected after connecting to the server.") + f.DurationVar(&cfg.DialTimeout, prefix+"dial-timeout", time.Second*5, "Client dial timeout.") + f.DurationVar(&cfg.ReadTimeout, prefix+"read-timeout", time.Second*3, "Client read timeout.") + f.DurationVar(&cfg.WriteTimeout, prefix+"write-timeout", time.Second*3, "Client write timeout.") + f.DurationVar(&cfg.IdleTimeout, prefix+"idle-timeout", time.Minute*5, "Amount of time after which client closes idle connections. Should be less than server's timeout. -1 disables idle timeout check.") + f.DurationVar(&cfg.MaxConnAge, prefix+"max-conn-age", 0, "Connection age at which client retires (closes) the connection. Default 0 is to not close aged connections.") + f.IntVar(&cfg.PoolSize, prefix+"pool-size", 100, "Maximum number of socket connections.") + f.IntVar(&cfg.MinIdleConns, prefix+"min-idle-conns", 10, "Specifies the minimum number of idle connections, which is useful when it is slow to establish new connections.") + f.IntVar(&cfg.MaxGetMultiConcurrency, prefix+"max-get-multi-concurrency", 100, "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.") + f.IntVar(&cfg.GetMultiBatchSize, prefix+"get-multi-batch-size", 100, "The maximum size per batch for mget.") + f.IntVar(&cfg.MaxSetMultiConcurrency, prefix+"max-set-multi-concurrency", 100, "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.") + f.IntVar(&cfg.SetMultiBatchSize, prefix+"set-multi-batch-size", 100, "The maximum size per batch for pipeline set.") + f.StringVar(&cfg.MasterName, prefix+"master-name", "", "Specifies the master's name. Must be not empty for Redis Sentinel.") + f.IntVar(&cfg.CacheSize, prefix+"cache-size", 0, "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.") + f.BoolVar(&cfg.TLSEnabled, prefix+"tls-enabled", false, "Whether to enable tls for redis connection.") + cfg.TLS.RegisterFlagsWithPrefix(prefix, f) +} + +// Validate the config. 
+func (cfg *RedisClientConfig) Validate() error { + if cfg.Addresses == "" { + return errNoIndexCacheAddresses + } + + if cfg.TLSEnabled { + if (cfg.TLS.CertPath != "") != (cfg.TLS.KeyPath != "") { + return errors.New("both client key and certificate must be provided") + } + } + + return nil +} + +func (cfg *RedisClientConfig) ToRedisClientConfig() cacheutil.RedisClientConfig { + return cacheutil.RedisClientConfig{ + Addr: cfg.Addresses, + Username: cfg.Username, + Password: cfg.Password, + DB: cfg.DB, + MasterName: cfg.MasterName, + DialTimeout: cfg.DialTimeout, + ReadTimeout: cfg.ReadTimeout, + WriteTimeout: cfg.WriteTimeout, + PoolSize: cfg.PoolSize, + MinIdleConns: cfg.MinIdleConns, + IdleTimeout: cfg.IdleTimeout, + MaxConnAge: cfg.MaxConnAge, + MaxGetMultiConcurrency: cfg.MaxGetMultiConcurrency, + GetMultiBatchSize: cfg.GetMultiBatchSize, + MaxSetMultiConcurrency: cfg.MaxSetMultiConcurrency, + SetMultiBatchSize: cfg.SetMultiBatchSize, + TLSEnabled: cfg.TLSEnabled, + TLSConfig: cacheutil.TLSConfig{ + CAFile: cfg.TLS.CAPath, + KeyFile: cfg.TLS.KeyPath, + CertFile: cfg.TLS.CertPath, + ServerName: cfg.TLS.ServerName, + InsecureSkipVerify: cfg.TLS.InsecureSkipVerify, + }, + } +} diff --git a/pkg/storage/tsdb/redis_client_config_test.go b/pkg/storage/tsdb/redis_client_config_test.go new file mode 100644 index 00000000000..3d5c52d5079 --- /dev/null +++ b/pkg/storage/tsdb/redis_client_config_test.go @@ -0,0 +1,73 @@ +package tsdb + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/util/tls" +) + +func TestRedisIndexCacheConfigValidate(t *testing.T) { + for _, tc := range []struct { + name string + conf *RedisClientConfig + err error + }{ + { + name: "empty address", + conf: &RedisClientConfig{}, + err: errNoIndexCacheAddresses, + }, + { + name: "provide TLS cert path but not key path", + conf: &RedisClientConfig{ + Addresses: "aaa", + TLSEnabled: true, + TLS: tls.ClientConfig{ + CertPath: "foo", + }, + }, + err: errors.New("both client key and certificate must be provided"), + }, + { + name: "provide TLS key path but not cert path", + conf: &RedisClientConfig{ + Addresses: "aaa", + TLSEnabled: true, + TLS: tls.ClientConfig{ + KeyPath: "foo", + }, + }, + err: errors.New("both client key and certificate must be provided"), + }, + { + name: "success when TLS enabled", + conf: &RedisClientConfig{ + Addresses: "aaa", + TLSEnabled: true, + TLS: tls.ClientConfig{ + KeyPath: "foo", + CertPath: "bar", + }, + }, + }, + { + name: "success when TLS disabled", + conf: &RedisClientConfig{ + Addresses: "aaa", + TLSEnabled: false, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + err := tc.conf.Validate() + if tc.err == nil { + require.NoError(t, err) + } else { + require.Equal(t, tc.err.Error(), err.Error()) + } + }) + } +} From e138bb33682d63c94dc7978e2644fbb4323b8ed5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Feb 2023 11:04:34 -0800 Subject: [PATCH 075/133] Bump go.opentelemetry.io/contrib/propagators/aws from 1.12.0 to 1.14.0 (#5143) Bumps [go.opentelemetry.io/contrib/propagators/aws](https://github.com/open-telemetry/opentelemetry-go-contrib) from 1.12.0 to 1.14.0. 
- [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/v1.12.0...v1.14.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/propagators/aws dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 8 +- go.sum | 16 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 26 +- .../otel/semconv/v1.17.0/resource.go | 1380 ++++++++-- .../otel/semconv/v1.17.0/trace.go | 2291 ++++++++++++++--- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 4 +- vendor/modules.txt | 8 +- 8 files changed, 3067 insertions(+), 668 deletions(-) diff --git a/go.mod b/go.mod index 86da3c3af7b..7965712ae16 100644 --- a/go.mod +++ b/go.mod @@ -58,13 +58,13 @@ require ( go.etcd.io/etcd/api/v3 v3.5.6 go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.6 - go.opentelemetry.io/contrib/propagators/aws v1.12.0 - go.opentelemetry.io/otel v1.12.0 + go.opentelemetry.io/contrib/propagators/aws v1.14.0 + go.opentelemetry.io/otel v1.13.0 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 - go.opentelemetry.io/otel/sdk v1.12.0 - go.opentelemetry.io/otel/trace v1.12.0 + go.opentelemetry.io/otel/sdk v1.13.0 + go.opentelemetry.io/otel/trace v1.13.0 go.uber.org/atomic v1.10.0 golang.org/x/net v0.5.0 golang.org/x/sync v0.1.0 diff --git a/go.sum b/go.sum index c81a0eacda8..aa628385bbf 100644 --- a/go.sum +++ b/go.sum @@ -1621,8 +1621,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= -go.opentelemetry.io/contrib/propagators/aws v1.12.0 h1:n3lNyZs2ytVEfFhcn2QO5Z80lgrMXyP1Ncx0C7rUU8A= -go.opentelemetry.io/contrib/propagators/aws v1.12.0/go.mod h1:xZSOQIixr40Cq3NHn/YsvkDOzpVaR0j19WJRZnOKwbk= +go.opentelemetry.io/contrib/propagators/aws v1.14.0 h1:At9KgnobpQSaC7LYQ2JpDEOxLlMW6EsmLPsCAFfXSNg= +go.opentelemetry.io/contrib/propagators/aws v1.14.0/go.mod h1:KB4fnXEZfSGUC39lmyXfqfuw7D1C8n01nXxsYgKvQhc= go.opentelemetry.io/contrib/propagators/b3 v1.9.0 h1:Lzb9zU98jCE2kyfCjWfSSsiQoGtvBL+COxvUBf7FNhU= go.opentelemetry.io/contrib/propagators/b3 v1.9.0/go.mod h1:fyx3gFXn+4w5uWTTiqaI8oBNBW/6w9Ow5zxXf7NGixU= go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ1vlsphNPV7RkuKNd/HkQ= @@ -1631,8 +1631,8 @@ go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4Zf go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ= 
-go.opentelemetry.io/otel v1.12.0/go.mod h1:geaoz0L0r1BEOR81k7/n9W4TCXYCJ7bPO7K374jQHG0= +go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= +go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 h1:jrqVQNRKglMRBUrSf40J5mBYnUyleAKL+g0iuyEyaGA= go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= @@ -1652,14 +1652,14 @@ go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2b go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o= -go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE= +go.opentelemetry.io/otel/sdk v1.13.0 h1:BHib5g8MvdqS65yo2vV1s6Le42Hm6rrw08qU6yz5JaM= +go.opentelemetry.io/otel/sdk v1.13.0/go.mod h1:YLKPx5+6Vx/o1TCUYYs+bpymtkmazOMT6zoRrC7AQ7I= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc= -go.opentelemetry.io/otel/trace v1.12.0/go.mod h1:pHlgBynn6s25qJ2szD+Bv+iwKJttjHSI3lUAyf0GNuQ= +go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= +go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 2bfe0a9411b..33609fb5106 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,29 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. 
(#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + ## [1.12.0/0.35.0] 2023-01-28 ### Added @@ -2220,7 +2243,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.12.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.13.0...HEAD +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 [1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 [1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 [1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index add7987a22b..39a2eab3a6a 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -18,9 +18,14 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" -// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). const ( - // Array of brand name and version separated by a space + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space // // Type: string[] // RequirementLevel: Optional @@ -30,7 +35,10 @@ const ( // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") - // The platform on which the browser is running + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running // // Type: string // RequirementLevel: Optional @@ -39,39 +47,49 @@ const ( // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client Hints + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
- // Note that some (but not all) of these values can overlap with values in the - // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the - // values in the `browser.platform` attribute should capture the exact value that - // the user agent provides. + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") - // A boolean that is true if the browser is running on a mobile device + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be - // left unset. + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. BrowserMobileKey = attribute.Key("browser.mobile") - // Full user-agent string provided by the browser + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 - // (KHTML, ' + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' - // Note: The user-agent value SHOULD be provided only from browsers that do not - // have a mechanism to retrieve brands and platform individually from the User- - // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` - // API can be used. + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") - // Preferred language of the user using the browser + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser // // Type: string // RequirementLevel: Optional @@ -82,46 +100,96 @@ const ( BrowserLanguageKey = attribute.Key("browser.language") ) +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. 
It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + // A cloud environment (e.g. GCP, Azure, AWS) const ( - // Name of the cloud provider. + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- - // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- - // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- - // us/global-infrastructure/geographies/), [Google Cloud - // regions](https://cloud.google.com/about/locations), or [Tencent Cloud + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. 
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. // // Type: Enum // RequirementLevel: Optional @@ -201,49 +269,89 @@ var ( CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + // Resources used by AWS Elastic Container Service (ECS). const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). 
+ + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. // // Type: string // RequirementLevel: Optional @@ -259,9 +367,48 @@ var ( AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + // Resources used by AWS Elastic Kubernetes Service (EKS). const ( - // The ARN of an EKS cluster. + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. // // Type: string // RequirementLevel: Optional @@ -270,83 +417,142 @@ const ( AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. 
It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + // Resources specific to Amazon Web Services. const ( - // The name(s) of the AWS log group(s) an application is writing to. + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. 
AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + // A container instance. const ( - // Container name used by container runtime. + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. 
// // Type: string // RequirementLevel: Optional @@ -355,9 +561,48 @@ const ( ContainerImageTagKey = attribute.Key("container.image.tag") ) +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + // The software deployment. const ( - // Name of the [deployment + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // @@ -368,46 +613,67 @@ const ( DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + // The device on which the process represented by this resource is running. const ( - // A unique identifier representing the device + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") - // The name of the device manufacturer + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer // // Type: string // RequirementLevel: Optional @@ -419,25 +685,57 @@ const ( DeviceManufacturerKey = attribute.Key("device.manufacturer") ) +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. 
It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + // A serverless instance. const ( - // The name of the single function that this runtime instance executes. + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the FaaS + // Note: This is the name of the function as configured/deployed on the + // FaaS // platform and is usually different from the name of the callback // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- - // general.md#source-code-attributes) + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) // span attributes). - - // For some cloud providers, the above definition is ambiguous. The following + // + // For some cloud providers, the above definition is ambiguous. The + // following // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - + // (and consequently the span name) for the listed cloud + // providers/products: + // // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). @@ -445,61 +743,67 @@ const ( // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the single function that this runtime instance executes. + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: On some cloud providers, it may not be possible to determine the full ID - // at startup, + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, // so consider setting `faas.id` as a span attribute instead. - + // // The exact value to use for `faas.id` depends on the cloud provider: - + // // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html). + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
// Take care not to use the "invoked ARN" directly but replace any - // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // aliases.html) - // with the resolved function version, as the same runtime instance may be - // invokable with + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with // multiple different aliases. - // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- - // resource-names) - // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- - // us/rest/api/resources/resources/get-by-id) of the invoked function, + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.We - // b/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function app can - // host multiple functions that would usually share + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") - // The immutable version of the function being executed. + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: - + // // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // versions.html) + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env- - // var#runtime_environment_variables_set_automatically). + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string, that will be potentially reused for - // other invocations to the same function/function version. + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. // // Type: string // RequirementLevel: Optional @@ -507,67 +811,125 @@ const ( // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. 
+ + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + // A host is defined as a general computing instance. const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. For non-containerized Linux systems, the `machine-id` located in - // `/etc/machine-id` or `/var/lib/dbus/machine-id` may be used. + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string @@ -596,9 +958,57 @@ var ( HostArchX86 = HostArchKey.String("x86") ) +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. 
+func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + // A Kubernetes Cluster. const ( - // The name of the cluster. + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. // // Type: string // RequirementLevel: Optional @@ -607,16 +1017,26 @@ const ( K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + // A Kubernetes Node object. const ( - // The name of the Node. + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. // // Type: string // RequirementLevel: Optional @@ -625,9 +1045,23 @@ const ( K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + // A Kubernetes Namespace. const ( - // The name of the namespace that the pod is running in. + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. // // Type: string // RequirementLevel: Optional @@ -636,16 +1070,26 @@ const ( K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + // A Kubernetes Pod object. const ( - // The UID of the Pod. + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. 
+ + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. // // Type: string // RequirementLevel: Optional @@ -654,19 +1098,37 @@ const ( K8SPodNameKey = attribute.Key("k8s.pod.name") ) -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( - // The name of the Container from Pod specification, must be unique within a Pod. - // Container runtime usually uses different globally unique name - // (`container.name`). + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") - // Number of times the container was restarted. This attribute can be used to - // identify a particular container (running or stopped) within a container spec. + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. // // Type: int // RequirementLevel: Optional @@ -675,16 +1137,37 @@ const ( K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + // A Kubernetes ReplicaSet object. const ( - // The UID of the ReplicaSet. + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. 
+ + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. // // Type: string // RequirementLevel: Optional @@ -693,16 +1176,35 @@ const ( K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + // A Kubernetes Deployment object. const ( - // The UID of the Deployment. + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. // // Type: string // RequirementLevel: Optional @@ -711,16 +1213,35 @@ const ( K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + // A Kubernetes StatefulSet object. const ( - // The UID of the StatefulSet. + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. // // Type: string // RequirementLevel: Optional @@ -729,16 +1250,35 @@ const ( K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + // A Kubernetes DaemonSet object. const ( - // The UID of the DaemonSet. 
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. // // Type: string // RequirementLevel: Optional @@ -747,16 +1287,33 @@ const ( K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + // A Kubernetes Job object. const ( - // The UID of the Job. + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. // // Type: string // RequirementLevel: Optional @@ -765,16 +1322,33 @@ const ( K8SJobNameKey = attribute.Key("k8s.job.name") ) +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + // A Kubernetes CronJob object. const ( - // The UID of the CronJob. + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. // // Type: string // RequirementLevel: Optional @@ -783,30 +1357,55 @@ const ( K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) -// The operating system (OS) on which the process represented by this resource is running. +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. 
+func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. const ( - // The operating system type. + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string @@ -841,71 +1440,120 @@ var ( OSTypeZOS = OSTypeKey.String("z_os") ) +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + // An operating system process. const ( - // Process identifier (PID). + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") - // Parent Process identifier (PID). + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") - // The name of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. // // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can be set to + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On Linux based - // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. // // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") - // The full command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not - // set this if you have to assemble it just for monitoring; use + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
// Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") - // All the command arguments (including the command/executable itself) as received + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited strings - // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be - // the full argv vector passed to `main`. + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. // // Type: string[] - // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") - // The username of the user that owns the process. + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. // // Type: string // RequirementLevel: Optional @@ -914,25 +1562,101 @@ const ( ProcessOwnerKey = attribute.Key("process.owner") ) +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. 
+func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + // The single (language) runtime instance which is monitored. const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string @@ -942,35 +1666,68 @@ const ( ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. 
+func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + // A service instance. const ( - // Logical name of the service. + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). Zero- - // length namespace string is assumed equal to unspecified namespace. + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. 
// // Type: string // RequirementLevel: Optional @@ -978,18 +1735,22 @@ const ( // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. // // Type: string // RequirementLevel: Optional @@ -998,29 +1759,69 @@ const ( ServiceVersionKey = attribute.Key("service.version") ) -// The telemetry SDK used to capture data recorded by the instrumentation libraries. +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. const ( - // The name of the telemetry SDK as defined above. 
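// A minimal sketch of how the service.* helpers added above might be combined
// into an SDK resource. It assumes the standard go.opentelemetry.io/otel/sdk/resource
// package and the usual `semconv` import alias for this package; the literal
// values are taken from the examples quoted in the attribute docs.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// newServiceResource describes one instance of a horizontally scaled service
// using the semantic-convention helpers instead of hand-rolled attribute keys.
func newServiceResource(instanceID string) *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceInstanceID(instanceID),
		semconv.ServiceVersion("2.0.0"),
	)
}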
+ // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional @@ -1054,43 +1855,100 @@ var ( TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. const ( - // The name of the web engine. + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. 
It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. const ( - // The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OtelScopeNameKey = attribute.Key("otel.scope.name") - // The version of the instrumentation scope - (`InstrumentationScope.Version` in - // OTLP). + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional @@ -1099,16 +1957,36 @@ const ( OtelScopeVersionKey = attribute.Key("otel.scope.version") ) -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. const ( - // Deprecated, use the `otel.scope.name` attribute. + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. 
// // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OtelLibraryNameKey = attribute.Key("otel.library.name") - // Deprecated, use the `otel.scope.version` attribute. + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional @@ -1116,3 +1994,17 @@ const ( // Examples: '1.0.0' OtelLibraryVersionKey = attribute.Key("otel.library.version") ) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. +func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 01e5f072a21..8c4a7299d27 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -18,27 +18,36 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" -// This document defines the shared attributes used to report a single exception associated with a span or log. +// The shared attributes used to report a single exception associated with a +// span or log. const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. The representation is to be determined and documented by each language - // SIG. + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. 
// // Type: string // RequirementLevel: Optional @@ -51,16 +60,44 @@ const ( ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) -// This document defines attributes for Events represented using Log Records. +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. const ( - // The name identifies the event. + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") - // The domain identifies the business context for the events. + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. // // Type: Enum // RequirementLevel: Required @@ -79,11 +116,20 @@ var ( EventDomainK8S = EventDomainKey.String("k8s") ) -// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). const ( - // The full invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` - // applicable). + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). // // Type: string // RequirementLevel: Optional @@ -93,44 +139,72 @@ const ( AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) -// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
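// A short sketch of recording an error as a span event with the exception.*
// helpers added above, assuming the same `semconv` alias as in the resource
// sketch plus a trace.Span from go.opentelemetry.io/otel/trace; the "exception"
// event name and the sample type are assumptions for illustration.
func recordException(span trace.Span, err error, stack string) {
	span.AddEvent("exception", trace.WithAttributes(
		semconv.ExceptionType("*os.PathError"), // fully-qualified type, when known
		semconv.ExceptionMessage(err.Error()),
		semconv.ExceptionStacktrace(stack),
	))
}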
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. const ( - // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec - // .md#id) uniquely identifies the event. + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m - // d#source-1) identifies the context in which an event happened. + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable - // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- - // service' + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - // The [version of the CloudEvents specification](https://github.com/cloudevents/s - // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp - // ec.md#type) contains a value describing the type of event related to the - // originating occurrence. + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. 
- // md#subject) of the event in the context of the event producer (identified by + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by // source). // // Type: string @@ -140,9 +214,53 @@ const ( CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) -// This document defines semantic conventions for the OpenTracing Shim +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim const ( - // Parent-child Reference type + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type // // Type: Enum // RequirementLevel: Optional @@ -158,16 +276,21 @@ var ( OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) -// This document defines the attributes used to perform database client calls. +// The attributes used to perform database client calls. const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. 
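// A hedged sketch of tagging a span that processes a CloudEvent with the
// cloudevents.* helpers added above (same assumed semconv/trace imports as in
// the earlier sketches); the values mirror the examples in the attribute docs.
func annotateCloudEventSpan(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}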
It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string @@ -175,16 +298,21 @@ const ( // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. // // Type: string // RequirementLevel: Optional @@ -192,41 +320,52 @@ const ( // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // This attribute is used to report the name of the database being accessed. For - // commands that switch the database, this should be set to the target database - // (even if the command fails). + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". In case there are multiple layers that could be considered for database - // name (e.g. Oracle instance name and schema name), the database name to be used - // is the more specific layer (e.g. Oracle schema name). + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). DBNameKey = attribute.Key("db.name") - // The database statement being executed. + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. // // Type: string - // RequirementLevel: ConditionallyRequired (If applicable and not explicitly - // disabled via instrumentation configuration.) 
+ // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string - // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. DBOperationKey = attribute.Key("db.operation") ) @@ -331,73 +470,152 @@ var ( DBSystemClickhouse = DBSystemKey.String("clickhouse") ) +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. 
+func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + // Connection-level attributes for Microsoft SQL Server const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + // Call-level attributes for Cassandra const ( - // The fetch size used for paging, i.e. how many rows will be returned at once. + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // keyspace name (if applicable). + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. 
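// A minimal sketch of a database client span tagged with the db.* helpers
// added above (same assumed semconv/trace imports as in the earlier sketches);
// DBSystemPostgreSQL is assumed to be one of the db.system enum values elided
// from this hunk, and the statement is the example from the attribute docs.
func annotateDBSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemPostgreSQL,
		semconv.DBName("customers"),
		semconv.DBUser("readonly_user"),
		semconv.DBStatement("SELECT * FROM wuser_table"),
		semconv.DBOperation("SELECT"),
	)
}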
It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. // // Type: string // RequirementLevel: Optional @@ -431,23 +649,80 @@ var ( DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). 
+func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + // Call-level attributes for Redis const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. // // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default database - // (`0`).) + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + // Call-level attributes for MongoDB const ( - // The collection being accessed within the database stated in `db.name`. + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required @@ -456,10 +731,19 @@ const ( DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. 
+func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + // Call-level attributes for SQL databases const ( - // The name of the primary table that the operation is acting upon, including the - // database name (if applicable). + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). // // Type: string // RequirementLevel: Recommended @@ -467,21 +751,35 @@ const ( // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. DBSQLTableKey = attribute.Key("db.sql.table") ) -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts. +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. const ( - // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OtelStatusCodeKey = attribute.Key("otel.status_code") - // Description of the Status if it has a value, otherwise not set. + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. // // Type: string // RequirementLevel: Optional @@ -497,16 +795,27 @@ var ( OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") ) -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. const ( - // Type of the trigger which caused this function execution. + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. - + // // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the @@ -514,7 +823,10 @@ const ( // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. // // Type: string // RequirementLevel: Optional @@ -536,34 +848,53 @@ var ( FaaSTriggerOther = FaaSTriggerKey.String("other") ) -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. 
+ + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional @@ -581,19 +912,50 @@ var ( FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + // Semantic Convention for FaaS scheduled to be executed regularly. const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional @@ -602,10 +964,28 @@ const ( FaaSCronKey = attribute.Key("faas.cron") ) +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. 
It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + // Contains additional attributes for incoming FaaS spans. const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). // // Type: boolean // RequirementLevel: Optional @@ -613,39 +993,54 @@ const ( FaaSColdstartKey = attribute.Key("faas.coldstart") ) +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + // Contains additional attributes for outgoing FaaS spans. const ( - // The name of the invoked function. + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. // // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or - // GCP, the region in which a function is hosted is essential to uniquely identify - // the function and also part of its endpoint. Since it's part of the endpoint - // being called, the region is always known to clients. In these cases, - // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the - // client or not required for identifying the invoked function, setting - // `faas.invoked_region` is optional.) + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. 
+ // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) @@ -662,50 +1057,82 @@ var ( FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + // These attributes may be used for any network related operation. const ( - // Transport protocol used. See note below. + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") - // Application layer protocol used. The value SHOULD be normalized to lowercase. + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") - // Version of the application layer protocol used. See note below. + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' - // Note: `net.app.protocol.version` refers to the version of the protocol used and - // might be different from the protocol client's version. If the HTTP client used - // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should - // be set to `1.1`. + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") - // Remote socket peer name. + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. // // Type: string - // RequirementLevel: Recommended (If available and different from `net.peer.name` - // and if `net.sock.peer.addr` is set.) + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) 
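// A sketch of marking an outgoing FaaS invocation with the faas.invoked_*
// helpers added above (same assumed semconv/trace imports as in the earlier
// sketches); FaaSInvokedProviderAWS is assumed from the elided portion of the
// provider enum, and the region value is the example from the attribute docs.
func annotateFaaSClientSpan(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSInvokedName("my-function"),
		semconv.FaaSInvokedProviderAWS,
		semconv.FaaSInvokedRegion("eu-central-1"),
	)
}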
// Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local - // communication, [etc](https://man7.org/linux/man- - // pages/man7/address_families.7.html). + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - // Remote socket peer port. + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if @@ -713,56 +1140,77 @@ const ( // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - // Protocol [address family](https://man7.org/linux/man- - // pages/man7/address_families.7.html) which is used for communication. + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. // // Type: Enum - // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of - // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry - // SHOULD accept both IPv4 and IPv6 formats for the address in + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") - // Logical remote hostname, see note below. + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra - // DNS lookup. + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") - // Logical remote port number + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") - // Logical local hostname or similar, see note below. + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") - // Logical local port number, preferably the one that the peer used to connect + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") - // Local socket address. Useful in case of a multi-IP host. + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - // Local socket port number. + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if @@ -770,44 +1218,62 @@ const ( // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") - // The internet connection type currently being used by the host. + + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - // This describes more details regarding the connection.type. It may be the type - // of cell technology connection, but it could be used for describing details - // about a wifi connection. + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - // The name of the mobile carrier. + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - // The mobile carrier country code. + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - // The mobile carrier network code. + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string @@ -897,11 +1363,120 @@ var ( NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. +func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. 
+func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + // Operations that access some remote service. const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. // // Type: string // RequirementLevel: Optional @@ -910,31 +1485,49 @@ const ( PeerServiceKey = attribute.Key("peer.service") ) -// These attributes may be used for any operation with an authenticated and/or authorized enduser. +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. 
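
For illustration, a minimal sketch of how the constructor helpers above can be used in instrumentation code in place of the raw *Key constants. The semconv import path/version and the helper name clientNetworkAttrs are assumptions made for the sketch; only the attribute constructors themselves come from this file:

package example // hypothetical package, for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed import path; not shown in this hunk
)

// clientNetworkAttrs builds peer/network attributes for an outgoing call
// using the generated constructors instead of the *Key constants.
func clientNetworkAttrs(host string, port int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.NetPeerName(host),            // logical remote hostname
		semconv.NetPeerPort(port),            // logical remote port number
		semconv.NetAppProtocolName("http"),   // application layer protocol, normalized to lowercase
		semconv.PeerService("inventory-api"), // remote service.name, if known to the caller
	}
}
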
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional @@ -943,16 +1536,50 @@ const ( EnduserScopeKey = attribute.Key("enduser.scope") ) -// These attributes may be used for any operation to store information about a thread that started a span. +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. const ( - // Current "managed" thread ID (as opposed to OS thread ID). + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") - // Current thread name. + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. // // Type: string // RequirementLevel: Optional @@ -961,43 +1588,70 @@ const ( ThreadNameKey = attribute.Key("thread.name") ) -// These attributes allow to report this unit of code and therefore to provide more context about the span. +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. const ( - // The method or function name, or equivalent (usually rightmost part of the code - // unit's name). + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") - // The "namespace" within which `code.function` is defined. Usually the qualified - // class or module name, such that `code.namespace` + some separator + - // `code.function` form a unique identifier for the code unit. + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") - // The source code file name that identifies the code unit as uniquely as possible - // (preferably an absolute file path). + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") - // The line number in `code.filepath` best representing the operation. It SHOULD - // point within the code unit named in `code.function`. + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") - // The column number in `code.filepath` best representing the operation. 
It SHOULD - // point within the code unit named in `code.function`. + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. // // Type: int // RequirementLevel: Optional @@ -1006,42 +1660,98 @@ const ( CodeColumnKey = attribute.Key("code.column") ) -// This document defines semantic conventions for HTTP client and server Spans. +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. const ( - // HTTP request method. + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") - // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User-Agent](https://www.rfc- - // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- - // length) header. For requests using transport encoding, this should be the + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the // compressed size. // // Type: int @@ -1049,10 +1759,14 @@ const ( // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- - // length) header. For requests using transport encoding, this should be the + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the // compressed size. // // Type: int @@ -1077,74 +1791,149 @@ var ( HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. 
It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + // Semantic Convention for HTTP Client const ( - // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. - // Usually the fragment is not transmitted over HTTP, but if it is known, it - // should be included nevertheless. + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the attribute's - // value should be `https://www.example.com/`. + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") - // The ordinal number of request resending attempt (for any reason, including + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including // redirects). // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets resent - // by the client, regardless of what was the cause of the resending (e.g. - // redirection, authorization failure, 503 Server Unavailable, network issues, or - // any other). + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). 
HTTPResendCountKey = attribute.Key("http.resend_count") ) +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + // Semantic Convention for HTTP Server const ( - // The URI scheme identifying the used protocol. + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") - // The full request target as passed in a HTTP request line or equivalent. + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") - // The matched route (path template in the format used by the respective server - // framework). See note below + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and the URI - // path can NOT substitute it. + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.sock.peer.addr`, which would + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would // identify the network-level peer, which may be a proxy. 
- + // // This attribute should be set when a source of information different - // from the one used for `net.sock.peer.addr`, is available even if that other + // from the one used for `net.sock.peer.addr`, is available even if that + // other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting @@ -1154,89 +1943,155 @@ const ( HTTPClientIPKey = attribute.Key("http.client_ip") ) +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + // Attributes that exist for multiple DynamoDB request types. const ( - // The keys in the `RequestItems` object field. + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response // field. 
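
For illustration, a short sketch combining the HTTP helpers defined above on a server-side span. The import path/version and the helper name serverHTTPAttrs are assumptions; the constructors themselves are the ones added in this file:

package example // hypothetical package, for illustration only

import (
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed import path; not shown in this hunk
)

// serverHTTPAttrs derives common http.* span attributes from an incoming request.
func serverHTTPAttrs(r *http.Request, status int, route string) []attribute.KeyValue {
	scheme := "http"
	if r.TLS != nil {
		scheme = "https"
	}
	return []attribute.KeyValue{
		semconv.HTTPMethod(r.Method),
		semconv.HTTPScheme(scheme),
		semconv.HTTPTarget(r.RequestURI),
		semconv.HTTPRoute(route),       // matched route template, if the framework exposes one
		semconv.HTTPStatusCode(status), // only if a status code was actually sent
	}
}
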
// // Type: string[] // RequirementLevel: Optional // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. 
+ + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. // // Type: string // RequirementLevel: Optional @@ -1245,42 +2100,148 @@ const ( AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + // DynamoDB.CreateTable const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field // // Type: string[] // RequirementLevel: Optional // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + // DynamoDB.ListTables const ( - // The value of the `ExclusiveStartTableName` request parameter. + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The the number of items in the `TableNames` response parameter. + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. 
// // Type: int // RequirementLevel: Optional @@ -1289,9 +2250,25 @@ const ( AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + // DynamoDB.Query const ( - // The value of the `ScanIndexForward` request parameter. + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional @@ -1299,30 +2276,48 @@ const ( AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + // DynamoDB.Scan const ( - // The value of the `Segment` request parameter. + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional @@ -1331,18 +2326,51 @@ const ( AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. 
+func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + // DynamoDB.UpdateTable const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` - // request field. + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. // // Type: string[] // RequirementLevel: Optional @@ -1350,28 +2378,53 @@ const ( // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) -// This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. const ( - // The name of the operation being executed. + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - // The type of the operation being executed. + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - // The GraphQL document being executed. + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. // // Type: string // RequirementLevel: Optional @@ -1390,34 +2443,62 @@ var ( GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) -// Semantic convention describing per-message attributes populated on messaging spans or links. +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. const ( - // A value used by the messaging system as an identifier for the message, - // represented as a string. + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - // The (uncompressed) size of the message payload in bytes. 
Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - // The compressed size of the message payload in bytes. + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. // // Type: int // RequirementLevel: Optional @@ -1426,44 +2507,94 @@ const ( MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ) -// Semantic convention for attributes that describe messaging destination on broker +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker const ( - // The message destination name + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic or - // other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD uniquely - // identify the broker. 
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - // The kind of message destination + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") - // Low cardinality representation of the messaging destination name + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example would - // be a destination name involving a user name or product id. Although the - // destination name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and aggregation. + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - // A boolean that is true if the message destination is temporary and might not - // exist anymore after messages are processed. + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - // A boolean that is true if the message destination is anonymous (could be + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean @@ -1479,45 +2610,90 @@ var ( MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. 
It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + // Semantic convention for attributes that describe messaging source on broker const ( - // The message source name + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' - // Note: Source name SHOULD uniquely identify a specific queue, topic, or other - // entity within the broker. If - // the broker does not have such notion, the source name SHOULD uniquely identify - // the broker. + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. MessagingSourceNameKey = attribute.Key("messaging.source.name") - // The kind of message source + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingSourceKindKey = attribute.Key("messaging.source.kind") - // Low cardinality representation of the messaging source name + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' - // Note: Source names could be constructed from templates. An example would be a - // source name involving a user name or product id. Although the source name in - // this case is of high cardinality, the underlying template is of low cardinality - // and can be effectively used for grouping and aggregation. + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. MessagingSourceTemplateKey = attribute.Key("messaging.source.template") - // A boolean that is true if the message source is temporary and might not exist - // anymore after messages are processed. 
+ + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") - // A boolean that is true if the message source is anonymous (could be unnamed or - // have auto-generated name). + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional @@ -1532,36 +2708,74 @@ var ( MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") ) -// This document defines general attributes used in messaging systems. +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. const ( - // A string identifying the messaging system. + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") - // A string identifying the kind of messaging operation as defined in the - // [Operation names](#operation-names) section above. + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: If a custom value is used, it MUST be of low cardinality. 
MessagingOperationKey = attribute.Key("messaging.operation") - // The number of messages sent, received, or processed in the scope of the + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the // batching operation. // // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an operation on - // a batch of messages.) + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) // Stability: stable // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans - // that operate with a single message. When a messaging client library supports - // both batch and single-message API for the same operation, instrumentations - // SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use - // it for single-message APIs. + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ) @@ -1574,13 +2788,31 @@ var ( MessagingOperationProcess = MessagingOperationKey.String("process") ) -// Semantic convention for a consumer of messages received from a messaging system +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system const ( - // The identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both are - // present, or only `messaging.kafka.consumer.group`. For brokers, such as - // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the - // message. + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. // // Type: string // RequirementLevel: Optional @@ -1589,9 +2821,22 @@ const ( MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ) +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + // Attributes for RabbitMQ const ( - // RabbitMQ message routing key. + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) @@ -1600,68 +2845,151 @@ const ( MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ) +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + // Attributes for Apache Kafka const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message.id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - // Name of the Kafka Consumer Group that is handling the message. Only applies to - // consumers, not producers. + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - // Client ID for the Consumer or Producer that is handling the message. + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. 
+ + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - // Partition the message is received from. + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") - // The offset of a record in the corresponding Kafka partition. + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - // A boolean that is true if the message is a tombstone. + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. // // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the - // value is assumed to be `false`.) + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) // Stability: stable MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ) +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. 
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + // Attributes for Apache RocketMQ const ( - // Namespace of RocketMQ resources, resources in different namespaces are + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string @@ -1669,68 +2997,97 @@ const ( // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - // Name of the RocketMQ producer/consumer group that is handling the message. The - // client type is identified by the SpanKind. + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - // The unique identifier for each client. + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - // The timestamp in milliseconds that the delay message is expected to be - // delivered to consumer. + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. // // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay and delay - // time level is not specified.) + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) 
// Stability: stable // Examples: 1665987217045 MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - // The delay time level for delay message, which determines the message delay - // time. + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. // // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay and - // delivery timestamp is not specified.) + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - // It is essential for FIFO message. Messages that belong to the same message - // group are always processed one by one within the same consumer group. + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - // Type of message. + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - // The secondary classifier of message besides topic. + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - // Key(s) of message, another way to mark message besides message id. + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - // Model of message consumption. This only applies to consumer spans. + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. // // Type: Enum // RequirementLevel: Optional @@ -1756,31 +3113,98 @@ var ( MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) -// This document defines semantic conventions for remote procedure calls. 
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. const ( - // A string identifying the remoting system. See below for a list of well-known - // identifiers. + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") - // The full (logical) name of the service being called, including its package - // name, if applicable. + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). RPCServiceKey = attribute.Key("rpc.service") - // The name of the (logical) method being called, must be equal to the $method - // part in the span name. + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. // // Type: string // RequirementLevel: Recommended @@ -1788,9 +3212,9 @@ const ( // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the latter - // (e.g., method actually executing the call on the server side, RPC client stub - // method on the client side). + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) @@ -1805,11 +3229,27 @@ var ( RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + // Tech-specific attributes for gRPC. const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. 
// // Type: Enum // RequirementLevel: Required @@ -1856,25 +3296,33 @@ var ( // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. // // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default version - // (`1.0`)) + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. // // Type: int @@ -1882,6 +3330,9 @@ const ( // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. // // Type: string @@ -1890,3 +3341,35 @@ const ( // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. 
It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index bda8f7cbfae..d82fbaf550e 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.12.0" + return "1.13.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 66f45622215..22df4553c2e 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.12.0 + version: v1.13.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -34,7 +34,7 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.35.0 + version: v0.36.0 modules: - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus diff --git a/vendor/modules.txt b/vendor/modules.txt index 689523ddb60..2f4958129a6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -978,7 +978,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp # go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/autoprop -# go.opentelemetry.io/contrib/propagators/aws v1.12.0 +# go.opentelemetry.io/contrib/propagators/aws v1.14.0 ## explicit; go 1.18 go.opentelemetry.io/contrib/propagators/aws/xray # go.opentelemetry.io/contrib/propagators/b3 v1.9.0 @@ -990,7 +990,7 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.9.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.12.0 +# go.opentelemetry.io/otel v1.13.0 ## explicit; go 1.18 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1032,7 +1032,7 @@ go.opentelemetry.io/otel/metric/instrument/syncfloat64 go.opentelemetry.io/otel/metric/instrument/syncint64 go.opentelemetry.io/otel/metric/internal/global go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/sdk v1.12.0 +# go.opentelemetry.io/otel/sdk v1.13.0 ## explicit; go 1.18 go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal @@ -1040,7 +1040,7 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.12.0 +# go.opentelemetry.io/otel/trace v1.13.0 ## explicit; go 1.18 go.opentelemetry.io/otel/trace # go.opentelemetry.io/proto/otlp v0.19.0 From 63424d8f7f4447276ea8bff34ae3f5605663391a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Feb 2023 11:06:09 -0800 Subject: [PATCH 076/133] Bump go.etcd.io/etcd/api/v3 from 3.5.6 to 3.5.7 
(#5126) Bumps [go.etcd.io/etcd/api/v3](https://github.com/etcd-io/etcd) from 3.5.6 to 3.5.7. - [Release notes](https://github.com/etcd-io/etcd/releases) - [Changelog](https://github.com/etcd-io/etcd/blob/main/Dockerfile-release.ppc64le) - [Commits](https://github.com/etcd-io/etcd/compare/v3.5.6...v3.5.7) --- updated-dependencies: - dependency-name: go.etcd.io/etcd/api/v3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 3 ++- vendor/go.etcd.io/etcd/api/v3/version/version.go | 2 +- vendor/modules.txt | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 7965712ae16..2b92d936d7a 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d - go.etcd.io/etcd/api/v3 v3.5.6 + go.etcd.io/etcd/api/v3 v3.5.7 go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.6 go.opentelemetry.io/contrib/propagators/aws v1.14.0 diff --git a/go.sum b/go.sum index aa628385bbf..9aea35fc7e5 100644 --- a/go.sum +++ b/go.sum @@ -1582,8 +1582,9 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A= go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index ee202c42b72..f3b389421ef 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.5.6" + Version = "3.5.7" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/modules.txt b/vendor/modules.txt index 2f4958129a6..b8ac2b0c584 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -922,8 +922,8 @@ github.com/yuin/gopher-lua github.com/yuin/gopher-lua/ast github.com/yuin/gopher-lua/parse github.com/yuin/gopher-lua/pm -# go.etcd.io/etcd/api/v3 v3.5.6 -## explicit; go 1.16 +# go.etcd.io/etcd/api/v3 v3.5.7 +## explicit; go 1.17 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/membershippb From 012deb82d2621385912bff1fe9e325f93598ea02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 13:24:01 +0100 Subject: [PATCH 077/133] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace (#5163) Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) from 1.12.0 to 1.13.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.12.0...v1.13.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 6 +++--- go.sum | 12 ++++++------ .../google.golang.org/grpc/balancer/grpclb/grpclb.go | 2 +- .../balancer/grpclb/{grpclbstate => state}/state.go | 8 ++++---- .../grpc/internal/resolver/dns/dns_resolver.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 8 ++++---- 7 files changed, 20 insertions(+), 20 deletions(-) rename vendor/google.golang.org/grpc/balancer/grpclb/{grpclbstate => state}/state.go (87%) diff --git a/go.mod b/go.mod index 2b92d936d7a..55772fdece5 100644 --- a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( go.opentelemetry.io/contrib/propagators/aws v1.14.0 go.opentelemetry.io/otel v1.13.0 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.13.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 go.opentelemetry.io/otel/sdk v1.13.0 go.opentelemetry.io/otel/trace v1.13.0 @@ -69,7 +69,7 @@ require ( golang.org/x/net v0.5.0 golang.org/x/sync v0.1.0 golang.org/x/time v0.3.0 - google.golang.org/grpc v1.52.0 + google.golang.org/grpc v1.52.3 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 sigs.k8s.io/yaml v1.3.0 @@ -196,7 +196,7 @@ require ( go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.13.0 // indirect go.opentelemetry.io/otel/metric v0.34.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect diff --git a/go.sum b/go.sum index 9aea35fc7e5..3d05c4cb3f6 100644 --- a/go.sum +++ b/go.sum @@ -1638,11 +1638,11 @@ go.opentelemetry.io/otel/bridge/opentracing 
v1.11.1-0.20221013154440-84e28fd3bdb go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 h1:UfDENi+LTcLjQ/JhaXimjlIgn7wWjwbEMmdREm2Gyng= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.13.0 h1:pa05sNT/P8OsIQ8mPZKTIyiBuzS/xDGLVx+DCt0y6Vs= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.13.0/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 h1:ZVqtSAxrR4+ofzayuww0/EKamCjjnwnXTMRZzMudJoU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0/go.mod h1:IlaGLENJkAl9+Xoo3J0unkdOwtL+rmqZ3ryMjUtYA94= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.13.0 h1:Any/nVxaoMq1T2w0W85d6w5COlLuCCgOYKQhJJWEMwQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.13.0/go.mod h1:46vAP6RWfNn7EKov73l5KBFlNxz8kYlxR1woU+bJ4ZY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 h1:+tsVdWosoqDfX6cdHAeacZozjQS94ySBd+aUXFwnNKA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0/go.mod h1:jSqjV+Knu1Jyvh+l3fx7V210Ev3HHgNQAi8YqpXaQP8= @@ -2376,8 +2376,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index b0dd72fce14..dd15810d0ae 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/grpclb/grpclbstate" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go 
b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go similarity index 87% rename from vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go rename to vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index cece046be34..4ecfa1c2151 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -16,9 +16,9 @@ * */ -// Package grpclbstate declares grpclb types to be set by resolvers wishing to -// pass information to grpclb via resolver.State Attributes. -package grpclbstate +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.grpclbstate") +const key = keyType("grpc.grpclb.state") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index d51302e65c6..b08ac30adfe 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 243e06e8d3e..6410a0b96c9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.52.0" +const Version = "1.52.3" diff --git a/vendor/modules.txt b/vendor/modules.txt index b8ac2b0c584..fc01a725a74 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1010,10 +1010,10 @@ go.opentelemetry.io/otel/semconv/v1.17.0 ## explicit; go 1.18 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 +# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.13.0 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.13.0 ## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig @@ -1206,7 +1206,7 @@ google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.52.0 +# google.golang.org/grpc v1.52.3 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1215,7 +1215,7 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 -google.golang.org/grpc/balancer/grpclb/grpclbstate +google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz From 5bc269f623d3e7af508493268ead5b068dacd135 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 10:23:01 -0800 Subject: [PATCH 078/133] Bump github.com/minio/minio-go/v7 from 7.0.47 to 7.0.49 (#5160) Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.47 to 7.0.49. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.47...v7.0.49) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 14 +- go.sum | 30 +- .../github.com/klauspost/cpuid/v2/README.md | 229 ++++++++++- vendor/github.com/klauspost/cpuid/v2/cpuid.go | 213 +++++++++-- .../klauspost/cpuid/v2/detect_x86.go | 2 +- .../klauspost/cpuid/v2/featureid_string.go | 361 ++++++++++-------- .../klauspost/cpuid/v2/os_darwin_arm64.go | 2 +- .../github.com/minio/minio-go/v7/.gitignore | 3 +- .../minio/minio-go/v7/api-get-object.go | 12 +- .../minio/minio-go/v7/api-get-options.go | 52 +++ .../minio-go/v7/api-put-object-streaming.go | 6 +- .../minio/minio-go/v7/api-put-object.go | 28 ++ .../minio-go/v7/api-putobject-snowball.go | 5 + vendor/github.com/minio/minio-go/v7/api.go | 2 +- .../v7/pkg/credentials/sts_tls_identity.go | 3 + .../minio/minio-go/v7/s3-endpoints.go | 1 + vendor/github.com/minio/minio-go/v7/utils.go | 17 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 11 +- .../x/crypto/pkcs12/internal/rc2/rc2.go | 53 ++- vendor/golang.org/x/net/http2/flow.go | 2 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 81 ++-- vendor/golang.org/x/net/http2/server.go | 20 +- vendor/golang.org/x/net/http2/transport.go | 2 +- vendor/golang.org/x/net/ipv6/dgramopt.go | 2 +- vendor/golang.org/x/net/netutil/listen.go | 2 +- vendor/golang.org/x/net/trace/histogram.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 1 + vendor/golang.org/x/sys/cpu/endian_big.go | 11 + vendor/golang.org/x/sys/cpu/endian_little.go | 11 + vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- .../golang.org/x/sys/unix/syscall_darwin.go | 1 + .../x/sys/unix/syscall_freebsd_386.go | 9 +- .../x/sys/unix/syscall_freebsd_amd64.go | 9 +- .../x/sys/unix/syscall_freebsd_arm.go | 9 +- .../x/sys/unix/syscall_freebsd_arm64.go | 9 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 9 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 3 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 2 +- vendor/golang.org/x/sys/unix/timestruct.go | 2 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 9 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 30 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_loong64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../golang.org/x/sys/unix/zsyscall_linux.go | 11 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 217 +++++++++-- .../x/sys/windows/syscall_windows.go | 14 +- .../internal/language/compact/language.go | 2 +- .../x/text/internal/language/language.go | 2 +- vendor/golang.org/x/text/language/language.go | 2 +- vendor/gopkg.in/ini.v1/deprecated.go | 5 +- vendor/gopkg.in/ini.v1/ini.go | 8 +- vendor/modules.txt | 14 +- 66 files changed, 1174 insertions(+), 401 deletions(-) create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go diff --git a/go.mod b/go.mod index 55772fdece5..fad8859df30 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ 
require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.15 github.com/lib/pq v1.10.7 - github.com/minio/minio-go/v7 v7.0.47 + github.com/minio/minio-go/v7 v7.0.49 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e @@ -66,7 +66,7 @@ require ( go.opentelemetry.io/otel/sdk v1.13.0 go.opentelemetry.io/otel/trace v1.13.0 go.uber.org/atomic v1.10.0 - golang.org/x/net v0.5.0 + golang.org/x/net v0.7.0 golang.org/x/sync v0.1.0 golang.org/x/time v0.3.0 google.golang.org/grpc v1.52.3 @@ -152,7 +152,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/cpuid/v2 v2.1.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.3 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect @@ -202,12 +202,12 @@ require ( go.uber.org/goleak v1.2.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/crypto v0.1.0 // indirect + golang.org/x/crypto v0.6.0 // indirect golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/oauth2 v0.3.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/tools v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect @@ -216,7 +216,7 @@ require ( google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect ) // Override since git.apache.org is down. The docs say to fetch from github. 
diff --git a/go.sum b/go.sum index 3d05c4cb3f6..65baa920744 100644 --- a/go.sum +++ b/go.sum @@ -1043,8 +1043,8 @@ github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7y github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= -github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1136,8 +1136,8 @@ github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7Xn github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs= -github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/minio-go/v7 v7.0.49 h1:dE5DfOtnXMXCjr/HWI6zN9vCrY6Sv666qhhiwUMvGV4= +github.com/minio/minio-go/v7 v7.0.49/go.mod h1:UI34MvQEiob3Cf/gGExGMmzugkM/tNgbFypNDy5LMVc= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= @@ -1719,8 +1719,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1856,8 +1856,8 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod 
h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2047,15 +2047,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2066,8 +2066,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2414,8 +2414,8 @@ 
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:a gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index ea7df3dd846..857a93e592d 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -16,10 +16,17 @@ Package home: https://github.com/klauspost/cpuid ## installing -`go get -u github.com/klauspost/cpuid/v2` using modules. - +`go get -u github.com/klauspost/cpuid/v2` using modules. Drop `v2` for others. +### Homebrew + +For macOS/Linux users, you can install via [brew](https://brew.sh/) + +```sh +$ brew install cpuid +``` + ## example ```Go @@ -77,10 +84,14 @@ We have Streaming SIMD 2 Extensions The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features. A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler. +To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc. +This can be using with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported. + Note that for some cpu/os combinations some features will not be detected. `amd64` has rather good support and should work reliably on all platforms. -Note that hypervisors may not pass through all CPU features. +Note that hypervisors may not pass through all CPU features through to the guest OS, +so even if your host supports a feature it may not be visible on guests. 
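The README hunk above documents the feature-checking API shipped by this vendored cpuid update: `Has` for a single flag, `CombineFeatures`/`HasAll` for a pre-built feature set, and `X64Level` for the microarchitecture level (their implementations appear later in this patch, in cpuid.go). A minimal usage sketch, assuming only the exported names visible in this diff (`cpuid.CPU`, `BrandName`, and the feature constants); it is an illustration of the documented API, not part of the patch:

```Go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	fmt.Println("CPU:", cpuid.CPU.BrandName)

	// Single-feature check; Has is cheap and usually inlined by the gc compiler.
	if cpuid.CPU.Has(cpuid.AVX2) {
		fmt.Println("AVX2 is supported")
	}

	// Build the feature set once, then test all members with one call.
	want := cpuid.CombineFeatures(cpuid.SSE, cpuid.SSE2, cpuid.SSE3, cpuid.SSE4, cpuid.SSE42)
	fmt.Println("all SSE levels present:", cpuid.CPU.HasAll(want))

	// Microarchitecture level; 0 when not in x86-64 mode or when required features are missing.
	fmt.Println("x86-64 level:", cpuid.CPU.X64Level())
}
```

Because `CombineFeatures` returns a pre-computed flag set, `HasAll` reduces to a few AND/compare operations over the flag words, which is why the README recommends it when testing a larger group of features.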
## arm64 feature detection @@ -253,6 +264,218 @@ Exit Code 0 Exit Code 1 ``` + +## Available flags + +### x86 & amd64 + +| Feature Flag | Description | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) | +| AESNI | Advanced Encryption Standard New Instructions | +| AMD3DNOW | AMD 3DNOW | +| AMD3DNOWEXT | AMD 3DNowExt | +| AMXBF16 | Tile computational operations on BFLOAT16 numbers | +| AMXINT8 | Tile computational operations on 8-bit integers | +| AMXFP16 | Tile computational operations on FP16 numbers | +| AMXTILE | Tile architecture | +| AVX | AVX functions | +| AVX2 | AVX2 functions | +| AVX512BF16 | AVX-512 BFLOAT16 Instructions | +| AVX512BITALG | AVX-512 Bit Algorithms | +| AVX512BW | AVX-512 Byte and Word Instructions | +| AVX512CD | AVX-512 Conflict Detection Instructions | +| AVX512DQ | AVX-512 Doubleword and Quadword Instructions | +| AVX512ER | AVX-512 Exponential and Reciprocal Instructions | +| AVX512F | AVX-512 Foundation | +| AVX512FP16 | AVX-512 FP16 Instructions | +| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions | +| AVX512PF | AVX-512 Prefetch Instructions | +| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions | +| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 | +| AVX512VL | AVX-512 Vector Length Extensions | +| AVX512VNNI | AVX-512 Vector Neural Network Instructions | +| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q | +| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword | +| AVXIFMA | AVX-IFMA instructions | +| AVXNECONVERT | AVX-NE-CONVERT instructions | +| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | +| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | +| AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| BMI1 | Bit Manipulation Instruction Set 1 | +| BMI2 | Bit Manipulation Instruction Set 2 | +| CETIBT | Intel CET Indirect Branch Tracking | +| CETSS | Intel CET Shadow Stack | +| CLDEMOTE | Cache Line Demote | +| CLMUL | Carry-less Multiplication | +| CLZERO | CLZERO instruction supported | +| CMOV | i686 CMOV | +| CMPCCXADD | CMPCCXADD instructions | +| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB | +| CMPXCHG8 | CMPXCHG8 instruction | +| CPBOOST | Core Performance Boost | +| CPPC | AMD: Collaborative Processor Performance Control | +| CX16 | CMPXCHG16B Instruction | +| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ | +| ENQCMD | Enqueue Command | +| ERMS | Enhanced REP MOVSB/STOSB | +| F16C | Half-precision floating-point conversion | +| FLUSH_L1D | Flush L1D cache | +| FMA3 | Intel FMA 3. Does not imply AVX. | +| FMA4 | Bulldozer FMA4 functions | +| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide | +| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide | +| FSRM | Fast Short Rep Mov | +| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 | +| FXSROPT | FXSAVE/FXRSTOR optimizations | +| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. | +| HLE | Hardware Lock Elision | +| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR | +| HTT | Hyperthreading (enabled) | +| HWA | Hardware assert supported. 
Indicates support for MSRC001_10 | +| HYBRID_CPU | This part has CPUs of more than one type. | +| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors | +| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) | +| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR | +| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) | +| IBRS | AMD: Indirect Branch Restricted Speculation | +| IBRS_PREFERRED | AMD: IBRS is preferred over software solution | +| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection | +| IBS | Instruction Based Sampling (AMD) | +| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) | +| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) | +| IBSFFV | Instruction Based Sampling Feature (AMD) | +| IBSOPCNT | Instruction Based Sampling Feature (AMD) | +| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) | +| IBSOPSAM | Instruction Based Sampling Feature (AMD) | +| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) | +| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) | +| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported | +| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported | +| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | +| IBS_PREVENTHOST | Disallowing IBS use by the host supported | +| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| INT_WBINVD | WBINVD/WBNOINVD are interruptible. | +| INVLPGB | NVLPGB and TLBSYNC instruction supported | +| LAHF | LAHF/SAHF in long mode | +| LAM | If set, CPU supports Linear Address Masking | +| LBRVIRT | LBR virtualization | +| LZCNT | LZCNT instruction | +| MCAOVERFLOW | MCA overflow recovery support. | +| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. | +| MCOMMIT | MCOMMIT instruction supported | +| MD_CLEAR | VERW clears CPU buffers | +| MMX | standard MMX | +| MMXEXT | SSE integer functions or AMD MMX ext | +| MOVBE | MOVBE instruction (big-endian) | +| MOVDIR64B | Move 64 Bytes as Direct Store | +| MOVDIRI | Move Doubleword as Direct Store | +| MOVSB_ZL | Fast Zero-Length MOVSB | +| MPX | Intel MPX (Memory Protection Extensions) | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | +| MSRIRC | Instruction Retired Counter MSR available | +| MSR_PAGEFLUSH | Page Flush MSR available | +| NRIPS | Indicates support for NRIP save on VMEXIT | +| NX | NX (No-Execute) bit | +| OSXSAVE | XSAVE enabled by OS | +| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | +| POPCNT | POPCNT instruction | +| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | AMD: Predictive Store Forward Disable | +| RDPRU | RDPRU instruction supported | +| RDRAND | RDRAND instruction is available | +| RDSEED | RDSEED instruction is available | +| RDTSCP | RDTSCP Instruction | +| RTM | Restricted Transactional Memory | +| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. 
| +| SERIALIZE | Serialize Instruction Execution | +| SEV | AMD Secure Encrypted Virtualization supported | +| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host | +| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported | +| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests | +| SEV_ES | AMD SEV Encrypted State supported | +| SEV_RESTRICTED | AMD SEV Restricted Injection supported | +| SEV_SNP | AMD SEV Secure Nested Paging supported | +| SGX | Software Guard Extensions | +| SGXLC | Software Guard Extensions Launch Control | +| SHA | Intel SHA Extensions | +| SME | AMD Secure Memory Encryption supported | +| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced | +| SPEC_CTRL_SSBD | Speculative Store Bypass Disable | +| SRBDS_CTRL | SRBDS mitigation MSR available | +| SSE | SSE functions | +| SSE2 | P4 SSE functions | +| SSE3 | Prescott SSE3 functions | +| SSE4 | Penryn SSE4.1 functions | +| SSE42 | Nehalem SSE4.2 functions | +| SSE4A | AMD Barcelona microarchitecture SSE4a instructions | +| SSSE3 | Conroe SSSE3 functions | +| STIBP | Single Thread Indirect Branch Predictors | +| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On | +| STOSB_SHORT | Fast short STOSB | +| SUCCOR | Software uncorrectable error containment and recovery capability. | +| SVM | AMD Secure Virtual Machine | +| SVMDA | Indicates support for the SVM decode assists. | +| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control | +| SVML | AMD SVM lock. Indicates support for SVM-Lock. | +| SVMNP | AMD SVM nested paging | +| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter | +| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold | +| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. | +| SYSEE | SYSENTER and SYSEXIT instructions | +| TBM | AMD Trailing Bit Manipulation | +| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations | +| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. | +| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. | +| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 | +| TSXLDTRK | Intel TSX Suspend Load Address Tracking | +| VAES | Vector AES. AVX(512) versions requires additional checks. | +| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. | +| VMPL | AMD VM Permission Levels supported | +| VMSA_REGPROT | AMD VMSA Register Protection supported | +| VMX | Virtual Machine Extensions | +| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. | +| VTE | AMD Virtual Transparent Encryption supported | +| WAITPKG | TPAUSE, UMONITOR, UMWAIT | +| WBNOINVD | Write Back and Do Not Invalidate Cache | +| X87 | FPU | +| XGETBV1 | Supports XGETBV with ECX = 1 | +| XOP | Bulldozer XOP functions | +| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV | +| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. 
| +| XSAVEOPT | XSAVEOPT available | +| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS | + +# ARM features: + +| Feature Flag | Description | +|--------------|------------------------------------------------------------------| +| AESARM | AES instructions | +| ARMCPUID | Some CPU ID registers readable at user-level | +| ASIMD | Advanced SIMD | +| ASIMDDP | SIMD Dot Product | +| ASIMDHP | Advanced SIMD half-precision floating point | +| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) | +| ATOMICS | Large System Extensions (LSE) | +| CRC32 | CRC32/CRC32C instructions | +| DCPOP | Data cache clean to Point of Persistence (DC CVAP) | +| EVTSTRM | Generic timer | +| FCMA | Floatin point complex number addition and multiplication | +| FP | Single-precision and double-precision floating point | +| FPHP | Half-precision floating point | +| GPA | Generic Pointer Authentication | +| JSCVT | Javascript-style double->int convert (FJCVTZS) | +| LRCPC | Weaker release consistency (LDAPR, etc) | +| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) | +| SHA1 | SHA-1 instructions (SHA1C, etc) | +| SHA2 | SHA-2 instructions (SHA256H, etc) | +| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) | +| SHA512 | SHA512 instructions | +| SM3 | SM3 instructions | +| SM4 | SM4 instructions | +| SVE | Scalable Vector Extension | + # license This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 701f2385b68..cf2ae9c51ed 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -73,6 +73,7 @@ const ( AMD3DNOW // AMD 3DNOW AMD3DNOWEXT // AMD 3DNowExt AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers AMXTILE // Tile architecture AVX // AVX functions @@ -93,8 +94,11 @@ const ( AVX512VNNI // AVX-512 Vector Neural Network Instructions AVX512VP2INTERSECT // AVX-512 Intersect for D/Q AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 CETIBT // Intel CET Indirect Branch Tracking @@ -103,15 +107,22 @@ const ( CLMUL // Carry-less Multiplication CLZERO // CLZERO instruction supported CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB CMPXCHG8 // CMPXCHG8 instruction CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor Performance Control CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ ENQCMD // Enqueue Command ERMS // Enhanced REP MOVSB/STOSB F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache FMA3 // Intel FMA 3. Does not imply AVX. 
FMA4 // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 FXSROPT // FXSAVE/FXRSTOR optimizations GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. @@ -119,8 +130,14 @@ const ( HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR HTT // Hyperthreading (enabled) HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection IBS // Instruction Based Sampling (AMD) IBSBRNTRGT // Instruction Based Sampling Feature (AMD) IBSFETCHSAM // Instruction Based Sampling Feature (AMD) @@ -130,7 +147,11 @@ const ( IBSOPSAM // Instruction Based Sampling Feature (AMD) IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 INT_WBINVD // WBINVD/WBNOINVD are interruptible. INVLPGB // NVLPGB and TLBSYNC instruction supported LAHF // LAHF/SAHF in long mode @@ -138,13 +159,16 @@ const ( LBRVIRT // LBR virtualization LZCNT // LZCNT instruction MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers MMX // standard MMX MMXEXT // SSE integer functions or AMD MMX ext MOVBE // MOVBE instruction (big-endian) MOVDIR64B // Move 64 Bytes as Direct Store MOVDIRI // Move Doubleword as Direct Store MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD MPX // Intel MPX (Memory Protection Extensions) MSRIRC // Instruction Retired Counter MSR available MSR_PAGEFLUSH // Page Flush MSR available @@ -153,13 +177,15 @@ const ( OSXSAVE // XSAVE enabled by OS PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // AMD: Predictive Store Forward Disable RDPRU // RDPRU instruction supported RDRAND // RDRAND instruction is available RDSEED // RDSEED instruction is available RDTSCP // RDTSCP Instruction RTM // Restricted Transactional Memory RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. 
- SCE // SYSENTER and SYSEXIT instructions SERIALIZE // Serialize Instruction Execution SEV // AMD Secure Encrypted Virtualization supported SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host @@ -173,6 +199,8 @@ const ( SHA // Intel SHA Extensions SME // AMD Secure Memory Encryption supported SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available SSE // SSE functions SSE2 // P4 SSE functions SSE3 // Prescott SSE3 functions @@ -181,6 +209,7 @@ const ( SSE4A // AMD Barcelona microarchitecture SSE4a instructions SSSE3 // Conroe SSSE3 functions STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On STOSB_SHORT // Fast short STOSB SUCCOR // Software uncorrectable error containment and recovery capability. SVM // AMD Secure Virtual Machine @@ -190,8 +219,12 @@ const ( SVMNP // AMD SVM nested paging SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. + SYSEE // SYSENTER and SYSEXIT instructions TBM // AMD Trailing Bit Manipulation + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 TSXLDTRK // Intel TSX Suspend Load Address Tracking VAES // Vector AES. AVX(512) versions requires additional checks. @@ -253,6 +286,7 @@ type CPUInfo struct { LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. Family int // CPU family number Model int // CPU model number + Stepping int // CPU stepping info CacheLine int // Cache line size in bytes. Will be 0 if undetectable. Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. BoostFreq int64 // Max clock speed, if known, 0 otherwise @@ -355,30 +389,61 @@ func (c CPUInfo) Supports(ids ...FeatureID) bool { // Has allows for checking a single feature. // Should be inlined by the compiler. -func (c CPUInfo) Has(id FeatureID) bool { +func (c *CPUInfo) Has(id FeatureID) bool { return c.featureSet.inSet(id) } +// AnyOf returns whether the CPU supports one or more of the requested features. +func (c CPUInfo) AnyOf(ids ...FeatureID) bool { + for _, id := range ids { + if c.featureSet.inSet(id) { + return true + } + } + return false +} + +// Features contains several features combined for a fast check using +// CpuInfo.HasAll +type Features *flagSet + +// CombineFeatures allows to combine several features for a close to constant time lookup. 
+func CombineFeatures(ids ...FeatureID) Features { + var v flagSet + for _, id := range ids { + v.set(id) + } + return &v +} + +func (c *CPUInfo) HasAll(f Features) bool { + return c.featureSet.hasSetP(f) +} + // https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels -var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2) -var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3) -var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE) -var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL) +var oneOfLevel = CombineFeatures(SYSEE, SYSCALL) +var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2) +var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3) +var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE) +var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL) // X64Level returns the microarchitecture level detected on the CPU. // If features are lacking or non x64 mode, 0 is returned. // See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels func (c CPUInfo) X64Level() int { - if c.featureSet.hasSet(level4Features) { + if !c.featureSet.hasOneOf(oneOfLevel) { + return 0 + } + if c.featureSet.hasSetP(level4Features) { return 4 } - if c.featureSet.hasSet(level3Features) { + if c.featureSet.hasSetP(level3Features) { return 3 } - if c.featureSet.hasSet(level2Features) { + if c.featureSet.hasSetP(level2Features) { return 2 } - if c.featureSet.hasSet(level1Features) { + if c.featureSet.hasSetP(level1Features) { return 1 } return 0 @@ -542,7 +607,7 @@ const flagMask = flagBits - 1 // flagSet contains detected cpu features and characteristics in an array of flags type flagSet [(lastID + flagMask) / flagBits]flags -func (s flagSet) inSet(feat FeatureID) bool { +func (s *flagSet) inSet(feat FeatureID) bool { return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0 } @@ -572,7 +637,7 @@ func (s *flagSet) or(other flagSet) { } // hasSet returns whether all features are present. -func (s flagSet) hasSet(other flagSet) bool { +func (s *flagSet) hasSet(other flagSet) bool { for i, v := range other[:] { if s[i]&v != v { return false @@ -581,8 +646,28 @@ func (s flagSet) hasSet(other flagSet) bool { return true } +// hasSet returns whether all features are present. +func (s *flagSet) hasSetP(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasOneOf returns whether one or more features are present. +func (s *flagSet) hasOneOf(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != 0 { + return true + } + } + return false +} + // nEnabled will return the number of enabled flags. 
-func (s flagSet) nEnabled() (n int) { +func (s *flagSet) nEnabled() (n int) { for _, v := range s[:] { n += bits.OnesCount64(uint64(v)) } @@ -677,7 +762,7 @@ func threadsPerCore() int { if vend == AMD { // Workaround for AMD returning 0, assume 2 if >= Zen 2 // It will be more correct than not. - fam, _ := familyModel() + fam, _, _ := familyModel() _, _, _, d := cpuid(1) if (d&(1<<28)) != 0 && fam >= 23 { return 2 @@ -715,14 +800,27 @@ func logicalCores() int { } } -func familyModel() (int, int) { +func familyModel() (family, model, stepping int) { if maxFunctionID() < 0x1 { - return 0, 0 + return 0, 0, 0 } eax, _, _, _ := cpuid(1) - family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) - model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) - return int(family), int(model) + // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0]. + family = int((eax >> 8) & 0xf) + extFam := family == 0x6 // Intel is 0x6, needs extended model. + if family == 0xf { + // Add ExtFamily + family += int((eax >> 20) & 0xff) + extFam = true + } + // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0]. + model = int((eax >> 4) & 0xf) + if extFam { + // Add ExtModel + model += int((eax >> 12) & 0xf0) + } + stepping = int(eax & 0xf) + return family, model, stepping } func physicalCores() int { @@ -857,7 +955,7 @@ func (c *CPUInfo) cacheSize() { c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties - if maxExtendedFunction() < 0x8000001D { + if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) { return } @@ -974,14 +1072,13 @@ func support() flagSet { if mfi < 0x1 { return fs } - family, model := familyModel() + family, model, _ := familyModel() _, _, c, d := cpuid(1) fs.setIf((d&(1<<0)) != 0, X87) fs.setIf((d&(1<<8)) != 0, CMPXCHG8) - fs.setIf((d&(1<<11)) != 0, SCE) + fs.setIf((d&(1<<11)) != 0, SYSEE) fs.setIf((d&(1<<15)) != 0, CMOV) - fs.setIf((d&(1<<22)) != 0, MMXEXT) fs.setIf((d&(1<<23)) != 0, MMX) fs.setIf((d&(1<<24)) != 0, FXSR) fs.setIf((d&(1<<25)) != 0, FXSROPT) @@ -989,9 +1086,9 @@ func support() flagSet { fs.setIf((d&(1<<26)) != 0, SSE2) fs.setIf((c&1) != 0, SSE3) fs.setIf((c&(1<<5)) != 0, VMX) - fs.setIf((c&0x00000200) != 0, SSSE3) - fs.setIf((c&0x00080000) != 0, SSE4) - fs.setIf((c&0x00100000) != 0, SSE42) + fs.setIf((c&(1<<9)) != 0, SSSE3) + fs.setIf((c&(1<<19)) != 0, SSE4) + fs.setIf((c&(1<<20)) != 0, SSE42) fs.setIf((c&(1<<25)) != 0, AESNI) fs.setIf((c&(1<<1)) != 0, CLMUL) fs.setIf(c&(1<<22) != 0, MOVBE) @@ -1068,21 +1165,36 @@ func support() flagSet { fs.setIf(ecx&(1<<30) != 0, SGXLC) // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<4) != 0, FSRM) + fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL) + fs.setIf(edx&(1<<10) != 0, MD_CLEAR) fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) fs.setIf(edx&(1<<14) != 0, SERIALIZE) + fs.setIf(edx&(1<<15) != 0, HYBRID_CPU) fs.setIf(edx&(1<<16) != 0, TSXLDTRK) fs.setIf(edx&(1<<18) != 0, PCONFIG) fs.setIf(edx&(1<<20) != 0, CETIBT) fs.setIf(edx&(1<<26) != 0, IBPB) fs.setIf(edx&(1<<27) != 0, STIBP) + fs.setIf(edx&(1<<28) != 0, FLUSH_L1D) + fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP) + fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP) + fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD) + + // CPUID.(EAX=7, ECX=1).EDX + fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8) + fs.setIf(edx&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx&(1<<14) != 0, PREFETCHI) - // CPUID.(EAX=7, ECX=1) + // CPUID.(EAX=7, ECX=1).EAX eax1, _, _, _ := cpuidex(7, 1) fs.setIf(fs.inSet(AVX) 
&& eax1&(1<<4) != 0, AVXVNNI) + fs.setIf(eax1&(1<<7) != 0, CMPCCXADD) fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT) fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT) fs.setIf(eax1&(1<<22) != 0, HRESET) + fs.setIf(eax1&(1<<23) != 0, AVXIFMA) fs.setIf(eax1&(1<<26) != 0, LAM) // Only detect AVX-512 features if XGETBV is supported @@ -1120,9 +1232,15 @@ func support() flagSet { fs.setIf(edx&(1<<25) != 0, AMXINT8) // eax1 = CPUID.(EAX=7, ECX=1).EAX fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<21) != 0, AMXFP16) } } + + // CPUID.(EAX=7, ECX=2) + _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<5) != 0, MCDT_NO) } + // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) // EAX // Bit 00: XSAVEOPT is available. @@ -1156,20 +1274,24 @@ func support() flagSet { fs.setIf((c&(1<<2)) != 0, SVM) fs.setIf((c&(1<<6)) != 0, SSE4A) fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((c&(1<<22)) != 0, TOPEXT) // EDX - fs.setIf((d&(1<<31)) != 0, AMD3DNOW) - fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT) - fs.setIf((d&(1<<23)) != 0, MMX) - fs.setIf((d&(1<<22)) != 0, MMXEXT) + fs.setIf(d&(1<<11) != 0, SYSCALL) fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<22) != 0, MMXEXT) + fs.setIf(d&(1<<23) != 0, MMX) + fs.setIf(d&(1<<24) != 0, FXSR) + fs.setIf(d&(1<<25) != 0, FXSROPT) fs.setIf(d&(1<<27) != 0, RDTSCP) + fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT) + fs.setIf(d&(1<<31) != 0, AMD3DNOW) /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be * used unless the OS has AVX support. */ if fs.inSet(AVX) { - fs.setIf((c&0x00000800) != 0, XOP) - fs.setIf((c&0x00010000) != 0, FMA4) + fs.setIf((c&(1<<11)) != 0, XOP) + fs.setIf((c&(1<<16)) != 0, FMA4) } } @@ -1183,9 +1305,21 @@ func support() flagSet { if maxExtendedFunction() >= 0x80000008 { _, b, _, _ := cpuid(0x80000008) + fs.setIf(b&(1<<28) != 0, PSFD) + fs.setIf(b&(1<<27) != 0, CPPC) + fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD) + fs.setIf(b&(1<<23) != 0, PPIN) + fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED) + fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS) + fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP) + fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED) + fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON) + fs.setIf(b&(1<<15) != 0, STIBP) + fs.setIf(b&(1<<14) != 0, IBRS) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf(b&(1<<12) != 0, IBPB) fs.setIf((b&(1<<9)) != 0, WBNOINVD) fs.setIf((b&(1<<8)) != 0, MCOMMIT) - fs.setIf((b&(1<<13)) != 0, INT_WBINVD) fs.setIf((b&(1<<4)) != 0, RDPRU) fs.setIf((b&(1<<3)) != 0, INVLPGB) fs.setIf((b&(1<<1)) != 0, MSRIRC) @@ -1206,6 +1340,13 @@ func support() flagSet { fs.setIf((edx>>12)&1 == 1, SVMPFT) } + if maxExtendedFunction() >= 0x8000001a { + eax, _, _, _ := cpuid(0x8000001a) + fs.setIf((eax>>0)&1 == 1, FP128) + fs.setIf((eax>>1)&1 == 1, MOVU) + fs.setIf((eax>>2)&1 == 1, FP256) + } + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { eax, _, _, _ := cpuid(0x8000001b) fs.setIf((eax>>0)&1 == 1, IBSFFV) @@ -1216,6 +1357,10 @@ func support() flagSet { fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE) + fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX) + fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1. 
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4) } if maxExtendedFunction() >= 0x8000001f && vend == AMD { diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index 35678d8a3ee..c946824ec0a 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -24,7 +24,7 @@ func addInfo(c *CPUInfo, safe bool) { c.maxExFunc = maxExtendedFunction() c.BrandName = brandName() c.CacheLine = cacheLine() - c.Family, c.Model = familyModel() + c.Family, c.Model, c.Stepping = familyModel() c.featureSet = support() c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) c.ThreadsPerCore = threadsPerCore() diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index a9b3e36c707..8b6cd2b72ee 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -13,174 +13,207 @@ func _() { _ = x[AMD3DNOW-3] _ = x[AMD3DNOWEXT-4] _ = x[AMXBF16-5] - _ = x[AMXINT8-6] - _ = x[AMXTILE-7] - _ = x[AVX-8] - _ = x[AVX2-9] - _ = x[AVX512BF16-10] - _ = x[AVX512BITALG-11] - _ = x[AVX512BW-12] - _ = x[AVX512CD-13] - _ = x[AVX512DQ-14] - _ = x[AVX512ER-15] - _ = x[AVX512F-16] - _ = x[AVX512FP16-17] - _ = x[AVX512IFMA-18] - _ = x[AVX512PF-19] - _ = x[AVX512VBMI-20] - _ = x[AVX512VBMI2-21] - _ = x[AVX512VL-22] - _ = x[AVX512VNNI-23] - _ = x[AVX512VP2INTERSECT-24] - _ = x[AVX512VPOPCNTDQ-25] - _ = x[AVXSLOW-26] - _ = x[AVXVNNI-27] - _ = x[BMI1-28] - _ = x[BMI2-29] - _ = x[CETIBT-30] - _ = x[CETSS-31] - _ = x[CLDEMOTE-32] - _ = x[CLMUL-33] - _ = x[CLZERO-34] - _ = x[CMOV-35] - _ = x[CMPSB_SCADBS_SHORT-36] - _ = x[CMPXCHG8-37] - _ = x[CPBOOST-38] - _ = x[CX16-39] - _ = x[ENQCMD-40] - _ = x[ERMS-41] - _ = x[F16C-42] - _ = x[FMA3-43] - _ = x[FMA4-44] - _ = x[FXSR-45] - _ = x[FXSROPT-46] - _ = x[GFNI-47] - _ = x[HLE-48] - _ = x[HRESET-49] - _ = x[HTT-50] - _ = x[HWA-51] - _ = x[HYPERVISOR-52] - _ = x[IBPB-53] - _ = x[IBS-54] - _ = x[IBSBRNTRGT-55] - _ = x[IBSFETCHSAM-56] - _ = x[IBSFFV-57] - _ = x[IBSOPCNT-58] - _ = x[IBSOPCNTEXT-59] - _ = x[IBSOPSAM-60] - _ = x[IBSRDWROPCNT-61] - _ = x[IBSRIPINVALIDCHK-62] - _ = x[IBS_PREVENTHOST-63] - _ = x[INT_WBINVD-64] - _ = x[INVLPGB-65] - _ = x[LAHF-66] - _ = x[LAM-67] - _ = x[LBRVIRT-68] - _ = x[LZCNT-69] - _ = x[MCAOVERFLOW-70] - _ = x[MCOMMIT-71] - _ = x[MMX-72] - _ = x[MMXEXT-73] - _ = x[MOVBE-74] - _ = x[MOVDIR64B-75] - _ = x[MOVDIRI-76] - _ = x[MOVSB_ZL-77] - _ = x[MPX-78] - _ = x[MSRIRC-79] - _ = x[MSR_PAGEFLUSH-80] - _ = x[NRIPS-81] - _ = x[NX-82] - _ = x[OSXSAVE-83] - _ = x[PCONFIG-84] - _ = x[POPCNT-85] - _ = x[RDPRU-86] - _ = x[RDRAND-87] - _ = x[RDSEED-88] - _ = x[RDTSCP-89] - _ = x[RTM-90] - _ = x[RTM_ALWAYS_ABORT-91] - _ = x[SCE-92] - _ = x[SERIALIZE-93] - _ = x[SEV-94] - _ = x[SEV_64BIT-95] - _ = x[SEV_ALTERNATIVE-96] - _ = x[SEV_DEBUGSWAP-97] - _ = x[SEV_ES-98] - _ = x[SEV_RESTRICTED-99] - _ = x[SEV_SNP-100] - _ = x[SGX-101] - _ = x[SGXLC-102] - _ = x[SHA-103] - _ = x[SME-104] - _ = x[SME_COHERENT-105] - _ = x[SSE-106] - _ = x[SSE2-107] - _ = x[SSE3-108] - _ = x[SSE4-109] - _ = x[SSE42-110] - _ = x[SSE4A-111] - _ = x[SSSE3-112] - _ = x[STIBP-113] - _ = x[STOSB_SHORT-114] - _ = x[SUCCOR-115] - _ = x[SVM-116] - _ = x[SVMDA-117] - _ = x[SVMFBASID-118] - _ = x[SVML-119] - _ = x[SVMNP-120] - _ = x[SVMPF-121] - _ = x[SVMPFT-122] - _ = x[TBM-123] - _ = x[TME-124] - _ = x[TSCRATEMSR-125] - _ = x[TSXLDTRK-126] - _ = 
x[VAES-127] - _ = x[VMCBCLEAN-128] - _ = x[VMPL-129] - _ = x[VMSA_REGPROT-130] - _ = x[VMX-131] - _ = x[VPCLMULQDQ-132] - _ = x[VTE-133] - _ = x[WAITPKG-134] - _ = x[WBNOINVD-135] - _ = x[X87-136] - _ = x[XGETBV1-137] - _ = x[XOP-138] - _ = x[XSAVE-139] - _ = x[XSAVEC-140] - _ = x[XSAVEOPT-141] - _ = x[XSAVES-142] - _ = x[AESARM-143] - _ = x[ARMCPUID-144] - _ = x[ASIMD-145] - _ = x[ASIMDDP-146] - _ = x[ASIMDHP-147] - _ = x[ASIMDRDM-148] - _ = x[ATOMICS-149] - _ = x[CRC32-150] - _ = x[DCPOP-151] - _ = x[EVTSTRM-152] - _ = x[FCMA-153] - _ = x[FP-154] - _ = x[FPHP-155] - _ = x[GPA-156] - _ = x[JSCVT-157] - _ = x[LRCPC-158] - _ = x[PMULL-159] - _ = x[SHA1-160] - _ = x[SHA2-161] - _ = x[SHA3-162] - _ = x[SHA512-163] - _ = x[SM3-164] - _ = x[SM4-165] - _ = x[SVE-166] - _ = x[lastID-167] + _ = x[AMXFP16-6] + _ = x[AMXINT8-7] + _ = x[AMXTILE-8] + _ = x[AVX-9] + _ = x[AVX2-10] + _ = x[AVX512BF16-11] + _ = x[AVX512BITALG-12] + _ = x[AVX512BW-13] + _ = x[AVX512CD-14] + _ = x[AVX512DQ-15] + _ = x[AVX512ER-16] + _ = x[AVX512F-17] + _ = x[AVX512FP16-18] + _ = x[AVX512IFMA-19] + _ = x[AVX512PF-20] + _ = x[AVX512VBMI-21] + _ = x[AVX512VBMI2-22] + _ = x[AVX512VL-23] + _ = x[AVX512VNNI-24] + _ = x[AVX512VP2INTERSECT-25] + _ = x[AVX512VPOPCNTDQ-26] + _ = x[AVXIFMA-27] + _ = x[AVXNECONVERT-28] + _ = x[AVXSLOW-29] + _ = x[AVXVNNI-30] + _ = x[AVXVNNIINT8-31] + _ = x[BMI1-32] + _ = x[BMI2-33] + _ = x[CETIBT-34] + _ = x[CETSS-35] + _ = x[CLDEMOTE-36] + _ = x[CLMUL-37] + _ = x[CLZERO-38] + _ = x[CMOV-39] + _ = x[CMPCCXADD-40] + _ = x[CMPSB_SCADBS_SHORT-41] + _ = x[CMPXCHG8-42] + _ = x[CPBOOST-43] + _ = x[CPPC-44] + _ = x[CX16-45] + _ = x[EFER_LMSLE_UNS-46] + _ = x[ENQCMD-47] + _ = x[ERMS-48] + _ = x[F16C-49] + _ = x[FLUSH_L1D-50] + _ = x[FMA3-51] + _ = x[FMA4-52] + _ = x[FP128-53] + _ = x[FP256-54] + _ = x[FSRM-55] + _ = x[FXSR-56] + _ = x[FXSROPT-57] + _ = x[GFNI-58] + _ = x[HLE-59] + _ = x[HRESET-60] + _ = x[HTT-61] + _ = x[HWA-62] + _ = x[HYBRID_CPU-63] + _ = x[HYPERVISOR-64] + _ = x[IA32_ARCH_CAP-65] + _ = x[IA32_CORE_CAP-66] + _ = x[IBPB-67] + _ = x[IBRS-68] + _ = x[IBRS_PREFERRED-69] + _ = x[IBRS_PROVIDES_SMP-70] + _ = x[IBS-71] + _ = x[IBSBRNTRGT-72] + _ = x[IBSFETCHSAM-73] + _ = x[IBSFFV-74] + _ = x[IBSOPCNT-75] + _ = x[IBSOPCNTEXT-76] + _ = x[IBSOPSAM-77] + _ = x[IBSRDWROPCNT-78] + _ = x[IBSRIPINVALIDCHK-79] + _ = x[IBS_FETCH_CTLX-80] + _ = x[IBS_OPDATA4-81] + _ = x[IBS_OPFUSE-82] + _ = x[IBS_PREVENTHOST-83] + _ = x[IBS_ZEN4-84] + _ = x[INT_WBINVD-85] + _ = x[INVLPGB-86] + _ = x[LAHF-87] + _ = x[LAM-88] + _ = x[LBRVIRT-89] + _ = x[LZCNT-90] + _ = x[MCAOVERFLOW-91] + _ = x[MCDT_NO-92] + _ = x[MCOMMIT-93] + _ = x[MD_CLEAR-94] + _ = x[MMX-95] + _ = x[MMXEXT-96] + _ = x[MOVBE-97] + _ = x[MOVDIR64B-98] + _ = x[MOVDIRI-99] + _ = x[MOVSB_ZL-100] + _ = x[MOVU-101] + _ = x[MPX-102] + _ = x[MSRIRC-103] + _ = x[MSR_PAGEFLUSH-104] + _ = x[NRIPS-105] + _ = x[NX-106] + _ = x[OSXSAVE-107] + _ = x[PCONFIG-108] + _ = x[POPCNT-109] + _ = x[PPIN-110] + _ = x[PREFETCHI-111] + _ = x[PSFD-112] + _ = x[RDPRU-113] + _ = x[RDRAND-114] + _ = x[RDSEED-115] + _ = x[RDTSCP-116] + _ = x[RTM-117] + _ = x[RTM_ALWAYS_ABORT-118] + _ = x[SERIALIZE-119] + _ = x[SEV-120] + _ = x[SEV_64BIT-121] + _ = x[SEV_ALTERNATIVE-122] + _ = x[SEV_DEBUGSWAP-123] + _ = x[SEV_ES-124] + _ = x[SEV_RESTRICTED-125] + _ = x[SEV_SNP-126] + _ = x[SGX-127] + _ = x[SGXLC-128] + _ = x[SHA-129] + _ = x[SME-130] + _ = x[SME_COHERENT-131] + _ = x[SPEC_CTRL_SSBD-132] + _ = x[SRBDS_CTRL-133] + _ = x[SSE-134] + _ = x[SSE2-135] + _ = x[SSE3-136] + _ = x[SSE4-137] + _ = 
x[SSE42-138] + _ = x[SSE4A-139] + _ = x[SSSE3-140] + _ = x[STIBP-141] + _ = x[STIBP_ALWAYSON-142] + _ = x[STOSB_SHORT-143] + _ = x[SUCCOR-144] + _ = x[SVM-145] + _ = x[SVMDA-146] + _ = x[SVMFBASID-147] + _ = x[SVML-148] + _ = x[SVMNP-149] + _ = x[SVMPF-150] + _ = x[SVMPFT-151] + _ = x[SYSCALL-152] + _ = x[SYSEE-153] + _ = x[TBM-154] + _ = x[TLB_FLUSH_NESTED-155] + _ = x[TME-156] + _ = x[TOPEXT-157] + _ = x[TSCRATEMSR-158] + _ = x[TSXLDTRK-159] + _ = x[VAES-160] + _ = x[VMCBCLEAN-161] + _ = x[VMPL-162] + _ = x[VMSA_REGPROT-163] + _ = x[VMX-164] + _ = x[VPCLMULQDQ-165] + _ = x[VTE-166] + _ = x[WAITPKG-167] + _ = x[WBNOINVD-168] + _ = x[X87-169] + _ = x[XGETBV1-170] + _ = x[XOP-171] + _ = x[XSAVE-172] + _ = x[XSAVEC-173] + _ = x[XSAVEOPT-174] + _ = x[XSAVES-175] + _ = x[AESARM-176] + _ = x[ARMCPUID-177] + _ = x[ASIMD-178] + _ = x[ASIMDDP-179] + _ = x[ASIMDHP-180] + _ = x[ASIMDRDM-181] + _ = x[ATOMICS-182] + _ = x[CRC32-183] + _ = x[DCPOP-184] + _ = x[EVTSTRM-185] + _ = x[FCMA-186] + _ = x[FP-187] + _ = x[FPHP-188] + _ = x[GPA-189] + _ = x[JSCVT-190] + _ = x[LRCPC-191] + _ = x[PMULL-192] + _ = x[SHA1-193] + _ = x[SHA2-194] + _ = x[SHA3-195] + _ = x[SHA512-196] + _ = x[SM3-197] + _ = x[SM4-198] + _ = x[SVE-199] + _ = x[lastID-200] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4INT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 278, 282, 288, 293, 301, 306, 312, 316, 325, 343, 351, 358, 362, 366, 380, 386, 390, 394, 403, 407, 411, 416, 421, 425, 429, 436, 440, 443, 449, 452, 455, 465, 475, 488, 501, 505, 509, 523, 540, 543, 553, 564, 570, 578, 589, 597, 609, 625, 639, 650, 660, 675, 683, 693, 700, 704, 707, 714, 719, 730, 737, 744, 752, 755, 761, 766, 775, 782, 790, 794, 797, 803, 816, 821, 823, 830, 837, 843, 847, 856, 860, 865, 871, 877, 883, 886, 902, 911, 914, 923, 938, 951, 957, 971, 978, 981, 986, 989, 992, 1004, 1018, 1028, 1031, 1035, 1039, 1043, 1048, 1053, 1058, 1063, 1077, 1088, 1094, 1097, 1102, 1111, 1115, 1120, 1125, 1131, 1138, 1143, 1146, 1162, 1165, 1171, 1181, 1189, 1193, 1202, 1206, 1218, 1221, 1231, 1234, 1241, 1249, 1252, 1259, 1262, 1267, 1273, 1281, 1287, 1293, 1301, 1306, 1313, 1320, 1328, 1335, 1340, 1345, 1352, 1356, 1358, 1362, 1365, 1370, 1375, 1380, 1384, 1388, 1392, 1398, 1401, 1404, 1407, 1413} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git 
a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go index d91d0210947..84b1acd2153 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -83,7 +83,7 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) { c.Model = sysctlGetInt(0, "machdep.cpu.model") c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize") c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") - c.Cache.L1D = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize") c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore index 12ecd6ae9b7..8ae0384ebce 100644 --- a/vendor/github.com/minio/minio-go/v7/.gitignore +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -2,4 +2,5 @@ *.test validator golangci-lint -functional_tests \ No newline at end of file +functional_tests +.idea \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index b17f3146b8b..e31e4cf929e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -23,8 +23,6 @@ import ( "fmt" "io" "net/http" - "net/url" - "strconv" "sync" "github.com/minio/minio-go/v7/pkg/s3utils" @@ -654,19 +652,11 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o return nil, ObjectInfo{}, nil, err } - urlValues := make(url.Values) - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - if opts.PartNumber > 0 { - urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber)) - } - // Execute GET on objectName. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, - queryValues: urlValues, + queryValues: opts.toQueryValues(), customHeader: opts.Header(), contentSHA256Hex: emptySHA256Hex, }) diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 0082c1fa762..bb86a59941a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -20,6 +20,8 @@ package minio import ( "fmt" "net/http" + "net/url" + "strconv" "time" "github.com/minio/minio-go/v7/pkg/encrypt" @@ -36,6 +38,7 @@ type AdvancedGetOptions struct { // during GET requests. type GetObjectOptions struct { headers map[string]string + reqParams url.Values ServerSideEncryption encrypt.ServerSide VersionID string PartNumber int @@ -83,6 +86,34 @@ func (o *GetObjectOptions) Set(key, value string) { o.headers[http.CanonicalHeaderKey(key)] = value } +// SetReqParam - set request query string parameter +// supported key: see supportedQueryValues. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) SetReqParam(key, value string) { + if !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Set(key, value) +} + +// AddReqParam - add request query string parameter +// supported key: see supportedQueryValues. +// If an unsupported key is passed in, it will be ignored and nothing will be done. 
+func (o *GetObjectOptions) AddReqParam(key, value string) { + if !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add(key, value) +} + // SetMatchETag - set match etag. func (o *GetObjectOptions) SetMatchETag(etag string) error { if etag == "" { @@ -149,3 +180,24 @@ func (o *GetObjectOptions) SetRange(start, end int64) error { } return nil } + +// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters. +func (o *GetObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.VersionID != "" { + urlValues.Set("versionId", o.VersionID) + } + if o.PartNumber > 0 { + urlValues.Set("partNumber", strconv.Itoa(o.PartNumber)) + } + + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index cdc7d3d3885..55b3f38e678 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -500,8 +500,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - md5Hash := c.md5Hasher() - defer md5Hash.Close() // Total data read and written to server. should be equal to 'size' at the end of the call. var totalUploadedSize int64 @@ -569,9 +567,10 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam var md5Base64 string if opts.SendContentMd5 { - md5Hash.Reset() + md5Hash := c.md5Hasher() md5Hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) + md5Hash.Close() } defer wg.Done() @@ -590,6 +589,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { errCh <- uerr + return } // Save successfully uploaded part metadata. diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 2376ee87466..b29df17d498 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -93,6 +93,28 @@ type PutObjectOptions struct { // This can be used for faster uploads on non-seekable or slow-to-seek input. ConcurrentStreamParts bool Internal AdvancedPutOptions + + customHeaders http.Header +} + +// SetMatchETag if etag matches while PUT MinIO returns an error +// this is a MinIO specific extension to support optimistic locking +// semantics. +func (opts *PutObjectOptions) SetMatchETag(etag string) { + if opts.customHeaders == nil { + opts.customHeaders = http.Header{} + } + opts.customHeaders.Set("If-Match", "\""+etag+"\"") +} + +// SetMatchETagExcept if etag does not match while PUT MinIO returns an +// error this is a MinIO specific extension to support optimistic locking +// semantics. 
+func (opts *PutObjectOptions) SetMatchETagExcept(etag string) { + if opts.customHeaders == nil { + opts.customHeaders = http.Header{} + } + opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") } // getNumThreads - gets the number of threads to be used in the multipart @@ -187,6 +209,12 @@ func (opts PutObjectOptions) Header() (header http.Header) { header.Set("x-amz-meta-"+k, v) } } + + // set any other additional custom headers. + for k, v := range opts.customHeaders { + header[k] = v + } + return } diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go index 899bc6b730b..849471e33bb 100644 --- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -59,6 +59,7 @@ type SnowballObject struct { Size int64 // Modtime to apply to the object. + // If Modtime is the zero value current time will be used. ModTime time.Time // Content of the object. @@ -172,6 +173,10 @@ objectLoop: ModTime: obj.ModTime, Format: tar.FormatPAX, } + if header.ModTime.IsZero() { + header.ModTime = time.Now().UTC() + } + if err := t.WriteHeader(&header); err != nil { closeObj() return err diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 99a03df958f..7ec7a620b0d 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -118,7 +118,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.47" + libraryVersion = "v7.0.49" ) // User Agent should always following the below style. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index a0ad090468b..dee0a8cbb03 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -140,6 +140,9 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { if err != nil { return Value{}, err } + if req.Form == nil { + req.Form = url.Values{} + } req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) resp, err := i.Client.Do(req) diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index 589c0e5498f..8bf2e5baab3 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -48,6 +48,7 @@ var awsS3EndpointMap = map[string]string{ "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com", + "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com", } // getS3Endpoint get Amazon S3 endpoint based on the bucket location. 
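Editorial note, not part of the patch: the minio-go hunks above add two small client-facing options. GetObjectOptions gains SetReqParam/AddReqParam for passing whitelisted response-override query parameters (anything outside supportedQueryValues is silently dropped), and PutObjectOptions gains SetMatchETag/SetMatchETagExcept, which surface as If-Match/If-None-Match headers for MinIO's optimistic-locking PUT extension. A minimal sketch of how calling code might use them, assuming the vendored minio-go v7.0.49 shown in this patch; the ETag value is a placeholder:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	// GetObject: response-* overrides travel as query parameters; keys not in
	// supportedQueryValues (utils.go above) are ignored by design.
	var getOpts minio.GetObjectOptions
	getOpts.SetReqParam("response-content-type", "application/json")
	getOpts.SetReqParam("x-not-supported", "ignored") // no-op: unsupported key

	// PutObject: MinIO-specific conditional write via ETag matching.
	var putOpts minio.PutObjectOptions
	putOpts.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e") // hypothetical ETag
	fmt.Println(putOpts.Header().Get("If-Match"))            // prints the quoted ETag
}

Against a live server these options would simply be passed to client.GetObject / client.PutObject; the sketch only shows the header/query side effects that the new setters produce.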
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 7ce8ec5caf7..9389a7fafb1 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -511,6 +511,23 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") } +// supportedQueryValues is a list of query strings that can be passed in when using GetObject. +var supportedQueryValues = map[string]bool{ + "partNumber": true, + "versionId": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "response-content-language": true, + "response-content-type": true, + "response-expires": true, +} + +// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized. +func isStandardQueryValue(qsKey string) bool { + return supportedQueryValues[qsKey] +} + var ( md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index aeb73f81a14..5577c0f939a 100644 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -50,7 +50,7 @@ func (ih InvalidHashPrefixError) Error() string { type InvalidCostError int func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) } const ( @@ -82,11 +82,20 @@ type hashed struct { minor byte } +// ErrPasswordTooLong is returned when the password passed to +// GenerateFromPassword is too long (i.e. > 72 bytes). +var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") + // GenerateFromPassword returns the bcrypt hash of the password at the given // cost. If the cost given is less than MinCost, the cost will be set to // DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, // to compare the returned hashed password with its cleartext version. +// GenerateFromPassword does not accept passwords longer than 72 bytes, which +// is the longest password bcrypt will operate on. 
func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + if len(password) > 72 { + return nil, ErrPasswordTooLong + } p, err := newFromPassword(password, cost) if err != nil { return nil, err diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go index 7499e3fb69d..05de9cc2cdc 100644 --- a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -14,6 +14,7 @@ package rc2 import ( "crypto/cipher" "encoding/binary" + "math/bits" ) // The rc2 block size in bytes @@ -80,10 +81,6 @@ func expandKey(key []byte, t1 int) [64]uint16 { return k } -func rotl16(x uint16, b uint) uint16 { - return (x >> (16 - b)) | (x << b) -} - func (c *rc2Cipher) Encrypt(dst, src []byte) { r0 := binary.LittleEndian.Uint16(src[0:]) @@ -96,22 +93,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 16 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -124,22 +121,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 40 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -152,22 +149,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 60 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -188,22 +185,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 44 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- } @@ -215,22 +212,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 20 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & 
r1) j-- @@ -243,22 +240,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 0 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index 750ac52f2a5..b7dbd186957 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -18,7 +18,7 @@ type inflow struct { unsent int32 } -// set sets the initial window. +// init sets the initial window. func (f *inflow) init(n int32) { f.avail = n } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 184ac45feb7..c1f6b90dc32 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -662,6 +662,15 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { // It is the caller's responsibility not to violate the maximum frame size // and to not call other Write methods concurrently. func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil { + return err + } + return f.endWrite() +} + +// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer. +// The caller should call endWrite to flush the frame to the underlying writer. +func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } @@ -691,7 +700,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by } f.wbuf = append(f.wbuf, data...) f.wbuf = append(f.wbuf, pad...) - return f.endWrite() + return nil } // A SettingsFrame conveys configuration parameters that affect how diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index ebdfbee964a..7a1d976696a 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -211,7 +211,7 @@ func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { return dt.ents[dt.len()-(int(i)-staticTable.len())], true } -// Decode decodes an entire block. +// DecodeFull decodes an entire block. // // TODO: remove this method and make it incremental later? This is // easier for debugging now. 
@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { var hf HeaderField wantStr := d.emitEnabled || it.indexed() + var undecodedName undecodedString if nameIdx > 0 { ihf, ok := d.at(nameIdx) if !ok { @@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { } hf.Name = ihf.Name } else { - hf.Name, buf, err = d.readString(buf, wantStr) + undecodedName, buf, err = d.readString(buf) if err != nil { return err } } - hf.Value, buf, err = d.readString(buf, wantStr) + undecodedValue, buf, err := d.readString(buf) if err != nil { return err } + if wantStr { + if nameIdx <= 0 { + hf.Name, err = d.decodeString(undecodedName) + if err != nil { + return err + } + } + hf.Value, err = d.decodeString(undecodedValue) + if err != nil { + return err + } + } d.buf = buf if it.indexed() { d.dynTab.add(hf) @@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { return 0, origP, errNeedMore } -// readString decodes an hpack string from p. +// readString reads an hpack string from p. // -// wantStr is whether s will be used. If false, decompression and -// []byte->string garbage are skipped if s will be ignored -// anyway. This does mean that huffman decoding errors for non-indexed -// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server -// is returning an error anyway, and because they're not indexed, the error -// won't affect the decoding state. -func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { +// It returns a reference to the encoded string data to permit deferring decode costs +// until after the caller verifies all data is present. +func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) { if len(p) == 0 { - return "", p, errNeedMore + return u, p, errNeedMore } isHuff := p[0]&128 != 0 strLen, p, err := readVarInt(7, p) if err != nil { - return "", p, err + return u, p, err } if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { - return "", nil, ErrStringLength + // Returning an error here means Huffman decoding errors + // for non-indexed strings past the maximum string length + // are ignored, but the server is returning an error anyway + // and because the string is not indexed the error will not + // affect the decoding state. 
+ return u, nil, ErrStringLength } if uint64(len(p)) < strLen { - return "", p, errNeedMore - } - if !isHuff { - if wantStr { - s = string(p[:strLen]) - } - return s, p[strLen:], nil + return u, p, errNeedMore } + u.isHuff = isHuff + u.b = p[:strLen] + return u, p[strLen:], nil +} - if wantStr { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() // don't trust others - defer bufPool.Put(buf) - if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { - buf.Reset() - return "", nil, err - } +type undecodedString struct { + isHuff bool + b []byte +} + +func (d *Decoder) decodeString(u undecodedString) (string, error) { + if !u.isHuff { + return string(u.b), nil + } + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + var s string + err := huffmanDecode(buf, d.maxStrLen, u.b) + if err == nil { s = buf.String() - buf.Reset() // be nice to GC } - return s, p[strLen:], nil + buf.Reset() // be nice to GC + bufPool.Put(buf) + return s, err } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b624dc0a705..8cb14f3c97f 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -843,8 +843,13 @@ type frameWriteResult struct { // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + var err error + if wd == nil { + err = wr.write.writeFrame(sc) + } else { + err = sc.framer.endWrite() + } sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} } @@ -1251,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) sc.wroteFrame(frameWriteResult{wr: wr, err: err}) + } else if wd, ok := wr.write.(*writeData); ok { + // Encode the frame in the serve goroutine, to ensure we don't have + // any lingering asynchronous references to data passed to Write. + // See https://go.dev/issue/58446. + sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil) + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr, wd) } else { sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) + go sc.writeFrameAsync(wr, nil) } } @@ -2192,7 +2204,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r tlsState = sc.tlsState } - needsContinue := rp.header.Get("Expect") == "100-continue" + needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") if needsContinue { rp.header.Del("Expect") } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b43ec10cfed..05ba23d3d98 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1569,7 +1569,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { close(cs.donec) } -// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams. +// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. // Must hold cc.mu. 
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go index 1f422e71dc3..846f0e1f9cd 100644 --- a/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -245,7 +245,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { return true, offset, nil } -// SetChecksum enables the kernel checksum processing. If on is ture, +// SetChecksum enables the kernel checksum processing. If on is true, // the offset should be an offset in bytes into the data of where the // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go index d5dfbab24fd..f8b779ea27e 100644 --- a/vendor/golang.org/x/net/netutil/listen.go +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -29,7 +29,7 @@ type limitListener struct { } // acquire acquires the limiting semaphore. Returns true if successfully -// accquired, false if the listener is closed and the semaphore is not +// acquired, false if the listener is closed and the semaphore is not // acquired. func (l *limitListener) acquire() bool { select { diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go index 9bf4286c794..d6c71101e4d 100644 --- a/vendor/golang.org/x/net/trace/histogram.go +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -32,7 +32,7 @@ type histogram struct { valueCount int64 // number of values recorded for single value } -// AddMeasurement records a value measurement observation to the histogram. +// addMeasurement records a value measurement observation to the histogram. func (h *histogram) addMeasurement(value int64) { // TODO: assert invariant h.sum += value diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index a4605e6d12e..6cc73109f59 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (386 || amd64 || amd64p32) && gccgo // +build 386 amd64 amd64p32 // +build gccgo diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go new file mode 100644 index 00000000000..93ce03a3460 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/endian_big.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 + +package cpu + +// IsBigEndian records whether the GOARCH's byte order is big endian. +const IsBigEndian = true diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go new file mode 100644 index 00000000000..fe545966b61 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/endian_little.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh + +package cpu + +// IsBigEndian records whether the GOARCH's byte order is big endian. +const IsBigEndian = false diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index c4fce0e7003..f98a1c542f0 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo,!hurd -// +build !aix,!hurd +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 1f63382182f..192b071b3d0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -230,6 +230,7 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } +func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) } //sysnb pipe(p *[2]int32) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b11ede89a96..6a91d471d09 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) { return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. + Len: uint32(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9ed8eec6c28..48110a0abb9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) { return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
+ Len: uint64(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index f8ac9824790..52f1d4b75a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. + Len: uint32(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 8e932036ec3..5537ee4f2ed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. + Len: uint64(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index cbe12227896..164abd5d215 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
+ Len: uint64(countin), + } err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index d839962e663..5443dddd48d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1800,6 +1800,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) +//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) @@ -1999,7 +2000,7 @@ func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { // offs2lohi splits offs into its low and high order bits. func offs2lohi(offs int64) (lo, hi uintptr) { const longBits = SizeofLong * 8 - return uintptr(offs), uintptr(uint64(offs) >> longBits) + return uintptr(offs), uintptr(uint64(offs) >> (longBits - 1) >> 1) // two shifts to avoid false positive in vet } func Readv(fd int, iovs [][]byte) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index a386f8897df..00f0aa37588 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -578,7 +578,7 @@ func Lutimes(path string, tv []Timeval) error { return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } -// emptyIovec reports whether there are no bytes in the slice of Iovec. +// emptyIovecs reports whether there are no bytes in the slice of Iovec. func emptyIovecs(iov []Iovec) bool { for i := range iov { if iov[i].Len > 0 { diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 3d893040553..616b1b28485 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -9,7 +9,7 @@ package unix import "time" -// TimespecToNSec returns the time stored in ts as nanoseconds. +// TimespecToNsec returns the time stored in ts as nanoseconds. func TimespecToNsec(ts Timespec) int64 { return ts.Nano() } // NsecToTimespec converts a number of nanoseconds into a Timespec. diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 663b3779de2..f5f8e9f3665 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -36,9 +36,14 @@ func xattrnamespace(fullattr string) (ns int, attr string, err error) { func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { if len(dest) > idx { return unsafe.Pointer(&dest[idx]) - } else { - return unsafe.Pointer(_zero) } + if dest != nil { + // extattr_get_file and extattr_list_file treat NULL differently from + // a non-NULL pointer of length zero. Preserve the property of nilness, + // even if we can't use dest directly. 
+ return unsafe.Pointer(&_zero) + } + return nil } // FreeBSD and NetBSD implement their own syscalls to handle extended attributes diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 785d693eb32..e174685adbd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -457,7 +457,6 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -563,6 +562,7 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_BUS_OFF_THRESHOLD = 0x100 CAN_CTRLMODE_3_SAMPLES = 0x4 CAN_CTRLMODE_BERR_REPORTING = 0x10 CAN_CTRLMODE_CC_LEN8_DLC = 0x100 @@ -577,9 +577,12 @@ const ( CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff + CAN_ERROR_PASSIVE_THRESHOLD = 0x80 + CAN_ERROR_WARNING_THRESHOLD = 0x60 CAN_ERR_ACK = 0x20 CAN_ERR_BUSERROR = 0x80 CAN_ERR_BUSOFF = 0x40 + CAN_ERR_CNT = 0x200 CAN_ERR_CRTL = 0x4 CAN_ERR_CRTL_ACTIVE = 0x40 CAN_ERR_CRTL_RX_OVERFLOW = 0x1 @@ -820,9 +823,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2022-02-22)" + DM_VERSION_EXTRA = "-ioctl (2022-07-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2e + DM_VERSION_MINOR = 0x2f DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -1049,6 +1052,7 @@ const ( ETH_P_CAIF = 0xf7 ETH_P_CAN = 0xc ETH_P_CANFD = 0xd + ETH_P_CANXL = 0xe ETH_P_CFM = 0x8902 ETH_P_CONTROL = 0x16 ETH_P_CUST = 0x6006 @@ -1060,6 +1064,7 @@ const ( ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b ETH_P_DSA_8021Q = 0xdadb + ETH_P_DSA_A5PSW = 0xe001 ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1194,8 +1199,10 @@ const ( FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORE = 0x400 FAN_MARK_IGNORED_MASK = 0x20 FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 @@ -1253,6 +1260,7 @@ const ( FSCRYPT_MODE_AES_128_CBC = 0x5 FSCRYPT_MODE_AES_128_CTS = 0x6 FSCRYPT_MODE_AES_256_CTS = 0x4 + FSCRYPT_MODE_AES_256_HCTR2 = 0xa FSCRYPT_MODE_AES_256_XTS = 0x1 FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2 FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3 @@ -1430,6 +1438,7 @@ const ( IFF_NOARP = 0x80 IFF_NOFILTER = 0x1000 IFF_NOTRAILERS = 0x20 + IFF_NO_CARRIER = 0x40 IFF_NO_PI = 0x1000 IFF_ONE_QUEUE = 0x2000 IFF_PERSIST = 0x800 @@ -1805,6 +1814,7 @@ const ( MADV_DONTDUMP = 0x10 MADV_DONTFORK = 0xa MADV_DONTNEED = 0x4 + MADV_DONTNEED_LOCKED = 0x18 MADV_FREE = 0x8 MADV_HUGEPAGE = 0xe MADV_HWPOISON = 0x64 @@ -1846,7 +1856,7 @@ const ( MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16GB = 0x88000000 MFD_HUGE_16MB = 0x60000000 MFD_HUGE_1GB = 0x78000000 MFD_HUGE_1MB = 0x50000000 @@ -2212,6 +2222,11 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BR_ARM64_DEBUG_DATA = 0x7 + PERF_BR_ARM64_DEBUG_EXIT = 0x5 + PERF_BR_ARM64_DEBUG_HALT = 0x4 + PERF_BR_ARM64_DEBUG_INST = 0x6 + PERF_BR_ARM64_FIQ = 0x3 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 @@ -2232,6 +2247,8 @@ const ( PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 PERF_MEM_LVLNUM_ANY_CACHE = 0xb + PERF_MEM_LVLNUM_CXL = 0x9 + PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 PERF_MEM_LVLNUM_L3 = 0x3 @@ -2265,6 +2282,7 @@ const ( 
PERF_MEM_REMOTE_REMOTE = 0x1 PERF_MEM_REMOTE_SHIFT = 0x25 PERF_MEM_SNOOPX_FWD = 0x1 + PERF_MEM_SNOOPX_PEER = 0x2 PERF_MEM_SNOOPX_SHIFT = 0x26 PERF_MEM_SNOOP_HIT = 0x4 PERF_MEM_SNOOP_HITM = 0x10 @@ -2301,7 +2319,6 @@ const ( PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 PIPEFS_MAGIC = 0x50495045 - PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e PRIO_PGRP = 0x1 @@ -2999,6 +3016,7 @@ const ( STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 STATX_CTIME = 0x80 + STATX_DIOALIGN = 0x2000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3392,9 +3410,7 @@ const ( XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 - Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 - ZSMALLOC_MAGIC = 0x58295829 _HIDIOCGRAWNAME_LEN = 0x80 _HIDIOCGRAWPHYS_LEN = 0x40 _HIDIOCGRAWUNIQ_LEN = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 36c0dfc7c4c..a46df0f1e57 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -133,6 +133,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc03c4d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 4ff942703b7..6cd4a3ea9d3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -133,6 +133,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3eaa0fb78e3..c7ebee24df3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d7995bdc3a2..9d5352c3e45 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -134,6 +134,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 928e24c2053..f26a164f4aa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -132,6 +132,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 179bffb474b..890bc3c9b70 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 
0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 1fba17bd75c..549f26ac646 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index b77dde31537..e0365e32c17 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 78c6c751bfa..fdccce15ca2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1c0d31f0b4c..b2205c83faa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 959dd9bb8fc..81aa5ad0f69 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5a873cdbc9d..76807a1fd4f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e336d141e1f..d4a5ab9e4e0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 390c01d92a5..66e65db9519 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 98a6e5f11f5..f619252691e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -136,6 +136,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 293cf36804e..36ea3a55b72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -537,6 +537,17 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockAdjtime(clockid int32, buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_CLOCK_ADJTIME, uintptr(clockid), uintptr(unsafe.Pointer(buf)), 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGetres(clockid int32, res *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index ff6881167d9..7d9fc8f1c91 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -29,6 +29,41 @@ type Itimerval struct { Value Timeval } +const ( + ADJ_OFFSET = 0x1 + ADJ_FREQUENCY = 0x2 + ADJ_MAXERROR = 0x4 + ADJ_ESTERROR = 0x8 + ADJ_STATUS = 0x10 + ADJ_TIMECONST = 0x20 + ADJ_TAI = 0x80 + ADJ_SETOFFSET = 0x100 + ADJ_MICRO = 0x1000 + ADJ_NANO = 0x2000 + ADJ_TICK = 0x4000 + ADJ_OFFSET_SINGLESHOT = 0x8001 + ADJ_OFFSET_SS_READ = 0xa001 +) + +const ( + STA_PLL = 0x1 + STA_PPSFREQ = 0x2 + STA_PPSTIME = 0x4 + STA_FLL = 0x8 + STA_INS = 0x10 + STA_DEL = 0x20 + STA_UNSYNC = 0x40 + STA_FREQHOLD = 0x80 + STA_PPSSIGNAL = 0x100 + STA_PPSJITTER = 0x200 + STA_PPSWANDER = 0x400 + STA_PPSERROR = 0x800 + STA_CLOCKERR = 0x1000 + STA_NANO = 0x2000 + STA_MODE = 0x4000 + STA_CLK = 0x8000 +) + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -53,29 +88,30 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - _ uint64 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + 
Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + _ [12]uint64 } type Fsid struct { @@ -1099,7 +1135,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1118,7 +1155,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 - PERF_SAMPLE_BRANCH_MAX = 0x40000 + PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 + PERF_SAMPLE_BRANCH_MAX = 0x80000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1132,7 +1170,10 @@ const ( PERF_BR_COND_RET = 0xa PERF_BR_ERET = 0xb PERF_BR_IRQ = 0xc - PERF_BR_MAX = 0xd + PERF_BR_SERROR = 0xd + PERF_BR_NO_TX = 0xe + PERF_BR_EXTEND_ABI = 0xf + PERF_BR_MAX = 0x10 PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1151,7 +1192,8 @@ const ( PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_FORMAT_MAX = 0x10 + PERF_FORMAT_LOST = 0x10 + PERF_FORMAT_MAX = 0x20 PERF_IOC_FLAG_GROUP = 0x1 PERF_RECORD_MMAP = 0x1 PERF_RECORD_LOST = 0x2 @@ -2979,7 +3021,16 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x51 + DEVLINK_CMD_RATE_GET = 0x4a + DEVLINK_CMD_RATE_SET = 0x4b + DEVLINK_CMD_RATE_NEW = 0x4c + DEVLINK_CMD_RATE_DEL = 0x4d + DEVLINK_CMD_LINECARD_GET = 0x4e + DEVLINK_CMD_LINECARD_SET = 0x4f + DEVLINK_CMD_LINECARD_NEW = 0x50 + DEVLINK_CMD_LINECARD_DEL = 0x51 + DEVLINK_CMD_SELFTESTS_GET = 0x52 + DEVLINK_CMD_MAX = 0x53 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3208,7 +3259,13 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xae + DEVLINK_ATTR_LINECARD_INDEX = 0xab + DEVLINK_ATTR_LINECARD_STATE = 0xac + DEVLINK_ATTR_LINECARD_TYPE = 0xad + DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae + DEVLINK_ATTR_NESTED_DEVLINK = 0xaf + DEVLINK_ATTR_SELFTESTS = 0xb0 + DEVLINK_ATTR_MAX = 0xb0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3317,7 +3374,8 @@ const ( LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 LWTUNNEL_ENCAP_IOAM6 = 0x9 - LWTUNNEL_ENCAP_MAX = 0x9 + LWTUNNEL_ENCAP_XFRM = 0xa + LWTUNNEL_ENCAP_MAX = 0xa MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3512,7 +3570,9 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21 ETHTOOL_MSG_MODULE_GET = 0x22 ETHTOOL_MSG_MODULE_SET = 0x23 - ETHTOOL_MSG_USER_MAX = 0x23 + ETHTOOL_MSG_PSE_GET = 0x24 + ETHTOOL_MSG_PSE_SET = 0x25 + ETHTOOL_MSG_USER_MAX = 0x25 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3550,7 +3610,8 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22 ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 - ETHTOOL_MSG_KERNEL_MAX = 0x24 + ETHTOOL_MSG_PSE_GET_REPLY = 0x25 + ETHTOOL_MSG_KERNEL_MAX = 0x25 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3609,7 +3670,8 @@ const ( ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7 
ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8 ETHTOOL_A_LINKMODES_LANES = 0x9 - ETHTOOL_A_LINKMODES_MAX = 0x9 + ETHTOOL_A_LINKMODES_RATE_MATCHING = 0xa + ETHTOOL_A_LINKMODES_MAX = 0xa ETHTOOL_A_LINKSTATE_UNSPEC = 0x0 ETHTOOL_A_LINKSTATE_HEADER = 0x1 ETHTOOL_A_LINKSTATE_LINK = 0x2 @@ -4201,6 +4263,9 @@ const ( NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 NL80211_AC_VI = 0x1 NL80211_AC_VO = 0x0 + NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 0x1 + NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 0x2 + NL80211_AP_SME_SA_QUERY_OFFLOAD = 0x1 NL80211_ATTR_4ADDR = 0x53 NL80211_ATTR_ACK = 0x5c NL80211_ATTR_ACK_SIGNAL = 0x107 @@ -4209,6 +4274,7 @@ const ( NL80211_ATTR_AIRTIME_WEIGHT = 0x112 NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4240,6 +4306,9 @@ const ( NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 NL80211_ATTR_COALESCE_RULE_MAX = 0x3 NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_COLOR_CHANGE_COLOR = 0x130 + NL80211_ATTR_COLOR_CHANGE_COUNT = 0x12f + NL80211_ATTR_COLOR_CHANGE_ELEMS = 0x131 NL80211_ATTR_CONN_FAILED_REASON = 0x9b NL80211_ATTR_CONTROL_PORT = 0x44 NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 @@ -4266,6 +4335,7 @@ const ( NL80211_ATTR_DEVICE_AP_SME = 0x8d NL80211_ATTR_DFS_CAC_TIME = 0x7 NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_EHT = 0x137 NL80211_ATTR_DISABLE_HE = 0x12d NL80211_ATTR_DISABLE_HT = 0x93 NL80211_ATTR_DISABLE_VHT = 0xaf @@ -4273,6 +4343,8 @@ const ( NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 @@ -4337,10 +4409,11 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x137 + NL80211_ATTR_MAX = 0x140 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde @@ -4350,6 +4423,8 @@ const ( NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MBSSID_CONFIG = 0x132 + NL80211_ATTR_MBSSID_ELEMS = 0x133 NL80211_ATTR_MCAST_RATE = 0x6b NL80211_ATTR_MDID = 0xb1 NL80211_ATTR_MEASUREMENT_DURATION = 0xeb @@ -4359,6 +4434,11 @@ const ( NL80211_ATTR_MESH_PEER_AID = 0xed NL80211_ATTR_MESH_SETUP = 0x70 NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MLD_ADDR = 0x13a + NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_ID = 0x139 + NL80211_ATTR_MLO_LINKS = 0x138 + NL80211_ATTR_MLO_SUPPORT = 0x13b NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4371,6 +4451,7 @@ const ( NL80211_ATTR_NETNS_FD = 0xdb NL80211_ATTR_NOACK_MAP = 0x95 NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OBSS_COLOR_BITMAP = 0x12e NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c NL80211_ATTR_OPER_CLASS = 0xd6 NL80211_ATTR_OPMODE_NOTIF = 0xc2 @@ -4397,6 +4478,7 @@ const ( NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 NL80211_ATTR_REASON_CODE = 0x36 NL80211_ATTR_RECEIVE_MULTICAST = 0x121 @@ -4412,6 
+4494,7 @@ const ( NL80211_ATTR_RESP_IE = 0x4e NL80211_ATTR_ROAM_SUPPORT = 0x83 NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RX_HW_TIMESTAMP = 0x140 NL80211_ATTR_RXMGMT_FLAGS = 0xbc NL80211_ATTR_RX_SIGNAL_DBM = 0x97 NL80211_ATTR_S1G_CAPABILITY = 0x128 @@ -4484,6 +4567,7 @@ const ( NL80211_ATTR_TSID = 0xd2 NL80211_ATTR_TWT_RESPONDER = 0x116 NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_HW_TIMESTAMP = 0x13f NL80211_ATTR_TX_NO_CCK_RATE = 0x87 NL80211_ATTR_TXQ_LIMIT = 0x10a NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b @@ -4557,6 +4641,10 @@ const ( NL80211_BAND_ATTR_RATES = 0x2 NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET = 0xa + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY = 0x9 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE = 0xb NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 @@ -4564,6 +4652,8 @@ const ( NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS = 0x7 + NL80211_BAND_LC = 0x5 NL80211_BAND_S1GHZ = 0x4 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 NL80211_BITRATE_ATTR_MAX = 0x2 @@ -4584,7 +4674,9 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x14 + NL80211_BSS_MAX = 0x16 + NL80211_BSS_MLD_ADDR = 0x16 + NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 NL80211_BSS_PARENT_BSSID = 0x12 NL80211_BSS_PARENT_TSF = 0x11 @@ -4612,6 +4704,7 @@ const ( NL80211_CHAN_WIDTH_20 = 0x1 NL80211_CHAN_WIDTH_20_NOHT = 0x0 NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_320 = 0xd NL80211_CHAN_WIDTH_40 = 0x2 NL80211_CHAN_WIDTH_4 = 0xa NL80211_CHAN_WIDTH_5 = 0x6 @@ -4621,8 +4714,11 @@ const ( NL80211_CMD_ABORT_SCAN = 0x72 NL80211_CMD_ACTION = 0x3b NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_LINK = 0x94 + NL80211_CMD_ADD_LINK_STA = 0x96 NL80211_CMD_ADD_NAN_FUNCTION = 0x75 NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOC_COMEBACK = 0x93 NL80211_CMD_ASSOCIATE = 0x26 NL80211_CMD_AUTHENTICATE = 0x25 NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 @@ -4630,6 +4726,10 @@ const ( NL80211_CMD_CHANNEL_SWITCH = 0x66 NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_COLOR_CHANGE_ABORTED = 0x90 + NL80211_CMD_COLOR_CHANGE_COMPLETED = 0x91 + NL80211_CMD_COLOR_CHANGE_REQUEST = 0x8e + NL80211_CMD_COLOR_CHANGE_STARTED = 0x8f NL80211_CMD_CONNECT = 0x2e NL80211_CMD_CONN_FAILED = 0x5b NL80211_CMD_CONTROL_PORT_FRAME = 0x81 @@ -4678,8 +4778,9 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x93 + NL80211_CMD_MAX = 0x98 NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 NL80211_CMD_NEW_BEACON = 0xf NL80211_CMD_NEW_INTERFACE = 0x7 @@ -4692,6 +4793,7 @@ const ( NL80211_CMD_NEW_WIPHY = 0x3 NL80211_CMD_NOTIFY_CQM = 0x40 NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_OBSS_COLOR_COLLISION = 0x8d NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 NL80211_CMD_PEER_MEASUREMENT_START = 0x83 @@ -4707,6 +4809,8 @@ const ( NL80211_CMD_REGISTER_FRAME = 0x3a NL80211_CMD_RELOAD_REGDB = 0x7e NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REMOVE_LINK = 0x95 + NL80211_CMD_REMOVE_LINK_STA = 0x98 NL80211_CMD_REQ_SET_REG = 0x1b NL80211_CMD_ROAM = 0x2f 
NL80211_CMD_SCAN_ABORTED = 0x23 @@ -4717,6 +4821,7 @@ const ( NL80211_CMD_SET_CHANNEL = 0x41 NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_FILS_AAD = 0x92 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -4791,6 +4896,8 @@ const ( NL80211_EDMG_BW_CONFIG_MIN = 0x4 NL80211_EDMG_CHANNELS_MAX = 0x3c NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EHT_MAX_CAPABILITY_LEN = 0x33 + NL80211_EHT_MIN_CAPABILITY_LEN = 0xd NL80211_EXTERNAL_AUTH_ABORT = 0x1 NL80211_EXTERNAL_AUTH_START = 0x0 NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 @@ -4807,6 +4914,7 @@ const ( NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_COLOR = 0x3a NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a @@ -4818,6 +4926,7 @@ const ( NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD = 0x3b NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe @@ -4833,8 +4942,10 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 @@ -4906,7 +5017,9 @@ const ( NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa @@ -5006,6 +5119,12 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 + NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 + NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 + NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 NL80211_MESHCONF_ATTR_MAX = 0x1f NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 NL80211_MESHCONF_AWAKE_WINDOW = 0x1b @@ -5168,6 +5287,7 @@ const ( NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR = 0xd NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6 @@ -5244,12 +5364,36 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 + NL80211_RATE_INFO_EHT_GI_1_6 = 0x1 + NL80211_RATE_INFO_EHT_GI_3_2 = 0x2 + NL80211_RATE_INFO_EHT_GI = 
0x15 + NL80211_RATE_INFO_EHT_MCS = 0x13 + NL80211_RATE_INFO_EHT_NSS = 0x14 + NL80211_RATE_INFO_EHT_RU_ALLOC_106 = 0x3 + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26 = 0x4 + NL80211_RATE_INFO_EHT_RU_ALLOC_242 = 0x5 + NL80211_RATE_INFO_EHT_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 = 0xb + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484 = 0xc + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996 = 0xd + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484 = 0xe + NL80211_RATE_INFO_EHT_RU_ALLOC_484 = 0x6 + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242 = 0x7 + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996 = 0xf + NL80211_RATE_INFO_EHT_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26 = 0x2 + NL80211_RATE_INFO_EHT_RU_ALLOC_996 = 0x8 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484 = 0x9 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242 = 0xa + NL80211_RATE_INFO_EHT_RU_ALLOC = 0x16 NL80211_RATE_INFO_HE_1XLTF = 0x0 NL80211_RATE_INFO_HE_2XLTF = 0x1 NL80211_RATE_INFO_HE_4XLTF = 0x2 @@ -5292,6 +5436,7 @@ const ( NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 + NL80211_RRF_NO_320MHZ = 0x40000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 NL80211_RRF_NO_HE = 0x20000 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index a49853e9d3a..41cb3c01fd9 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,7 +10,6 @@ import ( errorspkg "errors" "fmt" "runtime" - "strings" "sync" "syscall" "time" @@ -87,22 +86,13 @@ func StringToUTF16(s string) []uint16 { // s, with a terminating NUL added. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). func UTF16FromString(s string) ([]uint16, error) { - if strings.IndexByte(s, 0) != -1 { - return nil, syscall.EINVAL - } - return utf16.Encode([]rune(s + "\x00")), nil + return syscall.UTF16FromString(s) } // UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, // with a terminating NUL and any bytes after the NUL removed. func UTF16ToString(s []uint16) string { - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) + return syscall.UTF16ToString(s) } // StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go index 83816a72a8a..8c1b6666fb8 100644 --- a/vendor/golang.org/x/text/internal/language/compact/language.go +++ b/vendor/golang.org/x/text/internal/language/compact/language.go @@ -118,7 +118,7 @@ func (t Tag) Parent() Tag { return Tag{language: lang, locale: lang} } -// returns token t and the rest of the string. +// nextToken returns token t and the rest of the string. func nextToken(s string) (t, tail string) { p := strings.Index(s[1:], "-") if p == -1 { diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go index 6105bc7fadc..09d41c73670 100644 --- a/vendor/golang.org/x/text/internal/language/language.go +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -409,7 +409,7 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { return t, nil } -// findKeyAndType returns the start and end position for the type corresponding +// findTypeForKey returns the start and end position for the type corresponding // to key or the point at which to insert the key-value pair if the type // wasn't found. 
The hasExt return value reports whether an -u extension was present. // Note: the extensions are typically very small and are likely to contain diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go index 289b3a36d52..4d9c6612129 100644 --- a/vendor/golang.org/x/text/language/language.go +++ b/vendor/golang.org/x/text/language/language.go @@ -344,7 +344,7 @@ func (t Tag) Parent() Tag { return Tag(compact.Tag(t).Parent()) } -// returns token t and the rest of the string. +// nextToken returns token t and the rest of the string. func nextToken(s string) (t, tail string) { p := strings.Index(s[1:], "-") if p == -1 { diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go index e8bda06e6ff..48b8e66d6d6 100644 --- a/vendor/gopkg.in/ini.v1/deprecated.go +++ b/vendor/gopkg.in/ini.v1/deprecated.go @@ -14,12 +14,9 @@ package ini -const ( +var ( // Deprecated: Use "DefaultSection" instead. DEFAULT_SECTION = DefaultSection -) - -var ( // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. AllCapsUnderscore = SnackCase ) diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index ac2a93a5b39..99e7f86511a 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -23,15 +23,15 @@ import ( ) const ( - // DefaultSection is the name of default section. You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - // Maximum allowed depth when recursively substituing variable names. depthValues = 99 ) var ( + // DefaultSection is the name of default section. You can use this var or the string literal. + // In most of cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + // LineBreak is the delimiter to determine or compose a new line. // This variable will be changed to "\r\n" automatically on Windows at package init time. 
LineBreak = "\n" diff --git a/vendor/modules.txt b/vendor/modules.txt index fc01a725a74..4fd0d8d7fbe 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -519,7 +519,7 @@ github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/s2 github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.1.0 +# github.com/klauspost/cpuid/v2 v2.2.3 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 # github.com/kylelemons/godebug v1.1.0 @@ -557,7 +557,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.47 +# github.com/minio/minio-go/v7 v7.0.49 ## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials @@ -1068,7 +1068,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.1.0 +# golang.org/x/crypto v0.6.0 ## explicit; go 1.17 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1083,7 +1083,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.7.0 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.5.0 +# golang.org/x/net v0.7.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -1113,7 +1113,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.4.0 +# golang.org/x/sys v0.5.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1121,7 +1121,7 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.6.0 +# golang.org/x/text v0.7.0 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/internal @@ -1313,7 +1313,7 @@ google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/alecthomas/kingpin.v2 v2.2.6 ## explicit gopkg.in/alecthomas/kingpin.v2 -# gopkg.in/ini.v1 v1.66.6 +# gopkg.in/ini.v1 v1.67.0 ## explicit gopkg.in/ini.v1 # gopkg.in/yaml.v2 v2.4.0 From 295ff7938b527317f6098550376639d669fa72db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 10:23:32 -0800 Subject: [PATCH 079/133] Bump golang.org/x/net from 0.5.0 to 0.7.0 (#5161) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.5.0 to 0.7.0. - [Release notes](https://github.com/golang/net/releases) - [Commits](https://github.com/golang/net/compare/v0.5.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le From 8bcc4862c3d0a77d0eb2e3e207ecfbd0800b6835 Mon Sep 17 00:00:00 2001 From: Alvin Lin Date: Tue, 21 Feb 2023 12:46:13 -0800 Subject: [PATCH 080/133] Update README.md for chunk deprecation note (#5164) Since 1.14.0 is released, change from "to be" to "was. Signed-off-by: Alvin Lin Signed-off-by: Alex Le --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d2474dca1c2..9701ab033dc 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Cortex is primarily used as a [remote write](https://prometheus.io/docs/operatin The chunks storage is deprecated since v1.10.0. You're encouraged to use the [blocks storage](docs/blocks-storage/_index.md). 
-Chunks storage is scheduled to be removed in release 1.14.0 +Chunks storage was removed in release 1.14.0 ## Documentation From 8d6adfaa9533c8ff3d89a6ecc9477516f4c9e312 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Wed, 22 Feb 2023 07:29:16 -0800 Subject: [PATCH 081/133] Disabling ValidateJsonRawMessage flag from json jsoniter (#5167) Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- pkg/querier/tripperware/instantquery/instant_query.go | 2 +- pkg/querier/tripperware/query.go | 2 +- pkg/querier/tripperware/queryrange/query_range.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index fd1f3559db3..beb9a04cc5d 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -35,7 +35,7 @@ var ( json = jsoniter.Config{ EscapeHTML: false, // No HTML in our responses. SortMapKeys: true, - ValidateJsonRawMessage: true, + ValidateJsonRawMessage: false, }.Froze() ) diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 6c6d0dd3bbd..708d4aa3b08 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -28,7 +28,7 @@ var ( json = jsoniter.Config{ EscapeHTML: false, // No HTML in our responses. SortMapKeys: true, - ValidateJsonRawMessage: true, + ValidateJsonRawMessage: false, }.Froze() ) diff --git a/pkg/querier/tripperware/queryrange/query_range.go b/pkg/querier/tripperware/queryrange/query_range.go index 56654f18be2..1c39d0b60d2 100644 --- a/pkg/querier/tripperware/queryrange/query_range.go +++ b/pkg/querier/tripperware/queryrange/query_range.go @@ -34,7 +34,7 @@ var ( json = jsoniter.Config{ EscapeHTML: false, // No HTML in our responses. SortMapKeys: true, - ValidateJsonRawMessage: true, + ValidateJsonRawMessage: false, }.Froze() errEndBeforeStart = httpgrpc.Errorf(http.StatusBadRequest, "end timestamp must not be before start time") errNegativeStep = httpgrpc.Errorf(http.StatusBadRequest, "zero or negative query resolution step widths are not accepted. 
Try a positive integer") From 0b21bdc532b26e968faab241789c97a2ae1238ce Mon Sep 17 00:00:00 2001 From: Harry John Date: Wed, 22 Feb 2023 09:14:04 -0800 Subject: [PATCH 082/133] StoreGateway: Allow filtering out recent blocks during sync (#5166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Allow filtering out recent blocks during sync Signed-off-by: 🌲 Harry 🌊 John 🏔 * Fix build failures and comments Signed-off-by: 🌲 Harry 🌊 John 🏔 --------- Signed-off-by: 🌲 Harry 🌊 John 🏔 Signed-off-by: Alex Le --- CHANGELOG.md | 1 + docs/blocks-storage/querier.md | 9 +++ docs/blocks-storage/store-gateway.md | 9 +++ docs/configuration/config-file-reference.md | 9 +++ pkg/querier/blocks_finder_bucket_index.go | 1 + .../blocks_finder_bucket_index_test.go | 15 ++++- pkg/querier/blocks_finder_bucket_scan.go | 6 ++ pkg/querier/blocks_finder_bucket_scan_test.go | 11 ++++ pkg/querier/blocks_store_queryable.go | 2 + pkg/storage/tsdb/config.go | 2 + pkg/storegateway/bucket_stores.go | 5 ++ pkg/storegateway/metadata_fetcher_filters.go | 30 +++++++++ .../metadata_fetcher_filters_test.go | 64 +++++++++++++++++++ 13 files changed, 162 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9b308e95a4..17f62475c93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ * [FEATURE] Added zstd as an option for grpc compression #5092 * [FEATURE] Ring: Add new kv store option `dynamodb`. #5026 * [FEATURE] Cache: Support redis as backend for caching bucket and index cache. #5057 +* [FEATURE] Querier/Store-Gateway: Added `-blocks-storage.bucket-store.ignore-blocks-within` allowing to filter out the recently created blocks from being synced by queriers and store-gateways. #5166 * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. #5044 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 7dee1d32fa7..89546149346 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -1084,6 +1084,15 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.ignore-deletion-marks-delay [ignore_deletion_mark_delay: | default = 6h] + # The blocks created since `now() - ignore_blocks_within` will not be + # synced. This should be used together with `-querier.query-store-after` to + # filter out the blocks that are too new to be queried. A reasonable value + # for this flag would be `-querier.query-store-after - + # blocks-storage.bucket-store.bucket-index.max-stale-period` to give some + # buffer. 0 to disable. + # CLI flag: -blocks-storage.bucket-store.ignore-blocks-within + [ignore_blocks_within: | default = 0s] + bucket_index: # True to enable querier and store-gateway to discover blocks in the # storage via bucket index instead of bucket scanning. diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 23b8295f35a..907e7cbbf7f 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -1161,6 +1161,15 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.ignore-deletion-marks-delay [ignore_deletion_mark_delay: | default = 6h] + # The blocks created since `now() - ignore_blocks_within` will not be + # synced. This should be used together with `-querier.query-store-after` to + # filter out the blocks that are too new to be queried. 
A reasonable value + # for this flag would be `-querier.query-store-after - + # blocks-storage.bucket-store.bucket-index.max-stale-period` to give some + # buffer. 0 to disable. + # CLI flag: -blocks-storage.bucket-store.ignore-blocks-within + [ignore_blocks_within: | default = 0s] + bucket_index: # True to enable querier and store-gateway to discover blocks in the # storage via bucket index instead of bucket scanning. diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index a85cc9957fe..3076d1320b6 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -4024,6 +4024,15 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.ignore-deletion-marks-delay [ignore_deletion_mark_delay: | default = 6h] + # The blocks created since `now() - ignore_blocks_within` will not be synced. + # This should be used together with `-querier.query-store-after` to filter out + # the blocks that are too new to be queried. A reasonable value for this flag + # would be `-querier.query-store-after - + # blocks-storage.bucket-store.bucket-index.max-stale-period` to give some + # buffer. 0 to disable. + # CLI flag: -blocks-storage.bucket-store.ignore-blocks-within + [ignore_blocks_within: | default = 0s] + bucket_index: # True to enable querier and store-gateway to discover blocks in the storage # via bucket index instead of bucket scanning. diff --git a/pkg/querier/blocks_finder_bucket_index.go b/pkg/querier/blocks_finder_bucket_index.go index 551582c21cf..31686fcb7ba 100644 --- a/pkg/querier/blocks_finder_bucket_index.go +++ b/pkg/querier/blocks_finder_bucket_index.go @@ -24,6 +24,7 @@ type BucketIndexBlocksFinderConfig struct { IndexLoader bucketindex.LoaderConfig MaxStalePeriod time.Duration IgnoreDeletionMarksDelay time.Duration + IgnoreBlocksWithin time.Duration } // BucketIndexBlocksFinder implements BlocksFinder interface and find blocks in the bucket diff --git a/pkg/querier/blocks_finder_bucket_index_test.go b/pkg/querier/blocks_finder_bucket_index_test.go index 70fd84e0f9c..9f574e0b10e 100644 --- a/pkg/querier/blocks_finder_bucket_index_test.go +++ b/pkg/querier/blocks_finder_bucket_index_test.go @@ -25,17 +25,19 @@ func TestBucketIndexBlocksFinder_GetBlocks(t *testing.T) { bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) // Mock a bucket index. + now := time.Now() block1 := &bucketindex.Block{ID: ulid.MustNew(1, nil), MinTime: 10, MaxTime: 15} block2 := &bucketindex.Block{ID: ulid.MustNew(2, nil), MinTime: 12, MaxTime: 20} block3 := &bucketindex.Block{ID: ulid.MustNew(3, nil), MinTime: 20, MaxTime: 30} block4 := &bucketindex.Block{ID: ulid.MustNew(4, nil), MinTime: 30, MaxTime: 40} - block5 := &bucketindex.Block{ID: ulid.MustNew(5, nil), MinTime: 30, MaxTime: 40} // Time range overlaps with block4, but this block deletion mark is above the threshold. + block5 := &bucketindex.Block{ID: ulid.MustNew(5, nil), MinTime: 30, MaxTime: 40} // Time range overlaps with block4, but this block deletion mark is above the threshold. + block6 := &bucketindex.Block{ID: ulid.MustNew(6, nil), MinTime: now.Add(-2 * time.Hour).UnixMilli(), MaxTime: now.UnixMilli()} // This block is within ignoreBlocksWithin and shouldn't be loaded. 
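	// A minimal sketch of the queryability rule that block6 above exercises, assuming the same
	// semantics as the IgnoreNonQueryableBlocksFilter.Filter implementation shown later in this
	// change: a block is skipped when its MinTime falls within `now() - ignore_blocks_within`.
	// isQueryable is a hypothetical helper used only for illustration; it is not part of the
	// Cortex code.
	//
	//	func isQueryable(minTimeMillis int64, ignoreWithin time.Duration) bool {
	//		cutoff := time.Now().Add(-ignoreWithin).UnixMilli()
	//		return minTimeMillis <= cutoff
	//	}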
mark3 := &bucketindex.BlockDeletionMark{ID: block3.ID, DeletionTime: time.Now().Unix()} mark5 := &bucketindex.BlockDeletionMark{ID: block5.ID, DeletionTime: time.Now().Add(-2 * time.Hour).Unix()} require.NoError(t, bucketindex.WriteIndex(ctx, bkt, userID, nil, &bucketindex.Index{ Version: bucketindex.IndexVersion1, - Blocks: bucketindex.Blocks{block1, block2, block3, block4, block5}, + Blocks: bucketindex.Blocks{block1, block2, block3, block4, block5, block6}, BlockDeletionMarks: bucketindex.BlockDeletionMarks{mark3, mark5}, UpdatedAt: time.Now().Unix(), })) @@ -102,6 +104,14 @@ func TestBucketIndexBlocksFinder_GetBlocks(t *testing.T) { block3.ID: mark3, }, }, + "query range matching all blocks but should ignore non-queryable block": { + minT: 0, + maxT: block5.MaxTime, + expectedBlocks: bucketindex.Blocks{block4, block3, block2, block1}, + expectedMarks: map[ulid.ULID]*bucketindex.BlockDeletionMark{ + block3.ID: mark3, + }, + }, } for testName, testData := range tests { @@ -209,6 +219,7 @@ func prepareBucketIndexBlocksFinder(t testing.TB, bkt objstore.Bucket) *BucketIn }, MaxStalePeriod: time.Hour, IgnoreDeletionMarksDelay: time.Hour, + IgnoreBlocksWithin: 10 * time.Hour, } finder := NewBucketIndexBlocksFinder(cfg, bkt, nil, log.NewNopLogger(), nil) diff --git a/pkg/querier/blocks_finder_bucket_scan.go b/pkg/querier/blocks_finder_bucket_scan.go index 0012a6beb10..43b0b12f8c4 100644 --- a/pkg/querier/blocks_finder_bucket_scan.go +++ b/pkg/querier/blocks_finder_bucket_scan.go @@ -42,6 +42,7 @@ type BucketScanBlocksFinderConfig struct { CacheDir string ConsistencyDelay time.Duration IgnoreDeletionMarksDelay time.Duration + IgnoreBlocksWithin time.Duration } // BucketScanBlocksFinder is a BlocksFinder implementation periodically scanning the bucket to discover blocks. @@ -378,6 +379,11 @@ func (d *BucketScanBlocksFinder) createMetaFetcher(userID string) (block.Metadat deletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, d.cfg.IgnoreDeletionMarksDelay, d.cfg.MetasConcurrency) filters := []block.MetadataFilter{deletionMarkFilter} + // Here we filter out the blocks that are too new to query. 
+ if d.cfg.IgnoreBlocksWithin > 0 { + filters = append(filters, storegateway.NewIgnoreNonQueryableBlocksFilter(d.logger, d.cfg.IgnoreBlocksWithin)) + } + f, err := block.NewMetaFetcher( userLogger, d.cfg.MetasConcurrency, diff --git a/pkg/querier/blocks_finder_bucket_scan_test.go b/pkg/querier/blocks_finder_bucket_scan_test.go index b05452eb619..748a0976db8 100644 --- a/pkg/querier/blocks_finder_bucket_scan_test.go +++ b/pkg/querier/blocks_finder_bucket_scan_test.go @@ -383,10 +383,12 @@ func TestBucketScanBlocksFinder_GetBlocks(t *testing.T) { ctx := context.Background() s, bucket, _, _ := prepareBucketScanBlocksFinder(t, prepareBucketScanBlocksFinderConfig()) + now := time.Now() block1 := cortex_testutil.MockStorageBlock(t, bucket, "user-1", 10, 15) block2 := cortex_testutil.MockStorageBlock(t, bucket, "user-1", 12, 20) block3 := cortex_testutil.MockStorageBlock(t, bucket, "user-1", 20, 30) block4 := cortex_testutil.MockStorageBlock(t, bucket, "user-1", 30, 40) + block5 := cortex_testutil.MockStorageBlock(t, bucket, "user-1", now.Add(-2*time.Hour).UnixMilli(), now.UnixMilli()) // This block is within ignoreBlocksWithin mark3 := bucketindex.BlockDeletionMarkFromThanosMarker(cortex_testutil.MockStorageDeletionMark(t, bucket, "user-1", block3)) require.NoError(t, services.StartAndAwaitRunning(ctx, s)) @@ -451,6 +453,14 @@ func TestBucketScanBlocksFinder_GetBlocks(t *testing.T) { block3.ULID: mark3, }, }, + "query range matching all blocks but should ignore non-queryable block": { + minT: 0, + maxT: block5.MaxTime, + expectedMetas: []tsdb.BlockMeta{block4, block3, block2, block1}, + expectedMarks: map[ulid.ULID]*bucketindex.BlockDeletionMark{ + block3.ULID: mark3, + }, + }, } for testName, testData := range tests { @@ -488,5 +498,6 @@ func prepareBucketScanBlocksFinderConfig() BucketScanBlocksFinderConfig { TenantsConcurrency: 10, MetasConcurrency: 10, IgnoreDeletionMarksDelay: time.Hour, + IgnoreBlocksWithin: 10 * time.Hour, // All blocks created in the last 10 hour shoudn't be scanned. 
} } diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 95f69bda97b..841d7fdf6a3 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -198,6 +198,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa }, MaxStalePeriod: storageCfg.BucketStore.BucketIndex.MaxStalePeriod, IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay, + IgnoreBlocksWithin: storageCfg.BucketStore.IgnoreBlocksWithin, }, bucketClient, limits, logger, reg) } else { finder = NewBucketScanBlocksFinder(BucketScanBlocksFinderConfig{ @@ -206,6 +207,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa MetasConcurrency: storageCfg.BucketStore.MetaSyncConcurrency, CacheDir: storageCfg.BucketStore.SyncDir, IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay, + IgnoreBlocksWithin: storageCfg.BucketStore.IgnoreBlocksWithin, }, bucketClient, limits, logger, reg) } diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index ed57dc5ba30..2e030220a3e 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -239,6 +239,7 @@ type BucketStoreConfig struct { ChunksCache ChunksCacheConfig `yaml:"chunks_cache"` MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` + IgnoreBlocksWithin time.Duration `yaml:"ignore_blocks_within"` BucketIndex BucketIndexConfig `yaml:"bucket_index"` // Chunk pool. @@ -282,6 +283,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "blocks-storage.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ "Default is 6h, half of the default value for -compactor.deletion-delay.") + f.DurationVar(&cfg.IgnoreBlocksWithin, "blocks-storage.bucket-store.ignore-blocks-within", 0, "The blocks created since `now() - ignore_blocks_within` will not be synced. This should be used together with `-querier.query-store-after` to filter out the blocks that are too new to be queried. A reasonable value for this flag would be `-querier.query-store-after - blocks-storage.bucket-store.bucket-index.max-stale-period` to give some buffer. 
0 to disable.") f.IntVar(&cfg.PostingOffsetsInMemSampling, "blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") f.BoolVar(&cfg.IndexHeaderLazyLoadingEnabled, "blocks-storage.bucket-store.index-header-lazy-loading-enabled", false, "If enabled, store-gateway will lazily memory-map an index-header only once required by a query.") f.DurationVar(&cfg.IndexHeaderLazyLoadingIdleTimeout, "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout", 20*time.Minute, "If index-header lazy loading is enabled and this setting is > 0, the store-gateway will release memory-mapped index-headers after 'idle timeout' inactivity.") diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index 680ad43a6e0..ac3fe6a2b07 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -435,6 +435,11 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro // Remove Cortex external labels so that they're not injected when querying blocks. }...) + if u.cfg.BucketStore.IgnoreBlocksWithin > 0 { + // Filter out blocks that are too new to be queried. + filters = append(filters, NewIgnoreNonQueryableBlocksFilter(userLogger, u.cfg.BucketStore.IgnoreBlocksWithin)) + } + // Instantiate a different blocks metadata fetcher based on whether bucket index is enabled or not. var fetcher block.MetadataFetcher if u.cfg.BucketStore.BucketIndex.Enabled { diff --git a/pkg/storegateway/metadata_fetcher_filters.go b/pkg/storegateway/metadata_fetcher_filters.go index efbca69d0d5..3bc35b88cac 100644 --- a/pkg/storegateway/metadata_fetcher_filters.go +++ b/pkg/storegateway/metadata_fetcher_filters.go @@ -5,6 +5,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" @@ -75,3 +76,32 @@ func (f *IgnoreDeletionMarkFilter) FilterWithBucketIndex(_ context.Context, meta return nil } + +func NewIgnoreNonQueryableBlocksFilter(logger log.Logger, ignoreWithin time.Duration) *IgnoreNonQueryableBlocksFilter { + return &IgnoreNonQueryableBlocksFilter{ + logger: logger, + ignoreWithin: ignoreWithin, + } +} + +// IgnoreNonQueryableBlocksFilter ignores blocks that are too new be queried. +// This has be used in conjunction with `-querier.query-store-after` with some buffer. +type IgnoreNonQueryableBlocksFilter struct { + // Blocks that were created since `now() - ignoreWithin` will not be synced. + ignoreWithin time.Duration + logger log.Logger +} + +// Filter implements block.MetadataFilter. 
+func (f *IgnoreNonQueryableBlocksFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced block.GaugeVec, modified block.GaugeVec) error { + ignoreWithin := time.Now().Add(-f.ignoreWithin).UnixMilli() + + for id, m := range metas { + if m.MinTime > ignoreWithin { + level.Debug(f.logger).Log("msg", "ignoring block because it won't be queried", "id", id) + delete(metas, id) + } + } + + return nil +} diff --git a/pkg/storegateway/metadata_fetcher_filters_test.go b/pkg/storegateway/metadata_fetcher_filters_test.go index 0d988a99210..12a8a0f9619 100644 --- a/pkg/storegateway/metadata_fetcher_filters_test.go +++ b/pkg/storegateway/metadata_fetcher_filters_test.go @@ -19,6 +19,8 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" + "github.com/prometheus/prometheus/tsdb" + "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" @@ -106,3 +108,65 @@ func testIgnoreDeletionMarkFilter(t *testing.T, bucketIndexEnabled bool) { assert.Equal(t, expectedMetas, inputMetas) assert.Equal(t, expectedDeletionMarks, f.DeletionMarkBlocks()) } + +func TestIgnoreNonQueryableBlocksFilter(t *testing.T) { + now := time.Now() + ctx := context.Background() + logger := log.NewNopLogger() + + inputMetas := map[ulid.ULID]*metadata.Meta{ + ulid.MustNew(1, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-2 * time.Hour).UnixMilli(), + MaxTime: now.Add(-0 * time.Hour).UnixMilli(), + }, + }, + ulid.MustNew(2, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-4 * time.Hour).UnixMilli(), + MaxTime: now.Add(-2 * time.Hour).UnixMilli(), + }, + }, + ulid.MustNew(3, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-6 * time.Hour).UnixMilli(), + MaxTime: now.Add(-4 * time.Hour).UnixMilli(), + }, + }, + ulid.MustNew(4, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-8 * time.Hour).UnixMilli(), + MaxTime: now.Add(-6 * time.Hour).UnixMilli(), + }, + }, + } + + expectedMetas := map[ulid.ULID]*metadata.Meta{ + ulid.MustNew(2, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-4 * time.Hour).UnixMilli(), + MaxTime: now.Add(-2 * time.Hour).UnixMilli(), + }, + }, + ulid.MustNew(3, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-6 * time.Hour).UnixMilli(), + MaxTime: now.Add(-4 * time.Hour).UnixMilli(), + }, + }, + ulid.MustNew(4, nil): { + BlockMeta: tsdb.BlockMeta{ + MinTime: now.Add(-8 * time.Hour).UnixMilli(), + MaxTime: now.Add(-6 * time.Hour).UnixMilli(), + }, + }, + } + + synced := extprom.NewTxGaugeVec(nil, prometheus.GaugeOpts{Name: "synced"}, []string{"state"}) + modified := extprom.NewTxGaugeVec(nil, prometheus.GaugeOpts{Name: "modified"}, []string{"state"}) + + f := NewIgnoreNonQueryableBlocksFilter(logger, 3*time.Hour) + + require.NoError(t, f.Filter(ctx, inputMetas, synced, modified)) + assert.Equal(t, expectedMetas, inputMetas) +} From 76c949b3f75199fad3413e1553a836471370636e Mon Sep 17 00:00:00 2001 From: Kama Huang <121007071+kama910@users.noreply.github.com> Date: Thu, 23 Feb 2023 22:37:09 -0800 Subject: [PATCH 083/133] remove unnecessary sprintf (#5173) Signed-off-by: Kama Huang Signed-off-by: Alex Le --- pkg/util/limiter/query_limiter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/limiter/query_limiter.go b/pkg/util/limiter/query_limiter.go index 13da54dc74d..84031711e1b 100644 --- a/pkg/util/limiter/query_limiter.go +++ 
b/pkg/util/limiter/query_limiter.go @@ -119,7 +119,7 @@ func (ql *QueryLimiter) AddChunks(count int) error { } if ql.chunkCount.Add(int64(count)) > int64(ql.maxChunksPerQuery) { - return fmt.Errorf(fmt.Sprintf(ErrMaxChunksPerQueryLimit, ql.maxChunksPerQuery)) + return fmt.Errorf(ErrMaxChunksPerQueryLimit, ql.maxChunksPerQuery) } return nil } From 77eca8abaf7a10c5eb0c3f40d1e0568739d457e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Feb 2023 08:49:52 -0800 Subject: [PATCH 084/133] Bump go.etcd.io/etcd/client/v3 from 3.5.6 to 3.5.7 (#5162) Bumps [go.etcd.io/etcd/client/v3](https://github.com/etcd-io/etcd) from 3.5.6 to 3.5.7. - [Release notes](https://github.com/etcd-io/etcd/releases) - [Changelog](https://github.com/etcd-io/etcd/blob/main/Dockerfile-release.ppc64le) - [Commits](https://github.com/etcd-io/etcd/compare/v3.5.6...v3.5.7) --- updated-dependencies: - dependency-name: go.etcd.io/etcd/client/v3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Alex Le --- go.mod | 2 +- go.sum | 8 ++----- vendor/go.etcd.io/etcd/client/v3/watch.go | 26 ----------------------- vendor/modules.txt | 4 ++-- 4 files changed, 5 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index fad8859df30..39c4209ccd1 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.7 go.etcd.io/etcd/client/pkg/v3 v3.5.7 - go.etcd.io/etcd/client/v3 v3.5.6 + go.etcd.io/etcd/client/v3 v3.5.7 go.opentelemetry.io/contrib/propagators/aws v1.14.0 go.opentelemetry.io/otel v1.13.0 go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 diff --git a/go.sum b/go.sum index 65baa920744..17aba9fea82 100644 --- a/go.sum +++ b/go.sum @@ -1336,7 +1336,6 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod h1:25h+Uz1WvXDBZYwqGX8PAb71RBkcjxEVV/R5wGnsq4I= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= @@ -1582,17 +1581,15 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= 
-go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E= -go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= @@ -2368,7 +2365,6 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index 90f125ac466..bc886936c86 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -18,7 +18,6 @@ import ( "context" "errors" "fmt" - "strings" "sync" "time" @@ -589,26 +588,6 @@ func (w *watchGrpcStream) run() { switch { case pbresp.Created: - cancelReasonError := v3rpc.Error(errors.New(pbresp.CancelReason)) - if shouldRetryWatch(cancelReasonError) { - var newErr error - if wc, newErr = w.newWatchClient(); newErr != nil { - w.lg.Error("failed to create a new watch client", zap.Error(newErr)) - return - } - - if len(w.resuming) != 0 { - if ws := w.resuming[0]; ws != nil { - if err := wc.Send(ws.initReq.toPB()); err != nil { - w.lg.Debug("error when sending request", zap.Error(err)) - } - } - } - - cur = nil - continue - } - // response to head of queue creation if len(w.resuming) != 0 { if ws := w.resuming[0]; ws != nil { @@ -718,11 +697,6 @@ func (w *watchGrpcStream) run() { } } -func shouldRetryWatch(cancelReasonError error) bool { - return (strings.Compare(cancelReasonError.Error(), v3rpc.ErrGRPCInvalidAuthToken.Error()) == 0) || - (strings.Compare(cancelReasonError.Error(), v3rpc.ErrGRPCAuthOldRevision.Error()) == 0) -} - // nextResume chooses the next resuming to register with the grpc stream. Abandoned // streams are marked as nil in the queue since the head must wait for its inflight registration. 
func (w *watchGrpcStream) nextResume() *watcherStream { diff --git a/vendor/modules.txt b/vendor/modules.txt index 4fd0d8d7fbe..beef81fea34 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -938,8 +938,8 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.6 -## explicit; go 1.16 +# go.etcd.io/etcd/client/v3 v3.5.7 +## explicit; go 1.17 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint From 607b748a47220b9fdd40656adbef5356770e1bbc Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 24 Feb 2023 11:35:51 -0800 Subject: [PATCH 085/133] add a trace span for merging series from ingesters (#5176) Signed-off-by: Ben Ye Signed-off-by: Alex Le --- pkg/distributor/query.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 2a58c4955c1..6543185b2eb 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -355,6 +355,8 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri return nil, err } + span, _ := opentracing.StartSpanFromContext(ctx, "Distributor.MergeIngesterStreams") + defer span.Finish() hashToChunkseries := map[string]ingester_client.TimeSeriesChunk{} hashToTimeSeries := map[string]cortexpb.TimeSeries{} From cb352bbe6f399d93ed745cbaaad7f2f9beb517d0 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 24 Feb 2023 12:03:56 -0800 Subject: [PATCH 086/133] [chore] Update Prometheus/Thanos/Thanos engine (#5177) * Updating Dependencies Signed-off-by: Alan Protasio * fix test Signed-off-by: Alan Protasio * fix ling Signed-off-by: Alan Protasio * fix test Signed-off-by: Alan Protasio * Adding KeepFiringFor on the response Signed-off-by: Alan Protasio * changelog Signed-off-by: Alan Protasio --------- Signed-off-by: Alan Protasio Signed-off-by: Alex Le --- CHANGELOG.md | 1 + go.mod | 70 +- go.sum | 160 +- pkg/api/handlers.go | 1 + pkg/configs/userconfig/config.go | 1 + pkg/configs/userconfig/config_test.go | 1 + pkg/distributor/distributor.go | 2 +- pkg/ingester/ingester.go | 7 +- pkg/querier/block.go | 2 +- pkg/querier/block_test.go | 9 +- pkg/querier/blocks_store_queryable_test.go | 3 +- pkg/querier/chunk_store_queryable.go | 2 +- pkg/querier/distributor_queryable_test.go | 2 +- pkg/querier/error_translate_queryable_test.go | 1 + pkg/querier/remote_read.go | 3 +- pkg/querier/series/series_set.go | 8 +- .../tenantfederation/merge_queryable.go | 4 +- pkg/querier/timeseries_series_set.go | 2 +- pkg/querier/timeseries_series_set_test.go | 10 +- .../tripperware/queryrange/series_test.go | 3 +- pkg/ruler/compat.go | 2 +- pkg/ruler/ruler.go | 11 +- pkg/ruler/rulespb/rules.pb.go | 144 +- pkg/ruler/rulespb/rules.proto | 1 + pkg/storegateway/bucket_stores.go | 2 +- pkg/tracing/migration/bridge_wrapper.go | 2 +- .../go/compute/internal/version.go | 2 +- .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/metadata.go | 2 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 42 + .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 178 +- .../sdk/azcore/internal/exported/exported.go | 3 +- .../azcore/internal/pollers/async/async.go | 13 +- .../sdk/azcore/internal/pollers/body/body.go | 4 + .../sdk/azcore/internal/pollers/loc/loc.go | 9 +- .../sdk/azcore/internal/pollers/op/op.go | 4 + .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/policy/policy.go | 23 +- 
.../sdk/azcore/runtime/pipeline.go | 8 +- .../sdk/azcore/runtime/policy_api_version.go | 75 + .../sdk/azcore/runtime/policy_logging.go | 4 +- .../sdk/azcore/runtime/policy_retry.go | 21 +- .../sdk/azcore/runtime/request.go | 55 +- .../sdk/azcore/runtime/response.go | 3 +- .../sdk/azcore/tracing/constants.go | 41 + .../sdk/azcore/tracing/tracing.go | 168 + .../sdk/azidentity/CHANGELOG.md | 30 + .../azure-sdk-for-go/sdk/azidentity/README.md | 6 +- .../sdk/azidentity/TROUBLESHOOTING.md | 5 + .../sdk/azidentity/azidentity.go | 46 +- .../azidentity/client_assertion_credential.go | 74 + .../client_certificate_credential.go | 60 +- .../azidentity/client_secret_credential.go | 15 +- .../azidentity/default_azure_credential.go | 10 +- .../sdk/azidentity/device_code_credential.go | 13 +- .../sdk/azidentity/environment_credential.go | 26 +- .../azure-sdk-for-go/sdk/azidentity/errors.go | 16 + .../interactive_browser_credential.go | 13 +- .../sdk/azidentity/managed_identity_client.go | 20 +- .../azidentity/managed_identity_credential.go | 32 +- .../username_password_credential.go | 13 +- .../sdk/azidentity/version.go | 2 +- .../sdk/internal/diag/diag.go | 2 +- .../sdk/internal/temporal/resource.go | 5 +- .../sdk/storage/azblob/CHANGELOG.md | 39 +- .../sdk/storage/azblob/README.md | 531 ++-- .../sdk/storage/azblob/appendblob/client.go | 276 ++ .../sdk/storage/azblob/appendblob/models.go | 166 + .../storage/azblob/appendblob/responses.go | 23 + .../sdk/storage/azblob/autorest.md | 171 -- .../sdk/storage/azblob/blob/client.go | 423 +++ .../sdk/storage/azblob/blob/constants.go | 241 ++ .../sdk/storage/azblob/blob/models.go | 492 +++ .../sdk/storage/azblob/blob/responses.go | 104 + .../retry_reader.go} | 140 +- .../sdk/storage/azblob/blob/utils.go | 79 + .../storage/azblob/bloberror/error_codes.go | 150 + .../azblob/{ => blockblob}/chunkwriting.go | 47 +- .../sdk/storage/azblob/blockblob/client.go | 481 +++ .../sdk/storage/azblob/blockblob/constants.go | 40 + .../sdk/storage/azblob/blockblob/models.go | 311 ++ .../sdk/storage/azblob/blockblob/responses.go | 111 + .../sdk/storage/azblob/client.go | 171 ++ .../sdk/storage/azblob/common.go | 36 + .../sdk/storage/azblob/connection.go | 39 - .../sdk/storage/azblob/constants.go | 53 +- .../sdk/storage/azblob/container/client.go | 334 ++ .../sdk/storage/azblob/container/constants.go | 166 + .../sdk/storage/azblob/container/models.go | 263 ++ .../sdk/storage/azblob/container/responses.go | 38 + .../sdk/storage/azblob/doc.go | 9 +- .../sdk/storage/azblob/highlevel.go | 316 -- .../storage/azblob/internal/base/clients.go | 89 + .../internal/exported/access_conditions.go | 43 + .../exported/access_policy.go} | 6 +- .../azblob/internal/exported/exported.go | 33 + .../exported/shared_key_credential.go} | 67 +- .../exported/user_delegation_credential.go | 64 + .../azblob/internal/exported/version.go | 12 + .../internal/generated/appendblob_client.go | 19 + .../azblob/internal/generated/autorest.md | 304 ++ .../azblob/internal/generated/blob_client.go | 17 + .../internal/generated/block_blob_client.go | 19 + .../internal/generated/container_client.go | 17 + .../internal/generated/pageblob_client.go | 17 + .../internal/generated/service_client.go | 17 + .../generated/zz_appendblob_client.go | 653 ++++ .../generated/zz_blob_client.go} | 1478 ++++----- .../internal/generated/zz_blockblob_client.go | 960 ++++++ .../generated/zz_constants.go} | 263 +- .../generated/zz_container_client.go} | 770 ++--- .../generated/zz_models.go} | 2696 +++++++---------- 
.../internal/generated/zz_models_serde.go | 481 +++ .../internal/generated/zz_pageblob_client.go | 1287 ++++++++ .../generated/zz_response_types.go} | 850 ++---- .../generated/zz_service_client.go} | 228 +- .../generated/zz_time_rfc1123.go} | 3 +- .../generated/zz_time_rfc3339.go} | 3 +- .../generated/zz_xml_helper.go} | 3 +- .../azblob/internal/shared/batch_transfer.go | 78 + .../{ => internal/shared}/bytes_writer.go | 6 +- .../azblob/internal/shared/section_writer.go | 53 + .../storage/azblob/internal/shared/shared.go | 238 ++ .../{ => internal/shared}/transfer_manager.go | 6 +- .../sdk/storage/azblob/internal/zc_shared.go | 150 - .../sdk/storage/azblob/migrationguide.md | 76 + .../sdk/storage/azblob/models.go | 70 + .../sdk/storage/azblob/pageblob/client.go | 403 +++ .../sdk/storage/azblob/pageblob/constants.go | 65 + .../models.go} | 282 +- .../sdk/storage/azblob/pageblob/responses.go | 38 + .../sdk/storage/azblob/responses.go | 51 + .../sdk/storage/azblob/sas/account.go | 316 ++ .../sdk/storage/azblob/sas/query_params.go | 440 +++ .../{zc_sas_service.go => sas/service.go} | 252 +- .../{zc_parsing_urls.go => sas/url_parts.go} | 98 +- .../sdk/storage/azblob/section_writer.go | 53 - .../sdk/storage/azblob/service/client.go | 287 ++ .../sdk/storage/azblob/service/constants.go | 92 + .../sdk/storage/azblob/service/models.go | 220 ++ .../sdk/storage/azblob/service/responses.go | 41 + .../sdk/storage/azblob/test-resources.json | 513 ++++ .../storage/azblob/zc_append_blob_client.go | 154 - .../sdk/storage/azblob/zc_blob_client.go | 278 -- .../storage/azblob/zc_blob_lease_client.go | 98 - .../storage/azblob/zc_block_blob_client.go | 201 -- .../storage/azblob/zc_connection_string.go | 88 - .../sdk/storage/azblob/zc_container_client.go | 253 -- .../azblob/zc_container_lease_client.go | 102 - .../sdk/storage/azblob/zc_page_blob_client.go | 261 -- .../sdk/storage/azblob/zc_response_error.go | 17 - .../sdk/storage/azblob/zc_response_helpers.go | 35 - .../sdk/storage/azblob/zc_sas_account.go | 243 -- .../sdk/storage/azblob/zc_sas_query_params.go | 427 --- .../sdk/storage/azblob/zc_service_client.go | 266 -- .../sdk/storage/azblob/zc_storage_error.go | 236 -- .../sdk/storage/azblob/zc_validators.go | 107 - .../storage/azblob/zm_access_conditions.go | 43 - .../azblob/zm_append_blob_client_util.go | 184 -- .../sdk/storage/azblob/zm_blob_client_util.go | 478 --- .../azblob/zm_blob_lease_client_util.go | 160 - .../azblob/zm_block_blob_client_util.go | 272 -- .../sdk/storage/azblob/zm_client_util.go | 55 - .../azblob/zm_container_client_util.go | 271 -- .../azblob/zm_container_lease_client_util.go | 166 - .../sdk/storage/azblob/zm_highlevel_util.go | 201 -- .../zm_serialize_and_desearilize_util.go | 68 - .../storage/azblob/zm_service_client_util.go | 226 -- .../azblob/zz_generated_appendblob_client.go | 648 ---- .../azblob/zz_generated_blockblob_client.go | 953 ------ .../azblob/zz_generated_pageblob_client.go | 1247 -------- .../sdk/storage/azblob/zz_generated_pagers.go | 287 -- .../apps/confidential/confidential.go | 157 +- .../apps/errors/errors.go | 3 +- .../apps/internal/base/base.go | 23 +- .../internal/base/internal/storage/items.go | 6 + .../internal/storage/partitioned_storage.go | 8 +- .../internal/base/internal/storage/storage.go | 36 +- .../apps/internal/exported/exported.go | 34 + .../apps/internal/json/json.go | 2 +- .../apps/internal/local/server.go | 3 +- .../apps/internal/oauth/oauth.go | 32 +- .../oauth/ops/accesstokens/accesstokens.go | 71 +- 
.../internal/oauth/ops/authority/authority.go | 22 +- .../internal/oauth/ops/internal/comm/comm.go | 13 +- .../apps/internal/oauth/ops/ops.go | 1 + .../apps/internal/version/version.go | 2 +- .../efficientgo/tools/extkingpin/LICENSE | 201 ++ .../efficientgo/tools/extkingpin/doc.go | 20 + .../tools/extkingpin/pathorcontent.go | 143 + .../github.com/go-logfmt/logfmt/CHANGELOG.md | 46 +- vendor/github.com/go-logfmt/logfmt/README.md | 30 +- vendor/github.com/go-logfmt/logfmt/decode.go | 17 + vendor/github.com/golang-jwt/jwt/.travis.yml | 11 - .../golang-jwt/jwt/MIGRATION_GUIDE.md | 22 - vendor/github.com/golang-jwt/jwt/claims.go | 146 - vendor/github.com/golang-jwt/jwt/errors.go | 59 - vendor/github.com/golang-jwt/jwt/token.go | 108 - .../golang-jwt/jwt/{ => v4}/.gitignore | 0 .../golang-jwt/jwt/{ => v4}/LICENSE | 0 .../golang-jwt/jwt/v4/MIGRATION_GUIDE.md | 22 + .../golang-jwt/jwt/{ => v4}/README.md | 70 +- .../github.com/golang-jwt/jwt/v4/SECURITY.md | 19 + .../jwt/{ => v4}/VERSION_HISTORY.md | 11 + vendor/github.com/golang-jwt/jwt/v4/claims.go | 269 ++ .../github.com/golang-jwt/jwt/{ => v4}/doc.go | 0 .../golang-jwt/jwt/{ => v4}/ecdsa.go | 24 +- .../golang-jwt/jwt/{ => v4}/ecdsa_utils.go | 8 +- .../github.com/golang-jwt/jwt/v4/ed25519.go | 85 + .../golang-jwt/jwt/v4/ed25519_utils.go | 64 + vendor/github.com/golang-jwt/jwt/v4/errors.go | 112 + .../golang-jwt/jwt/{ => v4}/hmac.go | 6 +- .../golang-jwt/jwt/{ => v4}/map_claims.go | 101 +- .../golang-jwt/jwt/{ => v4}/none.go | 2 +- .../golang-jwt/jwt/{ => v4}/parser.go | 49 +- .../golang-jwt/jwt/v4/parser_option.go | 29 + .../github.com/golang-jwt/jwt/{ => v4}/rsa.go | 6 +- .../golang-jwt/jwt/{ => v4}/rsa_pss.go | 7 +- .../golang-jwt/jwt/{ => v4}/rsa_utils.go | 16 +- .../golang-jwt/jwt/{ => v4}/signing_method.go | 17 +- .../golang-jwt/jwt/v4/staticcheck.conf | 1 + vendor/github.com/golang-jwt/jwt/v4/token.go | 132 + vendor/github.com/golang-jwt/jwt/v4/types.go | 145 + .../client/client.go | 20 +- .../prometheus/common/model/value.go | 30 +- .../prometheus/common/model/value_float.go | 28 +- .../common/model/value_histogram.go | 61 +- .../prometheus/common/model/value_marshal.go | 131 + .../prometheus/procfs/Makefile.common | 9 +- .../github.com/prometheus/procfs/cpuinfo.go | 36 + .../prometheus/procfs/cpuinfo_loong64.go} | 15 +- .../prometheus/procfs/cpuinfo_others.go | 4 +- vendor/github.com/prometheus/procfs/doc.go | 51 +- .../prometheus/procfs/mountstats.go | 3 +- .../prometheus/procfs/net_softnet.go | 70 +- .../github.com/prometheus/procfs/netstat.go | 53 +- .../prometheus/procfs/proc_cgroup.go | 2 +- .../prometheus/procfs/proc_interrupts.go | 98 + .../prometheus/procfs/proc_netstat.go | 491 +-- .../github.com/prometheus/procfs/proc_snmp.go | 318 +- .../prometheus/procfs/proc_snmp6.go | 364 +-- .../github.com/prometheus/procfs/proc_stat.go | 4 +- .../prometheus/procfs/proc_status.go | 6 +- vendor/github.com/prometheus/procfs/stat.go | 22 +- vendor/github.com/prometheus/procfs/thread.go | 79 + vendor/github.com/prometheus/procfs/vm.go | 4 +- .../prometheus/prometheus/config/config.go | 20 +- .../prometheus/discovery/manager.go | 4 +- .../model/histogram/float_histogram.go | 33 + .../prometheus/model/histogram/generic.go | 20 +- .../prometheus/model/histogram/histogram.go | 44 +- .../prometheus/model/labels/labels.go | 86 +- .../prometheus/model/labels/test_utils.go | 9 +- .../prometheus/model/relabel/relabel.go | 38 +- .../prometheus/model/rulefmt/rulefmt.go | 32 +- .../model/textparse/openmetricsparse.go | 32 +- 
.../prometheus/model/textparse/promparse.go | 31 +- .../model/textparse/protobufparse.go | 64 +- .../prometheus/notifier/notifier.go | 18 +- .../prometheus/prometheus/promql/engine.go | 67 +- .../prometheus/prometheus/promql/functions.go | 27 +- .../promql/parser/generated_parser.y | 10 +- .../promql/parser/generated_parser.y.go | 29 +- .../prometheus/prometheus/promql/test.go | 18 +- .../prometheus/prometheus/promql/value.go | 11 +- .../prometheus/prometheus/rules/alerting.go | 101 +- .../prometheus/prometheus/rules/manager.go | 30 +- .../prometheus/prometheus/rules/recording.go | 6 +- .../prometheus/prometheus/scrape/manager.go | 12 + .../prometheus/prometheus/scrape/scrape.go | 137 +- .../prometheus/prometheus/scrape/target.go | 64 +- .../prometheus/prometheus/storage/buffer.go | 6 +- .../prometheus/prometheus/storage/fanout.go | 6 +- .../prometheus/storage/interface.go | 15 +- .../prometheus/prometheus/storage/merge.go | 100 +- .../prometheus/storage/remote/codec.go | 108 +- .../storage/remote/queue_manager.go | 112 +- .../prometheus/storage/remote/read.go | 37 +- .../prometheus/storage/remote/write.go | 2 +- .../storage/remote/write_handler.go | 11 +- .../prometheus/prometheus/storage/series.go | 88 +- .../prometheus/prometheus/tsdb/block.go | 14 +- .../prometheus/tsdb/chunkenc/chunk.go | 38 +- .../tsdb/chunkenc/float_histogram.go | 831 +++++ .../prometheus/tsdb/chunkenc/histogram.go | 187 +- .../tsdb/chunkenc/histogram_meta.go | 295 +- .../prometheus/tsdb/chunkenc/xor.go | 4 + .../prometheus/prometheus/tsdb/compact.go | 7 +- .../prometheus/prometheus/tsdb/db.go | 2 +- .../prometheus/prometheus/tsdb/exemplar.go | 5 +- .../prometheus/prometheus/tsdb/head.go | 174 +- .../prometheus/prometheus/tsdb/head_append.go | 405 ++- .../prometheus/prometheus/tsdb/head_read.go | 44 +- .../prometheus/prometheus/tsdb/head_wal.go | 113 +- .../prometheus/prometheus/tsdb/index/index.go | 27 +- .../prometheus/tsdb/index/postings.go | 11 +- .../prometheus/prometheus/tsdb/ooo_head.go | 10 + .../prometheus/tsdb/ooo_head_read.go | 46 +- .../prometheus/prometheus/tsdb/querier.go | 214 +- .../prometheus/tsdb/record/record.go | 185 +- .../prometheus/tsdb/tsdbblockutil.go | 8 +- .../prometheus/prometheus/tsdb/wal.go | 2 +- .../prometheus/tsdb/wlog/live_reader.go | 2 +- .../prometheus/prometheus/tsdb/wlog/reader.go | 2 +- .../prometheus/tsdb/wlog/watcher.go | 45 +- .../prometheus/prometheus/web/api/v1/api.go | 68 +- vendor/github.com/rueian/rueidis/README.md | 496 +-- .../rueian/rueidis/internal/cmds/builder.go | 2 +- .../rueian/rueidis/internal/cmds/gen.go | 475 ++- vendor/github.com/rueian/rueidis/message.go | 68 +- vendor/github.com/rueian/rueidis/pipe.go | 10 +- .../promql-engine/api/remote.go | 1 - .../promql-engine/engine/engine.go | 45 +- .../execution/aggregate/hashaggregate.go | 2 +- .../execution/aggregate/khashaggregate.go | 3 +- .../execution/aggregate/scalar_table.go | 125 +- .../execution/aggregate/vector_table.go | 84 +- .../promql-engine/execution/binary/scalar.go | 3 +- .../promql-engine/execution/binary/table.go | 3 +- .../execution/exchange/coalesce.go | 23 +- .../promql-engine/execution/exchange/dedup.go | 3 +- .../promql-engine/execution/execution.go | 4 - .../execution/function/functions.go | 189 +- .../execution/function/histogram.go | 39 +- .../execution/function/operator.go | 84 +- .../execution/function/quantile.go | 119 + .../promql-engine/execution/model/pool.go | 48 +- .../promql-engine/execution/model/vector.go | 56 +- .../execution/scan/literal_selector.go | 4 +- 
.../execution/scan/matrix_selector.go | 27 +- .../execution/scan/vector_selector.go | 31 +- .../step_invariant/step_invariant.go | 8 +- .../promql-engine/execution/storage/filter.go | 9 +- .../promql-engine/execution/storage/pool.go | 5 +- .../thanos-io/objstore/CHANGELOG.md | 4 + .../github.com/thanos-io/objstore/README.md | 8 +- vendor/github.com/thanos-io/objstore/inmem.go | 12 +- .../github.com/thanos-io/objstore/objstore.go | 2 +- .../objstore/providers/azure/azure.go | 170 +- .../objstore/providers/azure/helpers.go | 33 +- .../providers/filesystem/filesystem.go | 34 +- .../thanos-io/objstore/providers/s3/s3.go | 11 +- .../thanos-io/thanos/pkg/block/fetcher.go | 8 +- .../thanos-io/thanos/pkg/block/index.go | 12 +- .../thanos/pkg/cacheutil/memcached_client.go | 1 + .../thanos/pkg/cacheutil/redis_client.go | 2 + .../pkg/compact/downsample/downsample.go | 21 +- .../thanos-io/thanos/pkg/dedup/chunk_iter.go | 12 +- .../thanos-io/thanos/pkg/dedup/iter.go | 157 +- .../thanos-io/thanos/pkg/extkingpin/app.go | 148 + .../thanos-io/thanos/pkg/extkingpin/flags.go | 152 + .../pkg/extkingpin/path_content_reloader.go | 128 + .../thanos-io/thanos/pkg/gate/gate.go | 120 +- .../thanos-io/thanos/pkg/store/bucket.go | 112 +- .../thanos/pkg/store/labelpb/label.go | 35 +- .../thanos-io/thanos/pkg/store/limiter.go | 86 +- .../thanos-io/thanos/pkg/store/local.go | 3 +- .../thanos-io/thanos/pkg/store/prometheus.go | 37 +- .../thanos-io/thanos/pkg/store/proxy.go | 28 +- .../thanos-io/thanos/pkg/store/proxy_heap.go | 233 +- .../thanos/pkg/store/storepb/rpc.pb.go | 234 +- .../thanos/pkg/store/storepb/rpc.proto | 21 +- .../thanos-io/thanos/pkg/store/telemetry.go | 58 + .../thanos-io/thanos/pkg/store/tsdb.go | 12 +- .../thanos/pkg/testutil/e2eutil/prometheus.go | 55 +- .../net/http/otelhttp/config.go | 9 + .../net/http/otelhttp/handler.go | 50 +- .../net/http/otelhttp/transport.go | 8 +- .../net/http/otelhttp/version.go | 2 +- .../contrib/propagators/b3/b3_config.go | 4 +- .../contrib/propagators/b3/b3_propagator.go | 13 +- .../contrib/propagators/b3/version.go | 2 +- .../propagators/jaeger/jaeger_propagator.go | 19 +- .../contrib/propagators/jaeger/version.go | 2 +- .../contrib/propagators/ot/version.go | 2 +- .../otel/bridge/opentracing/provider.go | 71 + .../otel/bridge/opentracing/wrapper.go | 9 +- .../otel/metric/instrument/asyncfloat64.go | 131 + .../instrument/asyncfloat64/asyncfloat64.go | 80 - .../otel/metric/instrument/asyncint64.go | 131 + .../instrument/asyncint64/asyncint64.go | 80 - .../otel/metric/instrument/config.go | 69 - .../otel/metric/instrument/instrument.go | 60 + .../otel/metric/instrument/syncfloat64.go | 86 + .../instrument/syncfloat64/syncfloat64.go | 64 - .../instrument/{syncint64 => }/syncint64.go | 68 +- .../metric/internal/global/instruments.go | 207 +- .../otel/metric/internal/global/meter.go | 319 +- .../go.opentelemetry.io/otel/metric/meter.go | 114 +- .../go.opentelemetry.io/otel/metric/noop.go | 127 +- .../otel/semconv/internal/http.go | 336 -- .../otel/semconv/internal/v2/http.go | 404 +++ .../otel/semconv/internal/v2/net.go | 324 ++ .../otel/semconv/v1.12.0/doc.go | 20 - .../otel/semconv/v1.12.0/http.go | 114 - .../otel/semconv/v1.12.0/resource.go | 1042 ------- .../otel/semconv/v1.12.0/schema.go | 20 - .../otel/semconv/v1.12.0/trace.go | 1704 ----------- .../otel/semconv/v1.17.0/httpconv/http.go | 150 + vendor/go.uber.org/multierr/CHANGELOG.md | 8 + vendor/go.uber.org/multierr/error.go | 345 ++- vendor/go4.org/intern/LICENSE | 29 + vendor/go4.org/intern/README.md | 19 + 
vendor/go4.org/intern/intern.go | 183 ++ .../unsafe/assume-no-moving-gc/LICENSE | 29 + .../unsafe/assume-no-moving-gc/README.md | 13 + .../assume-no-moving-gc.go | 22 + .../unsafe/assume-no-moving-gc/untested.go | 26 + vendor/golang.org/x/exp/slices/slices.go | 3 + vendor/golang.org/x/exp/slices/sort.go | 47 +- .../x/tools/go/gcexportdata/gcexportdata.go | 24 +- .../x/tools/go/packages/packages.go | 5 + .../x/tools/internal/gcimporter/gcimporter.go | 919 +----- .../tools/internal/gcimporter/ureader_yes.go | 39 +- .../x/tools/internal/gocommand/version.go | 16 +- .../api/googleapi/googleapi.go | 5 +- .../iamcredentials/v1/iamcredentials-gen.go | 2 +- .../api/internal/gensupport/json.go | 26 +- .../api/internal/gensupport/resumable.go | 16 +- .../api/internal/gensupport/send.go | 4 +- .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 7 + .../api/storage/v1/storage-gen.go | 2 +- .../googleapis/api/annotations/client.pb.go | 371 +-- .../genproto/googleapis/rpc/code/code.pb.go | 27 +- .../rpc/errdetails/error_details.pb.go | 374 +-- .../googleapis/rpc/status/status.pb.go | 10 +- .../alecthomas/kingpin.v2/.travis.yml | 12 +- .../gopkg.in/alecthomas/kingpin.v2/README.md | 79 +- vendor/gopkg.in/alecthomas/kingpin.v2/app.go | 17 +- vendor/gopkg.in/alecthomas/kingpin.v2/args.go | 21 + vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go | 55 +- .../gopkg.in/alecthomas/kingpin.v2/envar.go | 9 +- .../gopkg.in/alecthomas/kingpin.v2/flags.go | 24 + .../gopkg.in/alecthomas/kingpin.v2/global.go | 2 + .../gopkg.in/alecthomas/kingpin.v2/model.go | 74 +- .../gopkg.in/alecthomas/kingpin.v2/parser.go | 16 +- .../gopkg.in/alecthomas/kingpin.v2/parsers.go | 4 + .../alecthomas/kingpin.v2/templates.go | 38 +- .../gopkg.in/alecthomas/kingpin.v2/usage.go | 29 +- .../gopkg.in/alecthomas/kingpin.v2/values.go | 24 +- vendor/modules.txt | 120 +- 442 files changed, 27620 insertions(+), 22718 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_retry_reader.go => blob/retry_reader.go} (57%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{ => blockblob}/chunkwriting.go (82%) create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_access_policy.go => internal/exported/access_policy.go} (88%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_shared_policy_shared_key_credential.go => internal/exported/shared_key_credential.go} (73%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_blob_client.go => internal/generated/zz_blob_client.go} (52%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_constants.go => internal/generated/zz_constants.go} (84%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_container_client.go => internal/generated/zz_container_client.go} (50%) rename 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_models.go => internal/generated/zz_models.go} (75%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_response_types.go => internal/generated/zz_response_types.go} (70%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_service_client.go => internal/generated/zz_service_client.go} (62%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_time_rfc1123.go => internal/generated/zz_time_rfc1123.go} (96%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_time_rfc3339.go => internal/generated/zz_time_rfc3339.go} (97%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_xml_helper.go => internal/generated/zz_xml_helper.go} (96%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{ => internal/shared}/bytes_writer.go (74%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{ => internal/shared}/transfer_manager.go (96%) delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zm_page_blob_client_util.go => pageblob/models.go} (51%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_sas_service.go => sas/service.go} (51%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_parsing_urls.go => sas/url_parts.go} (68%) delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json delete mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go create mode 100644 vendor/github.com/efficientgo/tools/extkingpin/LICENSE create mode 100644 vendor/github.com/efficientgo/tools/extkingpin/doc.go create mode 100644 vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go delete mode 100644 vendor/github.com/golang-jwt/jwt/.travis.yml delete mode 100644 
vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/golang-jwt/jwt/claims.go delete mode 100644 vendor/github.com/golang-jwt/jwt/errors.go delete mode 100644 vendor/github.com/golang-jwt/jwt/token.go rename vendor/github.com/golang-jwt/jwt/{ => v4}/.gitignore (100%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/LICENSE (100%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md rename vendor/github.com/golang-jwt/jwt/{ => v4}/README.md (61%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/SECURITY.md rename vendor/github.com/golang-jwt/jwt/{ => v4}/VERSION_HISTORY.md (89%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/claims.go rename vendor/github.com/golang-jwt/jwt/{ => v4}/doc.go (100%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/ecdsa.go (81%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/ecdsa_utils.go (81%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/errors.go rename vendor/github.com/golang-jwt/jwt/{ => v4}/hmac.go (90%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/map_claims.go (50%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/none.go (94%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/parser.go (70%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/parser_option.go rename vendor/github.com/golang-jwt/jwt/{ => v4}/rsa.go (92%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/rsa_pss.go (93%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/rsa_utils.go (72%) rename vendor/github.com/golang-jwt/jwt/{ => v4}/signing_method.go (66%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf create mode 100644 vendor/github.com/golang-jwt/jwt/v4/token.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/types.go create mode 100644 vendor/github.com/prometheus/common/model/value_marshal.go rename vendor/{go.opentelemetry.io/otel/semconv/v1.12.0/exception.go => github.com/prometheus/procfs/cpuinfo_loong64.go} (64%) create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go create mode 100644 vendor/github.com/prometheus/procfs/thread.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/extkingpin/app.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/extkingpin/flags.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/extkingpin/path_content_reloader.go create mode 100644 vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/config.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go rename vendor/go.opentelemetry.io/otel/metric/instrument/{syncint64 => }/syncint64.go (50%) delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go create 
mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go create mode 100644 vendor/go4.org/intern/LICENSE create mode 100644 vendor/go4.org/intern/README.md create mode 100644 vendor/go4.org/intern/intern.go create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/README.md create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/assume-no-moving-gc.go create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/untested.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 17f62475c93..be7a57b7a74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ * [FEATURE] Ring: Add new kv store option `dynamodb`. #5026 * [FEATURE] Cache: Support redis as backend for caching bucket and index cache. #5057 * [FEATURE] Querier/Store-Gateway: Added `-blocks-storage.bucket-store.ignore-blocks-within` allowing to filter out the recently created blocks from being synced by queriers and store-gateways. #5166 +* [FEATURE] AlertManager/Ruler: Added support for `keep_firing_for` on alerting rules. * [BUGFIX] Updated `golang.org/x/net` dependency to fix CVE-2022-27664. #5008 * [BUGFIX] Fix panic when otel and xray tracing is enabled. #5044 diff --git a/go.mod b/go.mod index 39c4209ccd1..e7c885fba93 100644 --- a/go.mod +++ b/go.mod @@ -44,15 +44,16 @@ require ( github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 - github.com/prometheus/prometheus v0.41.0 + github.com/prometheus/common v0.39.1-0.20230202092144-f9c1994be032 + // Prometheus maps version 2.x.y to tags v0.x.y. 
+ github.com/prometheus/prometheus v0.42.0 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 - github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 - github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 - github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 + github.com/thanos-community/promql-engine v0.0.0-20230224075812-ae04bbea7613 + github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204 + github.com/thanos-io/thanos v0.29.1-0.20230224131058-fdeea3917591 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.7 @@ -60,7 +61,7 @@ require ( go.etcd.io/etcd/client/v3 v3.5.7 go.opentelemetry.io/contrib/propagators/aws v1.14.0 go.opentelemetry.io/otel v1.13.0 - go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 + go.opentelemetry.io/otel/bridge/opentracing v1.12.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.13.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 go.opentelemetry.io/otel/sdk v1.13.0 @@ -77,15 +78,15 @@ require ( require ( cloud.google.com/go v0.105.0 // indirect - cloud.google.com/go/compute v1.13.0 // indirect - cloud.google.com/go/compute/metadata v0.2.2 // indirect + cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/storage v1.27.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect @@ -113,10 +114,11 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect @@ -129,13 +131,13 @@ require ( github.com/go-openapi/validate v0.22.0 // indirect github.com/gofrs/uuid v4.3.1+incompatible // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect + github.com/golang-jwt/jwt/v4 v4.4.3 // indirect github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 // indirect + github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect @@ -174,11 +176,11 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.8.2 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/rs/xid v1.4.0 // indirect - github.com/rueian/rueidis v0.0.90 // indirect + github.com/rueian/rueidis v0.0.93 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect @@ -191,29 +193,31 @@ require ( github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect go.mongodb.org/mongo-driver v1.11.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect - go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect - go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0 // indirect + go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.13.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 // indirect + go.opentelemetry.io/contrib/propagators/ot v1.13.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.13.0 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect + go.opentelemetry.io/otel/metric v0.36.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/goleak v1.2.0 // indirect - go.uber.org/multierr v1.8.0 // indirect + go.uber.org/multierr v1.9.0 // indirect go.uber.org/zap v1.21.0 // indirect + go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect golang.org/x/crypto v0.6.0 // indirect - golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect + golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 // indirect golang.org/x/mod v0.7.0 // indirect - golang.org/x/oauth2 v0.3.0 // indirect + golang.org/x/oauth2 v0.4.0 // indirect golang.org/x/sys v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect - golang.org/x/tools v0.4.0 // indirect + golang.org/x/tools v0.5.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect - google.golang.org/api v0.104.0 // indirect + google.golang.org/api v0.108.0 // indirect google.golang.org/appengine v1.6.7 // indirect 
- google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect + google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -241,3 +245,7 @@ replace github.com/go-openapi/spec => github.com/go-openapi/spec v0.20.6 replace github.com/ionos-cloud/sdk-go/v6 => github.com/ionos-cloud/sdk-go/v6 v6.0.4 replace github.com/googleapis/gnostic => github.com/google/gnostic v0.6.9 + +// Same replace used by thanos: (may be removed in the future) +// https://github.com/thanos-io/thanos/blob/fdeea3917591fc363a329cbe23af37c6fff0b5f0/go.mod#L265 +replace gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497 diff --git a/go.sum b/go.sum index 17aba9fea82..82d59860032 100644 --- a/go.sum +++ b/go.sum @@ -47,10 +47,10 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= -cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -78,14 +78,14 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod 
h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -101,7 +101,7 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= +github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -113,8 +113,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= +github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= @@ -163,12 +163,15 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= 
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497 h1:aDITxVUQ/3KBhpVWX57Vo9ntGTxoRw1F0T6/x/tRzNU= +github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497/go.mod h1:b6br6/pDFSfMkBgC96TbpOji05q5pa+v5rIlS0Y6XtI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= @@ -475,7 +478,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc= +github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -486,7 +489,7 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= +github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -514,6 +517,8 @@ github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E github.com/efficientgo/core v1.0.0-rc.2 
h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= github.com/efficientgo/e2e v0.14.1-0.20230119090947-fa7ceb0197c5 h1:N1fHVcNEPMJNB93sxT6icl5yvoFxa2sp3/1NnVlrAFc= +github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd h1:VaYzzXeUbC5fVheskcKVNOyJMEYD+HgrJNzIAg/mRIM= +github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= @@ -587,8 +592,9 @@ github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2C github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -697,11 +703,10 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= +github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= +github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc= github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -803,8 +808,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod 
h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 h1:D5iJJZKAi0rU4e/5E58BkrnN+xeCDjAIqcm1GGxAGSI= -github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc= +github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -813,8 +818,8 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -920,11 +925,11 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU= +github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7 h1:XOdd3JHyeQnBRxotBo9ibxBFiYGuYhQU25s/YeV2cTU= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go v1.38.0 h1:K6Pd/mMdcLfBhvwG39qyAaacp4pCS3dKa8gChmLKxLg= +github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -1078,7 +1083,7 @@ github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go 
v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ= +github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1180,7 +1185,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= @@ -1240,7 +1244,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1305,7 +1309,6 @@ github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= @@ -1363,8 +1366,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.39.1-0.20230110141620-846591a16635 h1:hK3y58iUBjRFZ6kFNJTWsES1GnVKsqEYUeiyeRXridQ= -github.com/prometheus/common 
v0.39.1-0.20230110141620-846591a16635/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.39.1-0.20230202092144-f9c1994be032 h1:OKL/rHv39LgZmRQZZcUOCzl0UuFu3OC6IMofPoseG0Y= +github.com/prometheus/common v0.39.1-0.20230202092144-f9c1994be032/go.mod h1:L65ZJPSmfn/UBWLQIHV7dBrKFidB/wPlF1y5TlSt9OE= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= @@ -1383,10 +1386,11 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns= -github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI= +github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1405,8 +1409,8 @@ github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rueian/rueidis v0.0.90 h1:zuTFPvouPmO4FojrWg2p47XhiFcJ5Ux/cQu37ZRIdVs= -github.com/rueian/rueidis v0.0.90/go.mod h1:LiKWMM/QnILwRfDZIhSIXi4vQqZ/UZy4+/aNkSCt8XA= +github.com/rueian/rueidis v0.0.93 h1:cG905akj2+QyHx0x9y4mN0K8vLi6M94QiyoLulXS3l0= +github.com/rueian/rueidis v0.0.93/go.mod h1:lo6LBci0D986usi5Wxjb4RVNaWENKYbHZSnufGJ9bTE= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1415,7 +1419,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scaleway/scaleway-sdk-go 
v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -1504,15 +1508,15 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= +github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4 h1:D0ruzvS/U+49eMLg/7IIYK2iMdVP4PrJLpVHgguMVkg= -github.com/thanos-community/promql-engine v0.0.0-20230124070417-9e293186b7e4/go.mod h1:d52Wfzxs6L3xhc2snodyWvM2bZMMVn0XT2q4vsfBPVo= -github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= -github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= -github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00 h1:8/GjzkmjltOUSO+7FZQhhpTj7SPDXZwY5bWassilNTo= -github.com/thanos-io/thanos v0.29.1-0.20230131102841-a3b6b10afb00/go.mod h1:oTvgV468yMvGgECD06eWuVWw0OBTHcznyT2BZl5rgeA= +github.com/thanos-community/promql-engine v0.0.0-20230224075812-ae04bbea7613 h1:ANPnp+Z81Y/RPlY1T3+YYHwGnT0Ea5oEowGamE2rWh0= +github.com/thanos-community/promql-engine v0.0.0-20230224075812-ae04bbea7613/go.mod h1:gREn4JarQ2DZdWirOtqZQd3p+c1xH+UVpGRjGKVoWx8= +github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204 h1:W4w5Iph7j32Sf1QFWLJDCqvO0WgZS0jHGID+qnq3wV0= +github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204/go.mod h1:STSgpY8M6EKF2G/raUFdbIMf2U9GgYlEjAEHJxjvpAo= +github.com/thanos-io/thanos v0.29.1-0.20230224131058-fdeea3917591 h1:E/MHYpGqH+Uqw1kAO2k2/zonREakCoooSCDExZ7rY0s= +github.com/thanos-io/thanos v0.29.1-0.20230224131058-fdeea3917591/go.mod h1:zozJCI4rIf2R4PjQ5szW4ME6XkWVQ6Atgbib93NX8W0= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1615,24 +1619,24 @@ go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUz go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= -go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= -go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0 h1:vFEBG7SieZJzvnRWQ81jxpuEqe6J8Ex+hgc9CqOTzHc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0/go.mod h1:9rgTcOKdIhDOC0IcAu8a+R+FChqSUBihKpM1lVNi6T0= +go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 h1:WZwiLCwOL0XW/6TVT7LTtdRDveoHZ6q3wL+0iYsBcdE= +go.opentelemetry.io/contrib/propagators/autoprop v0.38.0/go.mod h1:JBebP2d0HiffbfelbIEoBOCl4790g7Z8lD1scUd3Vd8= go.opentelemetry.io/contrib/propagators/aws v1.14.0 h1:At9KgnobpQSaC7LYQ2JpDEOxLlMW6EsmLPsCAFfXSNg= go.opentelemetry.io/contrib/propagators/aws v1.14.0/go.mod h1:KB4fnXEZfSGUC39lmyXfqfuw7D1C8n01nXxsYgKvQhc= -go.opentelemetry.io/contrib/propagators/b3 v1.9.0 h1:Lzb9zU98jCE2kyfCjWfSSsiQoGtvBL+COxvUBf7FNhU= -go.opentelemetry.io/contrib/propagators/b3 v1.9.0/go.mod h1:fyx3gFXn+4w5uWTTiqaI8oBNBW/6w9Ow5zxXf7NGixU= -go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ1vlsphNPV7RkuKNd/HkQ= -go.opentelemetry.io/contrib/propagators/jaeger v1.9.0/go.mod h1:Q/AXutvrBTfEDSeRLwOmKhyviX5adJvTesg6JFTybYg= -go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4ZfG42PAGvj3eLUukHE= -go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= +go.opentelemetry.io/contrib/propagators/b3 v1.13.0 h1:f17PBmZK60RoHvOpJVqEka8oS2EXjpjHquESD/8zZ50= +go.opentelemetry.io/contrib/propagators/b3 v1.13.0/go.mod h1:zy2hz1TpGUoJzSwlBchVGvVAFQS8s2pglKLbrAFZ+Sc= +go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 h1:+tVlvpiQMOCzi4EYCaBjblibpyKfqoph0fcITmtXMws= +go.opentelemetry.io/contrib/propagators/jaeger v1.13.0/go.mod h1:Qf7eVCLYawiNIB+A81kk8aFDFwYqXSqmt0N2RcvkLLI= +go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYohdaBacQfojPYZJgATQ= +go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= -go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1 h1:jrqVQNRKglMRBUrSf40J5mBYnUyleAKL+g0iuyEyaGA= -go.opentelemetry.io/otel/bridge/opentracing v1.11.1-0.20221013154440-84e28fd3bdb1/go.mod h1:zZWeMK8RgtSJddl2Oox9MfjMRgD/HqzuLujCFdF0CZU= +go.opentelemetry.io/otel/bridge/opentracing v1.12.0 h1:tU684zGp/ft9QpXRixnoeKbz0vNjrcd3tEDsYy+uJUI= +go.opentelemetry.io/otel/bridge/opentracing v1.12.0/go.mod h1:qjLYKFXmUQhZHVa0EbQOY29U061UO/14B+NGWUOnOnk= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= 
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.13.0 h1:pa05sNT/P8OsIQ8mPZKTIyiBuzS/xDGLVx+DCt0y6Vs= @@ -1645,8 +1649,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 h1:+tsVd go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0/go.mod h1:jSqjV+Knu1Jyvh+l3fx7V210Ev3HHgNQAi8YqpXaQP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= +go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q= +go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= @@ -1679,8 +1683,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1688,6 +1692,10 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= +go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1732,8 +1740,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4= -golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 h1:kWC3b7j6Fu09SnEBr7P4PuQyM0R6sqyH9R+EjIvT1nQ= +golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1878,8 +1886,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2159,8 +2167,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2222,8 +2230,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= -google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= +google.golang.org/api 
v0.108.0 h1:WVBc/faN0DkKtR43Q/7+tPny9ZoLZdIiAyG5Q9vFClg= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2329,8 +2337,8 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI= +google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2392,8 +2400,6 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2456,13 +2462,13 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= k8s.io/apiserver v0.20.1/go.mod 
h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2471,7 +2477,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index bc32b64002f..5668bae893e 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -199,6 +199,7 @@ func NewQuerierHandler( querier.NewErrorTranslateSampleAndChunkQueryable(queryable), // Translate errors to errors expected by API. nil, // No remote write support. exemplarQueryable, + func(ctx context.Context) v1.ScrapePoolsRetriever { return nil }, func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} }, func() config.Config { return config.Config{} }, diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index 9366974a0f7..b46bd17a777 100644 --- a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -305,6 +305,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { rl.Alert.Value, expr, time.Duration(rl.For), + time.Duration(rl.KeepFiringFor), labels.FromMap(rl.Labels), labels.FromMap(rl.Annotations), nil, diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go index 3292b29ad2d..5cc99211479 100644 --- a/pkg/configs/userconfig/config_test.go +++ b/pkg/configs/userconfig/config_test.go @@ -85,6 +85,7 @@ func TestParseLegacyAlerts(t *testing.T) { "TestAlert", parsed, 5*time.Minute, + 0, labels.Labels{ labels.Label{Name: "severity", Value: "critical"}, }, diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 1b02f84a384..1f6ddb6475d 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -647,7 +647,7 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co } if mrc := limits.MetricRelabelConfigs; len(mrc) > 0 { - l := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...) + l, _ := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...) 
if len(l) == 0 { // all labels are gone, samples will be discarded validation.DiscardedSamples.WithLabelValues( diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 7314a8587fb..039f35f4a88 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -14,6 +14,7 @@ import ( "time" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -1256,6 +1257,7 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client numSamples := 0 result := &client.QueryResponse{} + var it chunkenc.Iterator for ss.Next() { series := ss.At() if sm.IsSharded() && !sm.MatchesLabels(series.Labels()) { @@ -1266,7 +1268,7 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), } - it := series.Iterator() + it = series.Iterator(it) for it.Next() != chunkenc.ValNone { t, v := it.At() ts.Samples = append(ts.Samples, cortexpb.Sample{Value: v, TimestampMs: t}) @@ -1715,6 +1717,7 @@ func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, th chunkSeries := make([]client.TimeSeriesChunk, 0, queryStreamBatchSize) batchSizeBytes := 0 + var it chunks.Iterator for ss.Next() { series := ss.At() @@ -1727,7 +1730,7 @@ func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, th Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), } - it := series.Iterator() + it := series.Iterator(it) for it.Next() { // Chunks are ordered by min time. meta := it.At() diff --git a/pkg/querier/block.go b/pkg/querier/block.go index 5ab049f3d1a..c0ce54f4b72 100644 --- a/pkg/querier/block.go +++ b/pkg/querier/block.go @@ -104,7 +104,7 @@ func (bqs *blockQuerierSeries) Labels() labels.Labels { return bqs.labels } -func (bqs *blockQuerierSeries) Iterator() chunkenc.Iterator { +func (bqs *blockQuerierSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator { if len(bqs.chunks) == 0 { // should not happen in practice, but we have a unit test for it return series.NewErrIterator(errors.New("no chunks")) diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go index 055af0b0725..503e2333510 100644 --- a/pkg/querier/block_test.go +++ b/pkg/querier/block_test.go @@ -77,7 +77,7 @@ func TestBlockQuerierSeries(t *testing.T) { sampleIx := 0 - it := series.Iterator() + it := series.Iterator(nil) for it.Next() != chunkenc.ValNone { ts, val := it.At() require.True(t, sampleIx < len(testData.expectedSamples)) @@ -208,7 +208,7 @@ func verifyNextSeries(t *testing.T, ss storage.SeriesSet, labels labels.Labels, prevTS := int64(0) count := 0 - for it := s.Iterator(); it.Next() != chunkenc.ValNone; { + for it := s.Iterator(nil); it.Next() != chunkenc.ValNone; { count++ ts, v := it.At() require.Equal(t, math.Sin(float64(ts)), v) @@ -332,9 +332,10 @@ func Benchmark_blockQuerierSeriesSet_iteration(b *testing.B) { for n := 0; n < b.N; n++ { set := blockQuerierSeriesSet{series: series} + var it chunkenc.Iterator for set.Next() { - for t := set.At().Iterator(); t.Next() != chunkenc.ValNone; { - t.At() + for it = set.At().Iterator(it); it.Next() != chunkenc.ValNone; { + it.At() } } } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 664ba36ca0f..d38780b543e 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -710,10 +710,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // Read 
all returned series and their values. var actualSeries []seriesResult + var it chunkenc.Iterator for set.Next() { var actualValues []valueResult - it := set.At().Iterator() + it = set.At().Iterator(it) for it.Next() != chunkenc.ValNone { t, v := it.At() actualValues = append(actualValues, valueResult{ diff --git a/pkg/querier/chunk_store_queryable.go b/pkg/querier/chunk_store_queryable.go index 71a9d830ffc..596ff7ee851 100644 --- a/pkg/querier/chunk_store_queryable.go +++ b/pkg/querier/chunk_store_queryable.go @@ -23,7 +23,7 @@ func (s *chunkSeries) Labels() labels.Labels { } // Iterator returns a new iterator of the data of the series. -func (s *chunkSeries) Iterator() chunkenc.Iterator { +func (s *chunkSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator { return s.chunkIteratorFunc(s.chunks, model.Time(s.mint), model.Time(s.maxt)) } diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index dec55e368bc..213c3dc9517 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -319,7 +319,7 @@ func TestIngesterStreamingMixedResults(t *testing.T) { func verifySeries(t *testing.T, series storage.Series, l labels.Labels, samples []cortexpb.Sample) { require.Equal(t, l, series.Labels()) - it := series.Iterator() + it := series.Iterator(nil) for _, s := range samples { require.NotEqual(t, it.Next(), chunkenc.ValNone) require.Nil(t, it.Err()) diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index 5c1682c6e2c..90820e13666 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -147,6 +147,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine v1.QueryEngin q, nil, nil, + func(ctx context.Context) v1.ScrapePoolsRetriever { return nil }, func(context.Context) v1.TargetRetriever { return &DummyTargetRetriever{} }, func(context.Context) v1.AlertmanagerRetriever { return &DummyAlertmanagerRetriever{} }, func() config.Config { return config.Config{} }, diff --git a/pkg/querier/remote_read.go b/pkg/querier/remote_read.go index 5c943741e39..12a1251b444 100644 --- a/pkg/querier/remote_read.go +++ b/pkg/querier/remote_read.go @@ -79,10 +79,11 @@ func RemoteReadHandler(q storage.Queryable, logger log.Logger) http.Handler { func seriesSetToQueryResponse(s storage.SeriesSet) (*client.QueryResponse, error) { result := &client.QueryResponse{} + var it chunkenc.Iterator for s.Next() { series := s.At() samples := []cortexpb.Sample{} - it := series.Iterator() + it = series.Iterator(it) for it.Next() != chunkenc.ValNone { t, v := it.At() samples = append(samples, cortexpb.Sample{ diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index 9d30b69c8b4..d2c9270fc2f 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -88,7 +88,7 @@ func (c *ConcreteSeries) Labels() labels.Labels { } // Iterator implements storage.Series -func (c *ConcreteSeries) Iterator() chunkenc.Iterator { +func (c *ConcreteSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator { return NewConcreteSeriesIterator(c) } @@ -252,8 +252,8 @@ func (d DeletedSeries) Labels() labels.Labels { return d.series.Labels() } -func (d DeletedSeries) Iterator() chunkenc.Iterator { - return NewDeletedSeriesIterator(d.series.Iterator(), d.deletedIntervals) +func (d DeletedSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + return 
NewDeletedSeriesIterator(d.series.Iterator(it), d.deletedIntervals) } type DeletedSeriesIterator struct { @@ -332,7 +332,7 @@ func (e emptySeries) Labels() labels.Labels { return e.labels } -func (emptySeries) Iterator() chunkenc.Iterator { +func (emptySeries) Iterator(chunkenc.Iterator) chunkenc.Iterator { return NewEmptySeriesIterator() } diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index ee35eb6b1df..ec724288fc7 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -451,8 +451,8 @@ func (a *addLabelsSeries) Labels() labels.Labels { } // Iterator returns a new, independent iterator of the data of the series. -func (a *addLabelsSeries) Iterator() chunkenc.Iterator { - return a.upstream.Iterator() +func (a *addLabelsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + return a.upstream.Iterator(it) } // this sets a label and preserves an existing value a new label prefixed with diff --git a/pkg/querier/timeseries_series_set.go b/pkg/querier/timeseries_series_set.go index c42efa764cb..3001ea20969 100644 --- a/pkg/querier/timeseries_series_set.go +++ b/pkg/querier/timeseries_series_set.go @@ -71,7 +71,7 @@ func (t *timeseries) Labels() labels.Labels { } // Iterator implements the storage.Series interface -func (t *timeseries) Iterator() chunkenc.Iterator { +func (t *timeseries) Iterator(chunkenc.Iterator) chunkenc.Iterator { return iterators.NewCompatibleChunksIterator(&timeSeriesSeriesIterator{ ts: t, i: -1, diff --git a/pkg/querier/timeseries_series_set_test.go b/pkg/querier/timeseries_series_set_test.go index 99f7636dbf4..0432b50cbfe 100644 --- a/pkg/querier/timeseries_series_set_test.go +++ b/pkg/querier/timeseries_series_set_test.go @@ -36,7 +36,7 @@ func TestTimeSeriesSeriesSet(t *testing.T) { require.Equal(t, ss.ts[0].Labels[0].Name, series.Labels()[0].Name) require.Equal(t, ss.ts[0].Labels[0].Value, series.Labels()[0].Value) - it := series.Iterator() + it := series.Iterator(nil) require.NotEqual(t, it.Next(), chunkenc.ValNone) ts, v := it.At() require.Equal(t, 3.14, v) @@ -51,7 +51,7 @@ func TestTimeSeriesSeriesSet(t *testing.T) { ss = newTimeSeriesSeriesSet(true, timeseries) require.True(t, ss.Next()) - it = ss.At().Iterator() + it = ss.At().Iterator(nil) require.NotEqual(t, it.Seek(2000), chunkenc.ValNone) ts, v = it.At() require.Equal(t, 1.618, v) @@ -84,7 +84,7 @@ func TestTimeSeriesIterator(t *testing.T) { }, } - it := ts.Iterator() + it := ts.Iterator(nil) require.NotEqual(t, it.Seek(1235), chunkenc.ValNone) // Seek to middle i, _ := it.At() require.EqualValues(t, 1235, i) @@ -93,7 +93,7 @@ func TestTimeSeriesIterator(t *testing.T) { require.EqualValues(t, 1236, i) require.Equal(t, it.Seek(1238), chunkenc.ValNone) // Seek past end - it = ts.Iterator() + it = ts.Iterator(nil) require.NotEqual(t, it.Next(), chunkenc.ValNone) require.NotEqual(t, it.Next(), chunkenc.ValNone) i, _ = it.At() @@ -102,7 +102,7 @@ func TestTimeSeriesIterator(t *testing.T) { i, _ = it.At() require.EqualValues(t, 1235, i) - it = ts.Iterator() + it = ts.Iterator(nil) for i := 0; it.Next() != chunkenc.ValNone; { j, _ := it.At() switch i { diff --git a/pkg/querier/tripperware/queryrange/series_test.go b/pkg/querier/tripperware/queryrange/series_test.go index 0ad1b67a3a2..d2bae6acc38 100644 --- a/pkg/querier/tripperware/queryrange/series_test.go +++ b/pkg/querier/tripperware/queryrange/series_test.go @@ -58,8 +58,9 @@ func Test_ResponseToSamples(t *testing.T) { setCt := 0 + 
var iter chunkenc.Iterator for set.Next() { - iter := set.At().Iterator() + iter = set.At().Iterator(iter) require.Nil(t, set.Err()) sampleCt := 0 diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 0e1d99ffe03..b1ee9fd4980 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -44,7 +44,7 @@ type PusherAppender struct { evaluationDelay time.Duration } -func (a *PusherAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) { +func (a *PusherAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, errors.New("querying native histograms is not supported") } diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index e932292213e..cab3e4d2102 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -710,11 +710,12 @@ func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) { } ruleDesc = &RuleStateDesc{ Rule: &rulespb.RuleDesc{ - Expr: rule.Query().String(), - Alert: rule.Name(), - For: rule.HoldDuration(), - Labels: cortexpb.FromLabelsToLabelAdapters(rule.Labels()), - Annotations: cortexpb.FromLabelsToLabelAdapters(rule.Annotations()), + Expr: rule.Query().String(), + Alert: rule.Name(), + For: rule.HoldDuration(), + KeepFiringFor: rule.KeepFiringFor(), + Labels: cortexpb.FromLabelsToLabelAdapters(rule.Labels()), + Annotations: cortexpb.FromLabelsToLabelAdapters(rule.Annotations()), }, State: rule.State().String(), Health: string(rule.Health()), diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go index 778448f6633..b6081b637a8 100644 --- a/pkg/ruler/rulespb/rules.pb.go +++ b/pkg/ruler/rulespb/rules.pb.go @@ -122,12 +122,13 @@ func (m *RuleGroupDesc) GetOptions() []*types.Any { // RuleDesc is a proto representation of a Prometheus Rule type RuleDesc struct { - Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"` - Record string `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` - Alert string `protobuf:"bytes,3,opt,name=alert,proto3" json:"alert,omitempty"` - For time.Duration `protobuf:"bytes,4,opt,name=for,proto3,stdduration" json:"for"` - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,5,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` - Annotations []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,6,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"annotations"` + Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"` + Record string `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` + Alert string `protobuf:"bytes,3,opt,name=alert,proto3" json:"alert,omitempty"` + For time.Duration `protobuf:"bytes,4,opt,name=for,proto3,stdduration" json:"for"` + Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,5,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` + Annotations []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,6,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"annotations"` + KeepFiringFor time.Duration `protobuf:"bytes,13,opt,name=keepFiringFor,proto3,stdduration" json:"keep_firing_for"` } func (m *RuleDesc) Reset() { *m = RuleDesc{} } @@ -190,6 +191,13 @@ 
func (m *RuleDesc) GetFor() time.Duration { return 0 } +func (m *RuleDesc) GetKeepFiringFor() time.Duration { + if m != nil { + return m.KeepFiringFor + } + return 0 +} + func init() { proto.RegisterType((*RuleGroupDesc)(nil), "rules.RuleGroupDesc") proto.RegisterType((*RuleDesc)(nil), "rules.RuleDesc") @@ -198,37 +206,39 @@ func init() { func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } var fileDescriptor_8e722d3e922f0937 = []byte{ - // 471 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xbf, 0x6e, 0xd3, 0x40, - 0x18, 0xf7, 0x35, 0x8e, 0x63, 0x5f, 0x54, 0x11, 0x1d, 0x11, 0x72, 0x2b, 0x74, 0x89, 0x2a, 0x21, - 0x65, 0x3a, 0x4b, 0x45, 0x0c, 0x0c, 0x08, 0x25, 0xaa, 0x84, 0x14, 0x31, 0x20, 0x8f, 0x6c, 0x67, - 0xe7, 0x6a, 0x02, 0xae, 0xef, 0x74, 0x3e, 0xa3, 0x76, 0xe3, 0x11, 0x18, 0x79, 0x04, 0x1e, 0xa5, - 0x63, 0xc6, 0x8a, 0xa1, 0x10, 0x67, 0x61, 0xac, 0xc4, 0x03, 0x80, 0xee, 0x8f, 0x01, 0xc1, 0x02, - 0x43, 0x27, 0x7f, 0xdf, 0xf7, 0xbb, 0xcf, 0xbf, 0x3f, 0x77, 0x70, 0x28, 0x9b, 0x92, 0xd5, 0x44, - 0x48, 0xae, 0x38, 0xea, 0x9b, 0xe6, 0x70, 0x5c, 0xf0, 0x82, 0x9b, 0x49, 0xa2, 0x2b, 0x0b, 0x1e, - 0xe2, 0x82, 0xf3, 0xa2, 0x64, 0x89, 0xe9, 0xb2, 0xe6, 0x34, 0x59, 0x35, 0x92, 0xaa, 0x35, 0xaf, - 0x1c, 0x7e, 0xf0, 0x27, 0x4e, 0xab, 0x0b, 0x07, 0x3d, 0x2e, 0xd6, 0xea, 0x55, 0x93, 0x91, 0x9c, - 0x9f, 0x25, 0x39, 0x97, 0x8a, 0x9d, 0x0b, 0xc9, 0x5f, 0xb3, 0x5c, 0xb9, 0x2e, 0x11, 0x6f, 0x8a, - 0x0e, 0xc8, 0x5c, 0x61, 0x57, 0x8f, 0xbe, 0x03, 0xb8, 0x9f, 0x36, 0x25, 0x7b, 0x26, 0x79, 0x23, - 0x4e, 0x58, 0x9d, 0x23, 0x04, 0xfd, 0x8a, 0x9e, 0xb1, 0x18, 0x4c, 0xc1, 0x2c, 0x4a, 0x4d, 0x8d, - 0xee, 0xc3, 0x48, 0x7f, 0x6b, 0x41, 0x73, 0x16, 0xef, 0x19, 0xe0, 0xd7, 0x00, 0x3d, 0x85, 0xe1, - 0xba, 0x52, 0x4c, 0xbe, 0xa5, 0x65, 0xdc, 0x9b, 0x82, 0xd9, 0xf0, 0xf8, 0x80, 0x58, 0xb1, 0xa4, - 0x13, 0x4b, 0x4e, 0x9c, 0x99, 0x45, 0x78, 0x79, 0x3d, 0xf1, 0x3e, 0x7c, 0x9e, 0x80, 0xf4, 0xe7, - 0x12, 0x7a, 0x00, 0x6d, 0x32, 0xb1, 0x3f, 0xed, 0xcd, 0x86, 0xc7, 0x77, 0x88, 0x0d, 0x4d, 0xeb, - 0xd2, 0x92, 0x52, 0x8b, 0x6a, 0x65, 0x4d, 0xcd, 0x64, 0x1c, 0x58, 0x65, 0xba, 0x46, 0x04, 0x0e, - 0xb8, 0xd0, 0x3f, 0xae, 0xe3, 0xc8, 0x2c, 0x8f, 0xff, 0xa2, 0x9e, 0x57, 0x17, 0x69, 0x77, 0x68, - 0xe9, 0x87, 0xfd, 0x51, 0xb0, 0xf4, 0xc3, 0xc1, 0x28, 0x5c, 0xfa, 0x61, 0x38, 0x8a, 0x8e, 0xbe, - 0xed, 0xc1, 0xb0, 0x63, 0xd2, 0x14, 0x3a, 0xbc, 0xce, 0xbc, 0xae, 0xd1, 0x3d, 0x18, 0x48, 0x96, - 0x73, 0xb9, 0x72, 0xce, 0x5d, 0x87, 0xc6, 0xb0, 0x4f, 0x4b, 0x26, 0x95, 0xf1, 0x1c, 0xa5, 0xb6, - 0x41, 0x8f, 0x60, 0xef, 0x94, 0xcb, 0xd8, 0xff, 0xf7, 0x1c, 0xf4, 0x79, 0x54, 0xc1, 0xa0, 0xa4, - 0x19, 0x2b, 0xeb, 0xb8, 0x6f, 0x6c, 0xdc, 0x25, 0xdd, 0x7d, 0x91, 0xe7, 0x7a, 0xfe, 0x82, 0xae, - 0xe5, 0x62, 0xae, 0x77, 0x3e, 0x5d, 0x4f, 0xfe, 0xeb, 0xbe, 0xed, 0xfe, 0x7c, 0x45, 0x85, 0x62, - 0x32, 0x75, 0x2c, 0xe8, 0x1c, 0x0e, 0x69, 0x55, 0x71, 0x45, 0x6d, 0x76, 0xc1, 0xad, 0x92, 0xfe, - 0x4e, 0x65, 0xb2, 0xdf, 0x5f, 0x3c, 0xd9, 0x6c, 0xb1, 0x77, 0xb5, 0xc5, 0xde, 0xcd, 0x16, 0x83, - 0x77, 0x2d, 0x06, 0x1f, 0x5b, 0x0c, 0x2e, 0x5b, 0x0c, 0x36, 0x2d, 0x06, 0x5f, 0x5a, 0x0c, 0xbe, - 0xb6, 0xd8, 0xbb, 0x69, 0x31, 0x78, 0xbf, 0xc3, 0xde, 0x66, 0x87, 0xbd, 0xab, 0x1d, 0xf6, 0x5e, - 0x0e, 0xcc, 0x43, 0x10, 0x59, 0x16, 0x98, 0x40, 0x1f, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc1, - 0x26, 0xeb, 0x81, 0x5f, 0x03, 0x00, 0x00, + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0x4f, 0x6f, 0xd3, 0x30, + 
0x1c, 0x8d, 0x69, 0x9a, 0xa6, 0xae, 0xaa, 0x55, 0xa6, 0x42, 0xd9, 0x40, 0x6e, 0x35, 0x09, 0xa9, + 0xa7, 0x44, 0x1a, 0xe2, 0xc0, 0x01, 0xa1, 0x56, 0xd3, 0x90, 0x2a, 0x0e, 0x28, 0x47, 0x84, 0x34, + 0x39, 0xa9, 0x1b, 0xc2, 0xb2, 0xd8, 0x72, 0x1c, 0xb4, 0xdd, 0xf8, 0x08, 0x1c, 0xf9, 0x08, 0x7c, + 0x94, 0x1d, 0xcb, 0x6d, 0xe2, 0x50, 0x68, 0x7a, 0x41, 0x9c, 0xf6, 0x0d, 0x40, 0xb6, 0x13, 0xfe, + 0x1e, 0x80, 0xc3, 0x4e, 0xf9, 0xfd, 0xfc, 0xfc, 0xfc, 0xde, 0xef, 0xfd, 0x02, 0x7b, 0xa2, 0xcc, + 0x68, 0xe1, 0x73, 0xc1, 0x24, 0x43, 0x6d, 0xdd, 0xec, 0x0d, 0x13, 0x96, 0x30, 0x7d, 0x12, 0xa8, + 0xca, 0x80, 0x7b, 0x38, 0x61, 0x2c, 0xc9, 0x68, 0xa0, 0xbb, 0xa8, 0x5c, 0x06, 0x8b, 0x52, 0x10, + 0x99, 0xb2, 0xbc, 0xc6, 0x77, 0x7f, 0xc7, 0x49, 0x7e, 0x5e, 0x43, 0x0f, 0x92, 0x54, 0xbe, 0x28, + 0x23, 0x3f, 0x66, 0xa7, 0x41, 0xcc, 0x84, 0xa4, 0x67, 0x5c, 0xb0, 0x97, 0x34, 0x96, 0x75, 0x17, + 0xf0, 0x93, 0xa4, 0x01, 0xa2, 0xba, 0x30, 0xd4, 0xfd, 0xaf, 0x00, 0xf6, 0xc3, 0x32, 0xa3, 0x8f, + 0x05, 0x2b, 0xf9, 0x21, 0x2d, 0x62, 0x84, 0xa0, 0x9d, 0x93, 0x53, 0xea, 0x81, 0x31, 0x98, 0x74, + 0x43, 0x5d, 0xa3, 0x3b, 0xb0, 0xab, 0xbe, 0x05, 0x27, 0x31, 0xf5, 0x6e, 0x68, 0xe0, 0xc7, 0x01, + 0x7a, 0x04, 0xdd, 0x34, 0x97, 0x54, 0xbc, 0x22, 0x99, 0xd7, 0x1a, 0x83, 0x49, 0xef, 0x60, 0xd7, + 0x37, 0x66, 0xfd, 0xc6, 0xac, 0x7f, 0x58, 0x0f, 0x33, 0x73, 0x2f, 0xd6, 0x23, 0xeb, 0xed, 0xc7, + 0x11, 0x08, 0xbf, 0x93, 0xd0, 0x5d, 0x68, 0x92, 0xf1, 0xec, 0x71, 0x6b, 0xd2, 0x3b, 0xd8, 0xf1, + 0x4d, 0x68, 0xca, 0x97, 0xb2, 0x14, 0x1a, 0x54, 0x39, 0x2b, 0x0b, 0x2a, 0x3c, 0xc7, 0x38, 0x53, + 0x35, 0xf2, 0x61, 0x87, 0x71, 0xf5, 0x70, 0xe1, 0x75, 0x35, 0x79, 0xf8, 0x87, 0xf4, 0x34, 0x3f, + 0x0f, 0x9b, 0x4b, 0x73, 0xdb, 0x6d, 0x0f, 0x9c, 0xb9, 0xed, 0x76, 0x06, 0xee, 0xdc, 0x76, 0xdd, + 0x41, 0x77, 0xff, 0x7d, 0x0b, 0xba, 0x8d, 0x92, 0x92, 0x50, 0xe1, 0x35, 0xc3, 0xab, 0x1a, 0xdd, + 0x82, 0x8e, 0xa0, 0x31, 0x13, 0x8b, 0x7a, 0xf2, 0xba, 0x43, 0x43, 0xd8, 0x26, 0x19, 0x15, 0x52, + 0xcf, 0xdc, 0x0d, 0x4d, 0x83, 0xee, 0xc3, 0xd6, 0x92, 0x09, 0xcf, 0xfe, 0xf7, 0x1c, 0xd4, 0x7d, + 0x94, 0x43, 0x27, 0x23, 0x11, 0xcd, 0x0a, 0xaf, 0xad, 0xc7, 0xb8, 0xe9, 0x37, 0xfb, 0xf2, 0x9f, + 0xa8, 0xf3, 0xa7, 0x24, 0x15, 0xb3, 0xa9, 0xe2, 0x7c, 0x58, 0x8f, 0xfe, 0x6b, 0xdf, 0x86, 0x3f, + 0x5d, 0x10, 0x2e, 0xa9, 0x08, 0x6b, 0x15, 0x74, 0x06, 0x7b, 0x24, 0xcf, 0x99, 0x24, 0x26, 0x3b, + 0xe7, 0x5a, 0x45, 0x7f, 0x96, 0x42, 0xcf, 0x61, 0xff, 0x84, 0x52, 0x7e, 0x94, 0x8a, 0x34, 0x4f, + 0x8e, 0x98, 0xf0, 0xfa, 0x7f, 0x8b, 0xea, 0xb6, 0x72, 0xf0, 0x65, 0x3d, 0xda, 0x51, 0xbc, 0xe3, + 0xa5, 0x26, 0x1e, 0x2f, 0x99, 0xd0, 0xe9, 0xfd, 0xfa, 0x98, 0xde, 0x6c, 0x7f, 0xf6, 0x70, 0xb5, + 0xc1, 0xd6, 0xe5, 0x06, 0x5b, 0x57, 0x1b, 0x0c, 0x5e, 0x57, 0x18, 0xbc, 0xab, 0x30, 0xb8, 0xa8, + 0x30, 0x58, 0x55, 0x18, 0x7c, 0xaa, 0x30, 0xf8, 0x5c, 0x61, 0xeb, 0xaa, 0xc2, 0xe0, 0xcd, 0x16, + 0x5b, 0xab, 0x2d, 0xb6, 0x2e, 0xb7, 0xd8, 0x7a, 0xd6, 0xd1, 0xbf, 0x19, 0x8f, 0x22, 0x47, 0x7b, + 0xb8, 0xf7, 0x2d, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x1a, 0x99, 0x0f, 0xbd, 0x03, 0x00, 0x00, } func (this *RuleGroupDesc) Equal(that interface{}) bool { @@ -327,6 +337,9 @@ func (this *RuleDesc) Equal(that interface{}) bool { return false } } + if this.KeepFiringFor != that1.KeepFiringFor { + return false + } return true } func (this *RuleGroupDesc) GoString() string { @@ -352,7 +365,7 @@ func (this *RuleDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) + s := make([]string, 0, 11) s = append(s, "&rulespb.RuleDesc{") s = append(s, "Expr: "+fmt.Sprintf("%#v", 
this.Expr)+",\n") s = append(s, "Record: "+fmt.Sprintf("%#v", this.Record)+",\n") @@ -360,6 +373,7 @@ func (this *RuleDesc) GoString() string { s = append(s, "For: "+fmt.Sprintf("%#v", this.For)+",\n") s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") + s = append(s, "KeepFiringFor: "+fmt.Sprintf("%#v", this.KeepFiringFor)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -471,6 +485,14 @@ func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.KeepFiringFor, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.KeepFiringFor):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintRules(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x6a if len(m.Annotations) > 0 { for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { { @@ -499,12 +521,12 @@ func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x2a } } - n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) - if err2 != nil { - return 0, err2 + n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) + if err3 != nil { + return 0, err3 } - i -= n2 - i = encodeVarintRules(dAtA, i, uint64(n2)) + i -= n3 + i = encodeVarintRules(dAtA, i, uint64(n3)) i-- dAtA[i] = 0x22 if len(m.Alert) > 0 { @@ -609,6 +631,8 @@ func (m *RuleDesc) Size() (n int) { n += 1 + l + sovRules(uint64(l)) } } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.KeepFiringFor) + n += 1 + l + sovRules(uint64(l)) return n } @@ -654,6 +678,7 @@ func (this *RuleDesc) String() string { `For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `KeepFiringFor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.KeepFiringFor), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1142,6 +1167,39 @@ func (m *RuleDesc) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepFiringFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.KeepFiringFor, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRules(dAtA[iNdEx:]) diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto index de508848938..cb48572f0dc 100644 --- a/pkg/ruler/rulespb/rules.proto +++ b/pkg/ruler/rulespb/rules.proto @@ -44,4 +44,5 @@ message RuleDesc { (gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" ]; + google.protobuf.Duration keepFiringFor = 13 [(gogoproto.nullable) = 
false,(gogoproto.stdduration) = true, (gogoproto.jsontag) = "keep_firing_for"]; } \ No newline at end of file diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index ac3fe6a2b07..2be473f76ec 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -82,7 +82,7 @@ func NewBucketStores(cfg tsdb.BlocksStorageConfig, shardingStrategy ShardingStra // The number of concurrent queries against the tenants BucketStores are limited. queryGateReg := extprom.WrapRegistererWithPrefix("cortex_bucket_stores_", reg) - queryGate := gate.New(queryGateReg, cfg.BucketStore.MaxConcurrent) + queryGate := gate.New(queryGateReg, cfg.BucketStore.MaxConcurrent, gate.Queries) promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "cortex_bucket_stores_gate_queries_concurrent_max", Help: "Number of maximum concurrent queries allowed.", diff --git a/pkg/tracing/migration/bridge_wrapper.go b/pkg/tracing/migration/bridge_wrapper.go index e751ca8a745..5a17cc7fec2 100644 --- a/pkg/tracing/migration/bridge_wrapper.go +++ b/pkg/tracing/migration/bridge_wrapper.go @@ -11,7 +11,7 @@ type CortexBridgeTracerWrapper struct { bt *bridge.BridgeTracer } -func NewCortexBridgeTracerWrapper(tracer trace.Tracer) (*CortexBridgeTracerWrapper, *bridge.WrapperTracerProvider) { +func NewCortexBridgeTracerWrapper(tracer trace.Tracer) (*CortexBridgeTracerWrapper, trace.TracerProvider) { bt, wp := bridge.NewTracerPair(tracer) return &CortexBridgeTracerWrapper{bt: bt}, wp } diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index efedadbea25..c9ba91825c2 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.13.0" +const Version = "1.14.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 6e3ee8d6ab1..06b957349af 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + ## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index d4aad9bf395..c17faa142a4 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -147,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index e930a16605d..80321d29a9b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,47 @@ # Release History +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. 
+ ## 1.1.0 (2022-06-03) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go index 7119699f9c6..28c64678c76 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -10,11 +10,11 @@ Package azcore implements an HTTP request/response middleware pipeline used by A The middleware consists of three components. - - One or more Policy instances. - - A Transporter instance. - - A Pipeline instance that combines the Policy and Transporter instances. + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. -Implementing the Policy Interface +# Implementing the Policy Interface A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share @@ -34,53 +34,53 @@ and error instances to its caller. Template for implementing a stateless Policy: - type policyFunc func(*policy.Request) (*http.Response, error) - // Do implements the Policy interface on policyFunc. + type policyFunc func(*policy.Request) (*http.Response, error) - func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { - return pf(req) - } + // Do implements the Policy interface on policyFunc. + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } - func NewMyStatelessPolicy() policy.Policy { - return policyFunc(func(req *policy.Request) (*http.Response, error) { - // TODO: mutate/process Request here + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here - // forward Request to next Policy & get Response/error - resp, err := req.Next() + // forward Request to next Policy & get Response/error + resp, err := req.Next() - // TODO: mutate/process Response/error here + // TODO: mutate/process Response/error here - // return Response/error to previous Policy - return resp, err - }) - } + // return Response/error to previous Policy + return resp, err + }) + } Template for implementing a stateful Policy: - type MyStatefulPolicy struct { - // TODO: add configuration/setting fields here - } + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } - // TODO: add initialization args to NewMyStatefulPolicy() - func NewMyStatefulPolicy() policy.Policy { - return &MyStatefulPolicy{ - // TODO: initialize configuration/setting fields here - } - } + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } - func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { - // TODO: mutate/process Request here + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here - // forward Request to next Policy & get Response/error - resp, err := req.Next() + // forward Request to next Policy & get Response/error + resp, err := req.Next() - // TODO: mutate/process Response/error here + // TODO: mutate/process Response/error here - // return Response/error to previous Policy - return resp, err - } + // return Response/error to previous Policy + return resp, err + } -Implementing 
the Transporter Interface +# Implementing the Transporter Interface The Transporter interface is responsible for sending the HTTP request and returning the corresponding HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter @@ -88,66 +88,66 @@ implementation uses a shared http.Client from the standard library. The same stateful/stateless rules for Policy implementations apply to Transporter implementations. -Using Policy and Transporter Instances Via a Pipeline +# Using Policy and Transporter Instances Via a Pipeline To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. - func NewPipeline(transport Transporter, policies ...Policy) Pipeline + func NewPipeline(transport Transporter, policies ...Policy) Pipeline The specified Policy instances form a chain and are invoked in the order provided to NewPipeline followed by the Transporter. Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. - func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) - func (p Pipeline) Do(req *Request) (*http.Request, error) + func (p Pipeline) Do(req *Request) (*http.Request, error) The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter instances. The response/error is then sent through the same chain of Policy instances in reverse order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with TransportA. - pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) The flow of Request and Response looks like the following: - policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ - | - HTTP(S) endpoint - | - caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ -Creating a Request Instance +# Creating a Request Instance The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also contains some internal state and provides various convenience methods. You create a Request instance by calling the runtime.NewRequest function: - func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) If the Request should contain a body, call the SetBody method. - func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error A seekable stream is required so that upon retry, the retry Policy instance can seek the stream back to the beginning before retrying the network request and re-uploading the body. -Sending an Explicit Null +# Sending an Explicit Null Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. - { - "delete-me": null - } + { + "delete-me": null + } This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as a means to resolve the ambiguity between a field to be excluded and its zero-value. 
- type Widget struct { - Name *string `json:",omitempty"` - Count *int `json:",omitempty"` - } + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } In the above example, Name and Count are defined as pointer-to-type to disambiguate between a missing value (nil) and a zero-value (0) which might have semantic differences. @@ -157,18 +157,18 @@ a Widget's count, one simply specifies the new value for Count, leaving Name nil To fulfill the requirement for sending a JSON null, the NullValue() function can be used. - w := Widget{ - Count: azcore.NullValue[*int](), - } + w := Widget{ + Count: azcore.NullValue[*int](), + } This sends an explict "null" for Count, indicating that any current value for Count should be deleted. -Processing the Response +# Processing the Response When the HTTP response is received, the *http.Response is returned directly. Each Policy instance can inspect/mutate the *http.Response. -Built-in Logging +# Built-in Logging To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. @@ -178,40 +178,40 @@ own synchronization to handle concurrent invocations. See the docs for the log package for further details. -Pageable Operations +# Pageable Operations Pageable operations return potentially large data sets spread over multiple GET requests. The result of each GET is a "page" of data consisting of a slice of items. Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. - func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. - pager := widgetClient.NewListWidgetsPager(nil) - for pager.More() { - page, err := pager.NextPage(context.TODO()) - // handle err - for _, widget := range page.Values { - // process widget - } - } + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } -Long-Running Operations +# Long-Running Operations Long-running operations (LROs) are operations consisting of an initial request to start the operation followed by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one of the following values. - * Succeeded - the LRO completed successfully - * Failed - the LRO failed to complete - * Canceled - the LRO was canceled + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. - func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. It does _not_ mean that the widget has been created or updated (or failed to be created/updated). 
@@ -219,11 +219,11 @@ It does _not_ mean that the widget has been created or updated (or failed to be The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, call the PollUntilDone() method. - poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) - // handle err - result, err := poller.PollUntilDone(context.TODO(), nil) - // handle err - // use result + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the context is canceled/timed out. @@ -232,22 +232,22 @@ Note that LROs can take anywhere from several seconds to several minutes. The d this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation mechanism as required. -Resume Tokens +# Resume Tokens Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. - token, err := poller.ResumeToken() - // handle error + token, err := poller.ResumeToken() + // handle error Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls to poller.Poll() might change the poller's state. In this case, a new token should be created. After the token has been obtained, it can be used to recreate an instance of the originating poller. - poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ - ResumeToken: token, - }) + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index 8fca32a7d4c..6e029d493ce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -8,7 +8,6 @@ package exported import ( "io" - "io/ioutil" "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -51,7 +50,7 @@ func Payload(resp *http.Response) ([]byte, error) { if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok { return buf.Bytes(), nil } - bytesBody, err := ioutil.ReadAll(resp.Body) + bytesBody, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index 0194b8b0114..d34f161c7b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -71,6 +71,13 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi if !pollers.IsValidURL(asyncURL) { return nil, fmt.Errorf("invalid polling URL %s", asyncURL) } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. 
+ // it's ok if there's no provisioning state. + state, _ := pollers.GetProvisioningState(resp) + if state == "" { + state = pollers.StatusInProgress + } p := &Poller[T]{ pl: pl, resp: resp, @@ -79,7 +86,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, - CurState: pollers.StatusInProgress, + CurState: state, } return p, nil } @@ -92,6 +99,10 @@ func (p *Poller[T]) Done() bool { // Poll retrieves the current state of the LRO. func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } state, err := pollers.GetStatus(resp) if err != nil { return "", err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 99e9f2f8d0a..7efdd8a0df1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -100,6 +100,10 @@ func (p *Poller[T]) Done() bool { func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } if resp.StatusCode == http.StatusNoContent { p.resp = resp p.CurState = pollers.StatusSucceeded diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index 56c2b902929..276685da443 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -64,12 +64,19 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { if !pollers.IsValidURL(locURL) { return nil, fmt.Errorf("invalid polling URL %s", locURL) } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. 
+ state, _ := pollers.GetProvisioningState(resp) + if state == "" { + state = pollers.StatusInProgress + } return &Poller[T]{ pl: pl, resp: resp, Type: kind, PollURL: locURL, - CurState: pollers.StatusInProgress, + CurState: state, }, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index dd714e768c5..c3c648266a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -91,6 +91,10 @@ func (p *Poller[T]) Done() bool { func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } state, err := pollers.GetStatus(resp) if err != nil { return "", err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 4dd39e68ce8..75d241c5b42 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -30,5 +30,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.1.0" + Version = "v1.2.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index bfc71e9a002..27c30229883 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // Policy represents an extensibility point for the Pipeline that can mutate the specified @@ -27,6 +28,9 @@ type Request = exported.Request // ClientOptions contains optional settings for a client's pipeline. // All zero-value fields will be initialized with default values. type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. Cloud cloud.Configuration @@ -39,6 +43,10 @@ type ClientOptions struct { // Telemetry configures the built-in telemetry policy. Telemetry TelemetryOptions + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + // Transport sets the transport for HTTP requests. Transport Transporter @@ -69,7 +77,8 @@ type LogOptions struct { } // RetryOptions configures the retry policy's behavior. -// Call NewRetryOptions() to create an instance with default values. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. type RetryOptions struct { // MaxRetries specifies the maximum number of attempts a failed operation will be retried // before producing an error. 
@@ -82,6 +91,7 @@ type RetryOptions struct { TryTimeout time.Duration // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. // The default value is four seconds. A value less than zero means no delay between retries. RetryDelay time.Duration @@ -92,8 +102,15 @@ type RetryOptions struct { MaxRetryDelay time.Duration // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. - // The default value is the status codes in StatusCodesForRetry. - // Specifying an empty slice will cause retries to happen only for transport errors. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. StatusCodes []int } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go index ad75ae2ab24..a2906f51bca 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -16,6 +16,7 @@ import ( // PipelineOptions contains Pipeline options for SDK developers type PipelineOptions struct { AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions PerCall, PerRetry []policy.Policy } @@ -32,13 +33,13 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy cp = *options } if len(plOpts.AllowedHeaders) > 0 { - headers := make([]string, 0, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) copy(headers, plOpts.AllowedHeaders) headers = append(headers, cp.Logging.AllowedHeaders...) cp.Logging.AllowedHeaders = headers } if len(plOpts.AllowedQueryParameters) > 0 { - qp := make([]string, 0, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) copy(qp, plOpts.AllowedQueryParameters) qp = append(qp, cp.Logging.AllowedQueryParams...) 
cp.Logging.AllowedQueryParams = qp @@ -46,6 +47,9 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy // we put the includeResponsePolicy at the very beginning so that the raw response // is populated with the final response (some policies might mutate the response) policies := []policy.Policy{policyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } if !cp.Telemetry.Disabled { policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 00000000000..e5309aa6c15 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. +func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. 
+func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go index faf175e3fd2..30a02a7a41b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -9,7 +9,7 @@ package runtime import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "sort" "strings" @@ -210,7 +210,7 @@ func writeReqBody(req *policy.Request, b *bytes.Buffer) error { if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { return nil } - body, err := ioutil.ReadAll(req.Raw().Body) + body, err := io.ReadAll(req.Raw().Body) if err != nil { fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) return err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index 9d630e47125..b3300201872 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -31,18 +31,21 @@ func setDefaults(o *policy.RetryOptions) { } else if o.MaxRetries < 0 { o.MaxRetries = 0 } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds if o.MaxRetryDelay == 0 { - o.MaxRetryDelay = 120 * time.Second + o.MaxRetryDelay = 60 * time.Second } else if o.MaxRetryDelay < 0 { // not really an unlimited cap, but sufficiently large enough to be considered as such o.MaxRetryDelay = math.MaxInt64 } if o.RetryDelay == 0 { - o.RetryDelay = 4 * time.Second + o.RetryDelay = 800 * time.Millisecond } else if o.RetryDelay < 0 { o.RetryDelay = 0 } if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go o.StatusCodes = []int{ http.StatusRequestTimeout, // 408 http.StatusTooManyRequests, // 429 @@ -106,7 +109,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { try := int32(1) for { resp = nil // reset - log.Writef(log.EventRetryPolicy, "\n=====> Try=%d %s %s", try, req.Raw().Method, req.Raw().URL.String()) + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because // the stream may not be at offset 0 when we first get it and we want the same behavior for the @@ -145,6 +148,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { if err == nil && !HasStatusCode(resp, options.StatusCodes...) { // if there is no error and the response code isn't in the list of retry codes then we're done. 
+ log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") return } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil { // don't retry if the parent context has been cancelled or its deadline exceeded @@ -167,14 +171,19 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { return } - // drain before retrying so nothing is leaked - Drain(resp) - // use the delay from retry-after if available delay := shared.RetryAfter(resp) if delay <= 0 { delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return } + + // drain before retrying so nothing is leaked + Drain(resp) + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) select { case <-time.After(delay): diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 21e5c578d54..98e00718488 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,8 @@ import ( "fmt" "io" "mime/multipart" + "os" + "path" "reflect" "strings" "time" @@ -37,6 +39,7 @@ const ( ) // NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { return exported.NewRequest(ctx, httpMethod, endpoint) } @@ -55,19 +58,23 @@ func JoinPaths(root string, paths ...string) string { root, qps = splitPath[0], splitPath[1] } - for i := 0; i < len(paths); i++ { - root = strings.TrimRight(root, "/") - paths[i] = strings.TrimLeft(paths[i], "/") - root += "/" + paths[i] + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" } if qps != "" { - if !strings.HasSuffix(root, "/") { - root += "/" - } - return root + "?" + qps + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p } - return root + return root + p } // EncodeByteArray will base-64 encode the byte slice v. @@ -88,7 +95,9 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. 
func MarshalAsJSON(req *policy.Request, v interface{}) error { - v = cloneWithoutReadOnlyFields(v) + if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { + v = cloneWithoutReadOnlyFields(v) + } b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -113,16 +122,30 @@ func MarshalAsXML(req *policy.Request, v interface{}) error { func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { body := bytes.Buffer{} writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + for k, v := range formData { if rsc, ok := v.(io.ReadSeekCloser); ok { - // this is the body to upload, the key is its file name - fd, err := writer.CreateFormFile(k, k) - if err != nil { + if err := writeContent(k, k, rsc); err != nil { return err } - // copy the data to the form file - if _, err = io.Copy(fd, rsc); err != nil { - return err + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } } continue } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index 2322f0a201b..f86ec0b95ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -13,7 +13,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -86,7 +85,7 @@ func UnmarshalAsXML(resp *http.Response, v interface{}) error { // Drain reads the response body to completion then closes it. The bytes read are discarded. func Drain(resp *http.Response) { if resp != nil && resp.Body != nil { - _, _ = io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 00000000000..80282d4ab0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. + SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. 
+type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. + SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 00000000000..75f757cedd3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified name and version. +// - name - the name of the tracer object, typically the fully qualified name of the service client +// - version - the version of the module in which the service client resides +func (p Provider) NewTracer(name, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(name, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // for future expansion +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + return Tracer{ + newSpanFn: newSpanFn, + } +} + +// Tracer is the factory that creates Span instances. +type Tracer struct { + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + return t.newSpanFn(ctx, spanName, options) + } + return ctx, Span{} +} + +// SpanOptions contains optional settings for creating a span. 
+type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. + Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // AddError contains the implementation for the Span.AddError method. + AddError func(err error) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. +func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// AddError adds the specified error event to the span. +func (s Span) AddError(err error) { + if s.impl.AddError != nil { + s.impl.AddError(err) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). 
+ Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 670839fd441..5877e476f6f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,11 +1,41 @@ # Release History +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + ## 1.1.0 (2022-06-07) ### Features Added * `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a region name. + ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) ## 1.0.1 (2022-06-07) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 68b35a545c3..2df42c813a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -133,8 +133,9 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- -|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.2.0-beta.2#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion |[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret ### Authenticating Users @@ -168,7 +169,8 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |-|- |`AZURE_CLIENT_ID`|ID of an Azure Active Directory application |`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant -|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key (without password protection) +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any #### Username and password diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 1e28d181fef..affa91d0874 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -88,6 +88,7 @@ azlog.SetEvents(azidentity.EventAuthentication) |---|---|---| |Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + ## Troubleshoot ClientSecretCredential authentication issues | Error Code | Issue | Mitigation | @@ -96,6 +97,7 @@ azlog.SetEvents(azidentity.EventAuthentication) |AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| |AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | @@ -103,12 +105,14 @@ azlog.SetEvents(azidentity.EventAuthentication) |AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| |AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + ## Troubleshoot UsernamePasswordCredential authentication issues | Error Code | Issue | Mitigation | |---|---|---| |AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.| + ## Troubleshoot ManagedIdentityCredential authentication issues `ManagedIdentityCredential` is designed to work on a variety of Azure hosts support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`. @@ -164,6 +168,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio |---|---|---| |"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. 
To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + ## Troubleshoot AzureCliCredential authentication issues | Error Message |Description| Mitigation | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 0faee55ef04..60c3b9a1ec6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -11,7 +11,6 @@ import ( "context" "errors" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -26,9 +25,15 @@ import ( ) const ( - azureAuthorityHost = "AZURE_AUTHORITY_HOST" - azureClientID = "AZURE_CLIENT_ID" - azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" @@ -36,6 +41,37 @@ const ( tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" ) +func getConfidentialClient(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidential.Client, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + o := []confidential.Option{ + confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + o = append(o, additionalOpts...) + return confidential.New(clientID, cred, o...) +} + +func getPublicClient(clientID, tenantID string, co *azcore.ClientOptions) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + return public.New(clientID, + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + ) +} + // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user // 2. 
value of AZURE_AUTHORITY_HOST @@ -94,7 +130,7 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { if rsc, ok := r.Body.(io.ReadSeekCloser); ok { body = rsc } else { - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 00000000000..ffcf2094be2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,74 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. ClientCertificateCredential has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions) + if err != nil { + return nil, err + } + return &ClientAssertionCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameAssertion + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameAssertion, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index e50157b104d..a61d824ef59 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -9,17 +9,12 @@ package azidentity import ( "context" "crypto" - "crypto/rsa" - "crypto/sha1" "crypto/x509" - "encoding/base64" "encoding/pem" "errors" - "os" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "golang.org/x/crypto/pkcs12" ) @@ -46,37 +41,18 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x if len(certs) == 0 { return nil, errors.New("at least one certificate is required") } - pk, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("'key' must be an *rsa.PrivateKey") - } - if !validTenantID(tenantID) { - return nil, errors.New(tenantIDValidationErr) - } if options == nil { options = &ClientCertificateCredentialOptions{} } - authorityHost, err := setAuthorityHost(options.Cloud) - if err != nil { - return nil, err - } - cert, err := newCertContents(certs, pk, options.SendCertificateChain) + cred, err := confidential.NewCredFromCertChain(certs, key) if err != nil { return nil, err } - cred := confidential.NewCredFromCert(cert.c, key) // TODO: NewCredFromCert should take a slice - if err != nil { - return nil, err - } - o := []confidential.Option{ - confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), - confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), - } + var o []confidential.Option if options.SendCertificateChain { o = append(o, confidential.WithX5C()) } - c, err := confidential.New(clientID, cred, o...) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) 
if err != nil { return nil, err } @@ -156,36 +132,6 @@ func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, c return certs, pk, nil } -type certContents struct { - c *x509.Certificate // the signing cert - fp []byte // the signing cert's fingerprint, a SHA-1 digest - pk *rsa.PrivateKey // the signing key - x5c []string // concatenation of every provided cert, base64 encoded -} - -func newCertContents(certs []*x509.Certificate, key *rsa.PrivateKey, sendCertificateChain bool) (*certContents, error) { - cc := certContents{pk: key} - // need the the signing cert's fingerprint: identify that cert by matching its public key to the private key - for _, cert := range certs { - certKey, ok := cert.PublicKey.(*rsa.PublicKey) - if ok && key.E == certKey.E && key.N.Cmp(certKey.N) == 0 { - fp := sha1.Sum(cert.Raw) - cc.fp = fp[:] - cc.c = cert - if sendCertificateChain { - // signing cert must be first in x5c - cc.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cc.x5c...) - } - } else if sendCertificateChain { - cc.x5c = append(cc.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) - } - } - if len(cc.fp) == 0 || cc.c == nil { - return nil, errors.New("found no certificate matching 'key'") - } - return &cc, nil -} - func loadPEMCert(certData []byte) ([]*pem.Block, error) { blocks := []*pem.Block{} for { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index 6ecb8f4db81..1c3a516601b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -9,11 +9,9 @@ package azidentity import ( "context" "errors" - "os" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -31,25 +29,14 @@ type ClientSecretCredential struct { // NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. 
func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { - if !validTenantID(tenantID) { - return nil, errors.New(tenantIDValidationErr) - } if options == nil { options = &ClientSecretCredentialOptions{} } - authorityHost, err := setAuthorityHost(options.Cloud) - if err != nil { - return nil, err - } cred, err := confidential.NewCredFromSecret(clientSecret) if err != nil { return nil, err } - c, err := confidential.New(clientID, cred, - confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), - confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), - ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 7358558acb5..c2b801c4a6d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -31,9 +31,11 @@ type DefaultAzureCredentialOptions struct { // DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. // It combines credentials suitable for deployment with credentials suitable for local development. // It attempts to authenticate with each of these credential types, in the following order, stopping when one provides a token: -// EnvironmentCredential -// ManagedIdentityCredential -// AzureCLICredential +// +// EnvironmentCredential +// ManagedIdentityCredential +// AzureCLICredential +// // Consult the documentation for these credential types for more information on how they authenticate. // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. 
@@ -65,7 +67,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default msiCred, err := NewManagedIdentityCredential(o) if err == nil { creds = append(creds, msiCred) - msiCred.client.imdsTimeout = time.Second + msiCred.mic.imdsTimeout = time.Second } else { errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index d0c72c34854..2e9b5438dbd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -79,17 +78,7 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp = *options } cp.init() - if !validTenantID(cp.TenantID) { - return nil, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(cp.Cloud) - if err != nil { - return nil, err - } - c, err := public.New(cp.ClientID, - public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)), - public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)), - ) + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 16c595d1d37..b1871b4d4d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -28,7 +28,7 @@ type EnvironmentCredentialOptions struct { // EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending // on environment variable configuration. It reads configuration from these variables, in the following order: // -// Service principal with client secret +// # Service principal with client secret // // AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. // @@ -36,15 +36,17 @@ type EnvironmentCredentialOptions struct { // // AZURE_CLIENT_SECRET: one of the service principal's client secrets // -// Service principal with certificate +// # Service principal with certificate // // AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. // // AZURE_CLIENT_ID: the service principal's client ID // -// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the unencrypted private key. +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. // -// User with username and password +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password // // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". 
// @@ -62,7 +64,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme if options == nil { options = &EnvironmentCredentialOptions{} } - tenantID := os.Getenv("AZURE_TENANT_ID") + tenantID := os.Getenv(azureTenantID) if tenantID == "" { return nil, errors.New("missing environment variable AZURE_TENANT_ID") } @@ -70,7 +72,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme if clientID == "" { return nil, errors.New("missing environment variable " + azureClientID) } - if clientSecret := os.Getenv("AZURE_CLIENT_SECRET"); clientSecret != "" { + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") o := &ClientSecretCredentialOptions{ClientOptions: options.ClientOptions} cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) @@ -79,13 +81,17 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme } return &EnvironmentCredential{cred: cred}, nil } - if certPath := os.Getenv("AZURE_CLIENT_CERTIFICATE_PATH"); certPath != "" { + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") certData, err := os.ReadFile(certPath) if err != nil { return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) } - certs, key, err := ParseCertificates(certData, nil) + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) if err != nil { return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) } @@ -99,8 +105,8 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme } return &EnvironmentCredential{cred: cred}, nil } - if username := os.Getenv("AZURE_USERNAME"); username != "" { - if password := os.Getenv("AZURE_PASSWORD"); password != "" { + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") o := &UsernamePasswordCredentialOptions{ClientOptions: options.ClientOptions} cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index c60d13d0071..6695f1b70e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -75,6 +75,22 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprint(msg, "Response contained no body") } fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } return msg.String() } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index e4aaf45b6dd..9032ae9886a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -56,17 +55,7 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp = *options } cp.init() - if !validTenantID(cp.TenantID) { - return nil, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(cp.Cloud) - if err != nil { - return nil, err - } - c, err := public.New(cp.ClientID, - public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)), - public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)), - ) + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index ce6e1e61474..c9b72663c27 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -11,7 +11,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "net/url" "os" @@ -24,6 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) const ( @@ -149,10 +149,18 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag return &c, nil } -// authenticate creates an authentication request for a Managed Identity and returns the resulting Access Token if successful. -// ctx: The current context for controlling the request lifetime. -// clientID: The client (application) ID of the service principal. -// scopes: The scopes required for the token. 
+// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { var cancel context.CancelFunc if c.imdsTimeout > 0 && c.msiType == msiTypeIMDS { @@ -338,7 +346,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour if pos == -1 { return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) } - key, err := ioutil.ReadFile(header[pos+1:]) + key, err := os.ReadFile(header[pos+1:]) if err != nil { return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index f17ada1c3ed..18078171ee8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) const credNameManagedIdentity = "ManagedIdentityCredential" @@ -70,8 +71,8 @@ type ManagedIdentityCredentialOptions struct { // user-assigned identity. See Azure Active Directory documentation for more information about managed identities: // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { - id ManagedIDKind - client *managedIdentityClient + client confidentialClient + mic *managedIdentityClient } // NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. @@ -79,11 +80,25 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M if options == nil { options = &ManagedIdentityCredentialOptions{} } - client, err := newManagedIdentityClient(options) + mic, err := newManagedIdentityClient(options) if err != nil { return nil, err } - return &ManagedIdentityCredential{id: options.ID, client: client}, nil + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + if err != nil { + return nil, err + } + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + c, err := confidential.New(clientID, cred) + if err != nil { + return nil, err + } + return &ManagedIdentityCredential{client: c, mic: mic}, nil } // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. 
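From the caller's side this refactor is invisible: the credential is constructed the same way, while token caching now happens inside MSAL's confidential client. A minimal sketch, assuming the `ClientID` option type exported by azidentity (the GUID below is a placeholder); pass nil options to use the system-assigned identity:

```go
// Sketch: user-assigned managed identity selected by client ID (example value).
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	opts := &azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"), // placeholder
	}
	cred, err := azidentity.NewManagedIdentityCredential(opts)
	if err != nil {
		log.Fatal(err)
	}
	// MSAL's confidential client caches the token; repeated calls are served
	// from the cache until it nears expiry.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires at", tk.ExpiresOn)
}
```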
@@ -94,12 +109,17 @@ func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.To } // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here scopes := []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - tk, err := c.client.authenticate(ctx, c.id, scopes) + ar, err := c.client.AcquireTokenSilent(ctx, scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, nil + } + ar, err = c.client.AcquireTokenByCredential(ctx, scopes) if err != nil { return azcore.AccessToken{}, err } logGetTokenSuccess(c, opts) - return tk, err + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 8b02e7b47ba..2ab248c3c61 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -37,20 +36,10 @@ type UsernamePasswordCredential struct { // NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user // will authenticate to. Pass nil for options to accept defaults. func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { - if !validTenantID(tenantID) { - return nil, errors.New(tenantIDValidationErr) - } if options == nil { options = &UsernamePasswordCredentialOptions{} } - authorityHost, err := setAuthorityHost(options.Cloud) - if err != nil { - return nil, err - } - c, err := public.New(clientID, - public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - public.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), - ) + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 0fb125ace9e..9757589d166 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -11,5 +11,5 @@ const ( component = "azidentity" // Version is the semantic version (see http://semver.org) of this module. - version = "v1.1.0" + version = "v1.2.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go index 1fdc53615b6..245af7d2bec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -15,7 +15,7 @@ import ( // Caller returns the file and line number of a frame on the caller's stack. // If the funtion fails an empty string is returned. 
// skipFrames - the number of frames to skip when determining the caller. -// Passing a value of 0 will return the immediate caller of this function. +// Passing a value of 0 will return the immediate caller of this function. func Caller(skipFrames int) string { if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { // the skipFrames + 1 is to skip ourselves diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go index b23f3860c5e..238ef42ed03 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -49,9 +49,12 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration const backoff = 30 * time.Second // Minimum wait time between eager update attempts - now, acquire, expired, resource := time.Now(), false, false, er.resource + now, acquire, expired := time.Now(), false, false + // acquire exclusive lock er.cond.L.Lock() + resource := er.resource + for { expired = er.expiration.IsZero() || er.expiration.Before(now) if expired { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index db095b3a26a..2c2008679be 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,26 +1,59 @@ # Release History +## 0.5.1 (2022-10-11) + +### Bugs Fixed + +* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string +* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'. + +### Other Changes + +* Improved docs for client constructors. +* Updating azcore version to 1.1.4 + +## 0.5.0 (2022-09-29) + +### Breaking Changes + +* Complete architectural change for better user experience. Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme) + +### Features Added + +* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977) +* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container). + +### Bugs Fixed + +* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767) +* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937) + ## 0.4.1 (2022-05-12) ### Other Changes + * Updated to latest `azcore` and `internal` modules ## 0.4.0 (2022-04-19) ### Breaking Changes + * Fixed Issue #17150 : Renaming/refactoring high level methods. * Fixed Issue #16972 : Constructors should return clients by reference. -* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags remains the same. +* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags + remains the same. 
### Bugs Fixed + * Fixed Issue #17515 : SetTags options bag missing leaseID. * Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. * Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. * Fixed Issue #17188 : `BlobURLParts` not supporting VersionID -* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods ignoring the options bag. +* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods + ignoring the options bag. * Fixed Issue #16920 : Fixing error handling example. * Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. -* Fixed Issue #16679 : Response parsing issue in List blobs API. +* Fixed Issue #16679 : Response parsing issue in List blobs API. ## 0.3.0 (2022-02-09) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 32a10a005c1..467fe36cd90 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,397 +1,274 @@ # Azure Blob Storage SDK for Go -## Introduction +> Server Version: 2020-10-02 -The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud -storage. This is the new beta client module for Azure Blob Storage, which follows -our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the -previous beta [azblob package](https://github.com/azure/azure-storage-blob-go). +Azure Blob storage is Microsoft's object storage solution for the cloud. Blob +storage is optimized for storing massive amounts of unstructured data. +Unstructured data is data that does not adhere to a particular data model or +definition, such as text or binary data. -## Getting Started +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] -The Azure Blob SDK can access an Azure Storage account. - -### Prerequisites - -* Go versions 1.18 or higher -* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use - the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group` - and `mystorageaccount` with your own unique names): - (Optional) if you want a new resource group to hold the Storage Account: - ``` - az group create --name my-resource-group --location westus2 - ``` - Create the storage account: - ``` - az storage account create --resource-group my-resource-group --name mystorageaccount - ``` - - The storage account name can be queried with: - ``` - az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" - ``` - You can set this as an environment variable with: - ```bash - # PowerShell - $ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" - # bash - export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" - ``` - - Query your storage account keys: - ``` - az storage account keys list --resource-group my-resource-group -n mystorageaccount - ``` - - Output: - ```json - [ - { - "creationTime": "2022-02-07T17:18:44.088870+00:00", - "keyName": "key1", - "permissions": "FULL", - "value": "..." 
- }, - { - "creationTime": "2022-02-07T17:18:44.088870+00:00", - "keyName": "key2", - "permissions": "FULL", - "value": "..." - } - ] - ``` - - ```bash - # PowerShell - $ENV:AZURE_STORAGE_ACCOUNT_KEY="" - # Bash - export AZURE_STORAGE_ACCOUNT_KEY="" - ``` - > You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account. - -#### Create account - -* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account] - , [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account]. +## Getting started ### Install the package -* Install the Azure Blob Storage client module for Go with `go get`: +Install the Azure Blob Storage SDK for Go with [go get][goget]: -```bash +```Powershell go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob ``` -> Optional: If you are going to use AAD authentication, install the `azidentity` package: - -```bash +If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. +```Powershell go get github.com/Azure/azure-sdk-for-go/sdk/azidentity ``` -#### Create the client - -`azblob` allows you to interact with three types of resources :- - -* [Azure storage accounts][azure_storage_account]. -* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts. -* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs) - within those containers. +### Prerequisites -Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will -need the account's blob service endpoint URL and a credential that allows you to access the account. The `endpoint` can -be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" -section or by running the following Azure CLI command: +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). -```bash -# Get the blob service URL for the account -az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" -``` +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. -Once you have the account URL, it can be used to create the service client: +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: -```golang -cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey") -handle(err) -serviceClient, err := azblob.NewServiceClientWithSharedKey("https://.blob.core.windows.net/", cred, nil) -handle(err) +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS ``` -For more information about blob service URL's and how to configure custom domain names for Azure Storage check out -the [official documentation][azure_portal_account_url] - -#### Types of credentials +### Authenticate the client -The azblob clients support authentication via Shared Key Credential, Connection String, Shared Access Signature, or any -of the `azidentity` types that implement the `azcore.TokenCredential` interface. 
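The removed section above enumerated the non-AAD options. As a brief sketch, assuming the shared-key and connection-string constructors exported by this azblob release (placeholders are illustrative, not real values), those look like this against the new top-level client:

```go
// Sketch (assumed constructor names for this azblob release): shared key and
// connection string authentication with the new azblob.Client.
cred, err := azblob.NewSharedKeyCredential("<account name>", "<account key>")
// TODO: handle error

client, err := azblob.NewClientWithSharedKeyCredential("https://<account name>.blob.core.windows.net/", cred, nil)
// TODO: handle error

// alternatively, a connection string carries both the endpoint and the key
client, err = azblob.NewClientFromConnectionString("<connection string>", nil)
// TODO: handle error
_ = client
```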
+In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. -##### 1. Creating the client from a shared key - -To use an account [shared key][azure_shared_key] (aka account key or access key), provide the key as a string. This can -be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by -running the following Azure CLI command: +```go +// create a credential for authenticating with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle err -```bash -az storage account keys list -g my-resource-group -n mystorageaccount +// create an azblob.Client for the specified storage account that uses the above credential +client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil) +// TODO: handle err ``` -Use Shared Key authentication as the credential parameter to authenticate the client: +Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). -```golang -credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") -handle(err) -serviceClient, err := azblob.NewServiceClientWithSharedKey("https://.blob.core.windows.net/", credential, nil) -handle(err) -``` +## Key concepts -##### 2. Creating the client from a connection string +Blob storage is designed for: -You can use connection string, instead of providing the account URL and credential separately, for authentication as -well. To do this, pass the connection string to the client's `NewServiceClientFromConnectionString` method. The -connection string can be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access -Keys" section or with the following Azure CLI command: +- Serving images or documents directly to a browser. +- Storing files for distributed access. +- Streaming video and audio. +- Writing to log files. +- Storing data for backup and restore, disaster recovery, and archiving. +- Storing data for analysis by an on-premises or Azure-hosted service. -```bash -az storage account show-connection-string -g my-resource-group -n mystorageaccount -``` +Blob storage offers three types of resources: -```golang -connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" -serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) -``` +- The _storage account_ +- One or more _containers_ in a storage account +- One ore more _blobs_ in a container -##### 3. Creating the client from a SAS token - -To use a [shared access signature (SAS) token][azure_sas_token], provide the token as a string. You can generate a SAS -token from the Azure Portal -under [Shared access signature](https://docs.microsoft.com/rest/api/storageservices/create-service-sas) or use -the `ServiceClient.GetSASToken` or `ContainerClient.GetSASToken()` methods. 
- -```golang -credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") -handle(err) -serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil) -handle(err) -// Provide the convenience function with relevant info (services, resource types, permissions, and duration) -// The SAS token will be valid from this moment onwards. -accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true}, -AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour)) -handle(err) -sasURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS) - -// The sasURL can be used to authenticate a client without need for a credential -serviceClient, err = NewServiceClientWithNoCredential(sasURL, nil) -handle(err) -``` +Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. +The storage account is specified when the `azblob.Client` is constructed. +Use the appropriate client constructor function for the authentication mechanism you wish to use. -### Clients +Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go) -Three different clients are provided to interact with the various components of the Blob Service: +### Goroutine safety +We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. -1. **`ServiceClient`** - * Get and set account settings. - * Query, create, and delete containers within the account. +### About blob metadata +Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. -2. **`ContainerClient`** - * Get and set container access settings, properties, and metadata. - * Create, delete, and query blobs within the container. - * `ContainerLeaseClient` to support container lease management. +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + -3. **`BlobClient`** - * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` - * Get and set blob properties. - * Perform CRUD operations on a given blob. - * `BlobLeaseClient` to support blob lease management. +## Examples -### Example +### Uploading a blob ```go -// Use your storage account's name and key to create a credential object, used to access your account. -// You can obtain these details from the Azure Portal. 
-accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") -if !ok { - handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found")) -} - -accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") -if !ok { - handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found")) -} -cred, err := NewSharedKeyCredential(accountName, accountKey) -handle(err) - -// Open up a service client. -// You'll need to specify a service URL, which for blob endpoints usually makes up the syntax http(s)://.blob.core.windows.net/ -service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) -handle(err) - -// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout. -ctx := context.Background() // This example has no expiry. - -// This example showcases several common operations to help you get started, such as: - -// ===== 1. Creating a container ===== - -// First, branch off of the service client and create a container client. -container := service.NewContainerClient("mycontainer") - -// Then, fire off a create operation on the container client. -// Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc. -// Specifying nil omits all options. -_, err = container.Create(ctx, nil) -handle(err) - -// ===== 2. Uploading/downloading a block blob ===== -// We'll specify our data up-front, rather than reading a file for simplicity's sake. -data := "Hello world!" - -// Branch off of the container into a block blob client -blockBlob := container.NewBlockBlobClient("HelloWorld.txt") - -// Upload data to the block blob -_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil) -handle(err) - -// Download the blob's contents and ensure that the download worked properly -get, err := blockBlob.Download(ctx, nil) -handle(err) - -// Open a buffer, reader, and then download! -downloadedData := &bytes.Buffer{} -// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. -reader := get.Body(RetryReaderOptions{}) -_, err = downloadedData.ReadFrom(reader) -handle(err) -err = reader.Close() -handle(err) -if data != downloadedData.String() { - handle(errors.New("downloaded data doesn't match uploaded data")) -} +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" + blobName = "sample-blob" + sampleFile = "path/to/sample/file" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// open the file for reading +file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0) +// TODO: handle error +defer file.Close() + +// upload the file to the specified container with the specified blob name +_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) +// TODO: handle error +``` -// ===== 3. list blobs ===== -// The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel. -// You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel. 
-// The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout. -pager := container.ListBlobsFlat(nil) +### Downloading a blob -for pager.NextPage(ctx) { - resp := pager.PageResponse() +```go +// this example accesses a public blob via anonymous access, so no credentials are required +client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil) +// TODO: handle error + +// create or open a local file where we can download the blob +file, err := os.Create("cloud.jpg") +// TODO: handle error +defer file.Close() + +// download the blob +_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) +// TODO: handle error +``` - for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems { - fmt.Println(*v.Name) - } -} +### Enumerating blobs -if err = pager.Err(); err != nil { - handle(err) +```go +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// blob listings are returned across multiple pages +pager := client.NewListBlobsFlatPager(containerName, nil) + +// continue fetching pages until no more remain +for pager.More() { + // advance to the next page + page, err := pager.NextPage(context.TODO()) + // TODO: handle error + + // print the blob names for this page + for _, blob := range page.Segment.BlobItems { + fmt.Println(*blob.Name) + } } - -// Delete the blob we created earlier. -_, err = blockBlob.Delete(ctx, nil) -handle(err) - -// Delete the container we created earlier. -_, err = container.Delete(ctx, nil) -handle(err) ``` ## Troubleshooting -### Error Handling - -All I/O operations will return an `error` that can be investigated to discover more information about the error. In -addition, you can investigate the raw response of any response object: +All Blob service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [bloberror][blob_error] package provides the possible Storage error codes +along with various helper facilities for error handling. -```golang -var storageErr *azblob.StorageError -resp, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil) -if err != nil && errors.As(err, &storageErr) { - // do something with storageErr.Response() +```go +const ( + connectionString = "" + containerName = "sample-container" +) + +// create a client with the provided connection string +client, err := azblob.NewClientFromConnectionString(connectionString, nil) +// TODO: handle error + +// try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion +_, err = client.DeleteContainer(context.TODO(), containerName, nil) + +if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) { + // ignore any errors if the container is being deleted or already has been deleted +} else if err != nil { + // TODO: some other error } ``` -### Logging - -This module uses the classification based logging implementation in azcore. To turn on logging -set `AZURE_SDK_GO_LOGGING` to `all`. 
+## Next steps -If you only want to include logs for `azblob`, you must create your own logger and set the log classification -as `LogCredential`. +Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. -To obtain more detailed logging, including request/response bodies and header values, make sure to leave the logger as -default or enable the `LogRequest` and/or `LogResponse` classificatons. A logger that only includes credential logs can -be like the following: +### Specialized clients -```golang -import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" -// Set log to output to the console -azlog.SetListener(func (cls azlog.Classification, msg string) { - fmt.Println(msg) // printing log out to the console -}) +The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages. +Use these clients when you need to interact with a specific kind of blob. +Learn more about the various types of blobs from the following links. -// Includes only requests and responses in credential logs -azlog.SetClassifications(azlog.Request, azlog.Response) -``` +- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs) +- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs) +- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs) -> CAUTION: logs from credentials contain sensitive information. -> These logs must be protected to avoid compromising account security. -> +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. -## License +The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. -This project is licensed under MIT. +The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more. -## Provide Feedback +The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more. -If you encounter bugs or have suggestions, please -[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Azure.AzBlob` label. +The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. +See the package's documentation for more information. ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License -Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For -details, visit https://cla.microsoft.com. +See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, +testing, and contributing to this library. -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate -the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. 
You will only need to -do this once across all repos using our CLA. +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. For +details, visit [cla.microsoft.com][cla]. -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. +For more information see the [Code of Conduct FAQ][coc_faq] +or contact [opencode@microsoft.com][coc_contact] with any +additional questions or comments. +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png) - -[azure_subscription]:https://azure.microsoft.com/free/ - -[azure_storage_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal - -[azure_portal_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal - -[azure_powershell_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-powershell - -[azure_cli_create_account]: https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-cli - -[azure_cli_account_url]:https://docs.microsoft.com/cli/azure/storage/account?view=azure-cli-latest#az-storage-account-show - -[azure_powershell_account_url]:https://docs.microsoft.com/powershell/module/az.storage/get-azstorageaccount?view=azps-4.6.1 - -[azure_portal_account_url]:https://docs.microsoft.com/azure/storage/common/storage-account-overview#storage-account-endpoints - -[azure_sas_token]:https://docs.microsoft.com/azure/storage/common/storage-sas-overview - -[azure_shared_key]:https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key - -[azure_core_ref_docs]:https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore - -[azure_core_readme]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/README.md - -[blobs_error_codes]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes - -[msft_oss_coc]:https://opensource.microsoft.com/codeofconduct/ - -[msft_oss_coc_faq]:https://opensource.microsoft.com/codeofconduct/faq/ - -[contact_msft_oss]:mailto:opencode@microsoft.com - -[blobs_rest]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[godevdl]: https://go.dev/dl/ +[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them +[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: 
https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_cli]: https://docs.microsoft.com/cli/azure +[azure_sub]: https://azure.microsoft.com/free/ +[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError +[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go +[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go +[blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go +[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go +[block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go +[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go +[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease +[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go +[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas +[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go +[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go new file mode 100644 index 00000000000..a6c204ac661 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -0,0 +1,276 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package appendblob + +import ( + "context" + "io" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a client to an Azure Storage append blob; +type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +// BlobClient returns the embedded blob client for this AppendBlob client. 
+func (ab *Client) BlobClient() *blob.Client { + innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) + return (*blob.Client)(innerBlob) +} + +func (ab *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) +} + +func (ab *Client) generated() *generated.AppendBlobClient { + _, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) + return appendBlob +} + +// URL returns the URL endpoint used by the Client object. +func (ab *Client) URL() string { + return ab.generated().Endpoint() +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(ab.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (ab *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(ab.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil +} + +// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + resp, err := ab.generated().Create(ctx, 0, opts, httpHeaders, leaseAccessConditions, cpkInfo, + cpkScopeInfo, modifiedAccessConditions) + return resp, err +} + +// AppendBlock writes a stream to a new block of data to the end of the existing append blob. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. +func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return AppendBlockResponse{}, nil + } + + appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format() + + resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + + return resp, err +} + +// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. 
+func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) { + appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + + // content length should be 0 on * from URL. always. It's a 400 if it isn't. + resp, err := ab.generated().AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, + leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + return resp, err +} + +// Seal - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only. +// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal +func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) { + leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format() + resp, err := ab.generated().Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) + return resp, err +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return ab.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return ab.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return ab.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return ab.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. 
+func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return ab.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return ab.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return ab.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return ab.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return ab.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return ab.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return ab.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return ab.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return ab.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. 
+func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return ab.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go new file mode 100644 index 00000000000..69faf6ad654 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package appendblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method. +type AppendPositionAccessConditions = generated.AppendPositionAccessConditions + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions provides set of configurations for Create Append Blob operation +type CreateOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + AccessConditions *blob.AccessConditions + + HTTPHeaders *blob.HTTPHeaders + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. 
+ Metadata map[string]string +} + +func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := generated.AppendBlobClientCreateOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method. +type AppendBlockOptions struct { + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + AccessConditions *blob.AccessConditions +} + +func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &generated.AppendBlobClientAppendBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. +type AppendBlockFromURLOptions struct { + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + AccessConditions *blob.AccessConditions + + // Range specifies a range of bytes. The default value is all bytes. 
+ Range blob.HTTPRange +} + +func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, + *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &generated.AppendBlobClientAppendBlockFromURLOptions{ + SourceRange: exported.FormatHTTPRange(o.Range), + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SealOptions provides set of configurations for SealAppendBlob operation +type SealOptions struct { + AccessConditions *blob.AccessConditions + AppendPositionAccessConditions *AppendPositionAccessConditions +} + +func (o *SealOptions) format() (*generated.LeaseAccessConditions, + *generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions + +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go new file mode 100644 index 00000000000..e9ab4a3ccdc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go @@ -0,0 +1,23 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package appendblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.AppendBlobClientCreateResponse + +// AppendBlockResponse contains the response from method Client.AppendBlock. +type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse + +// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL. +type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse + +// SealResponse contains the response from method Client.Seal. 
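A hedged sketch of filling in the option structs defined in this file: content type, metadata, and tags on Create, plus a transactional MD5 on AppendBlock so the service can validate the uploaded block. All values are illustrative.

```go
package example

import (
	"bytes"
	"context"
	"crypto/md5"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// createWithOptions creates an append blob with headers, metadata, and tags,
// then appends one block whose MD5 the service verifies.
func createWithOptions(ctx context.Context, ab *appendblob.Client, payload []byte) error {
	contentType := "text/plain"
	createOpts := &appendblob.CreateOptions{
		HTTPHeaders: &blob.HTTPHeaders{BlobContentType: &contentType},
		Metadata:    map[string]string{"origin": "example"},
		Tags:        map[string]string{"env": "dev"},
	}
	if _, err := ab.Create(ctx, createOpts); err != nil {
		return err
	}

	sum := md5.Sum(payload)
	appendOpts := &appendblob.AppendBlockOptions{TransactionalContentMD5: sum[:]}
	_, err := ab.AppendBlock(ctx, streaming.NopCloser(bytes.NewReader(payload)), appendOpts)
	return err
}
```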
+type SealResponse = generated.AppendBlobClientSealResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md deleted file mode 100644 index 0a391904aac..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md +++ /dev/null @@ -1,171 +0,0 @@ -# Code Generation - Azure Blob SDK for Golang - - - -```bash -cd swagger -autorest autorest.md -gofmt -w generated/* -``` - -### Settings - -```yaml -go: true -clear-output-folder: false -version: "^3.0.0" -license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" -module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" -credential-scope: "https://storage.azure.com/.default" -output-folder: internal/ -file-prefix: "zz_generated_" -openapi-type: "data-plane" -verbose: true -security: AzureKey -module-version: "0.3.0" -modelerfour: - group-parameters: false - seal-single-value-enum-by-default: true - lenient-model-deduplication: true -export-clients: false -use: "@autorest/go@4.0.0-preview.36" -``` - -### Fix BlobMetadata. - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.BlobMetadata["properties"]; - -``` - -### Don't include container name or blob in path - we have direct URIs. - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"] - transform: > - for (const property in $) - { - if (property.includes('/{containerName}/{blob}')) - { - $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } - else if (property.includes('/{containerName}')) - { - $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); - } - } -``` - -### Remove DataLake stuff. 
- -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"] - transform: > - for (const property in $) - { - if (property.includes('filesystem')) - { - delete $[property]; - } - } -``` - -### Remove DataLakeStorageError - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.DataLakeStorageError; -``` - -### Fix 304s - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"]["/{containerName}/{blob}"] - transform: > - $.get.responses["304"] = { - "description": "The condition specified using HTTP conditional header(s) is not met.", - "x-az-response-name": "ConditionNotMetError", - "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } - }; -``` - -### Fix GeoReplication - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.GeoReplication.properties.Status["x-ms-enum"]; - $.GeoReplication.properties.Status["x-ms-enum"] = { - "name": "BlobGeoReplicationStatus", - "modelAsString": false - }; -``` - -### Fix RehydratePriority - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.RehydratePriority["x-ms-enum"]; - $.RehydratePriority["x-ms-enum"] = { - "name": "RehydratePriority", - "modelAsString": false - }; -``` - -### Fix BlobDeleteType - -``` yaml -directive: -- from: swagger-document - where: $.parameters - transform: > - delete $.BlobDeleteType.enum; - $.BlobDeleteType.enum = [ - "None", - "Permanent" - ]; -``` - -### Fix EncryptionAlgorithm - -``` yaml -directive: -- from: swagger-document - where: $.parameters - transform: > - delete $.EncryptionAlgorithm.enum; - $.EncryptionAlgorithm.enum = [ - "None", - "AES256" - ]; -``` - -### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; - delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; -``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go new file mode 100644 index 00000000000..1b7cf6d865a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -0,0 +1,423 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "errors" + "io" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. 
+type Client base.Client[generated.BlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (b *Client) generated() *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +func (b *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.BlobClient])(b)) +} + +// URL returns the URL endpoint used by the Client object. +func (b *Client) URL() string { + return b.generated().Endpoint() +} + +// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (b *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := ParseURL(b.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (b *Client) WithVersionID(versionID string) (*Client, error) { + p, err := ParseURL(b.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) { + deleteOptions, leaseInfo, accessConditions := o.format() + resp, err := b.generated().Delete(ctx, deleteOptions, leaseInfo, accessConditions) + return resp, err +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) { + undeleteOptions := o.format() + resp, err := b.generated().Undelete(ctx, undeleteOptions) + return resp, err +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. 
This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions := options.format() + resp, err := b.generated().GetProperties(ctx, opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + return resp, err +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *SetMetadataOptions) (SetMetadataResponse, error) { + basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} + leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() + resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + return resp, err +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this + // performance hit. + opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions) + + return resp, err +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
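An illustrative sequence over the blob.Client methods above: update metadata, take a snapshot, and derive a snapshot-scoped client with WithSnapshot. The metadata value is a placeholder.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// snapshotFlow records a metadata change, snapshots the blob, and inspects
// the snapshot through a client scoped to it.
func snapshotFlow(ctx context.Context, b *blob.Client) error {
	if _, err := b.SetMetadata(ctx, map[string]string{"rev": "2"}, nil); err != nil {
		return err
	}
	snap, err := b.CreateSnapshot(ctx, nil)
	if err != nil {
		return err
	}
	snapClient, err := b.WithSnapshot(*snap.Snapshot)
	if err != nil {
		return err
	}
	props, err := snapClient.GetProperties(ctx, nil)
	if err != nil {
		return err
	}
	fmt.Println("snapshot size:", *props.ContentLength)
	return nil
}
```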
+func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) { + opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().StartCopyFromURL(ctx, copySource, opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := b.generated().AbortCopyFromURL(ctx, copyID, opts, leaseAccessConditions) + return resp, err +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) { + serializedTags := shared.SerializeBlobTags(tags) + blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().SetTags(ctx, *serializedTags, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) { + blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err + +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { + copyOptions, smac, mac, lac := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, expiry time.Time) (string, error) { + if b.sharedKey() == nil { + return "", errors.New("credential is not a SharedKeyCredential. 
SAS can only be signed with a SharedKeyCredential") + } + + urlParts, err := ParseURL(b.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + + qps, err := sas.BlobSignatureValues{ + ContainerName: urlParts.ContainerName, + BlobName: urlParts.BlobName, + SnapshotTime: t, + Version: sas.Version, + + Permissions: permissions.String(), + + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(b.sharedKey()) + + if err != nil { + return "", err + } + + endpoint := b.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// download downloads an Azure blob to a WriterAt in parallel. +func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + count := o.Range.Count + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return 0, err + } + count = *dr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + // Prepare and do parallel download. + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + Concurrency: o.Concurrency, + Operation: func(chunkStart int64, count int64, ctx context.Context) error { + + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: count, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return err + } + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body) + if err != nil { + return err + } + err = body.Close() + return err + }, + }) + if err != nil { + return 0, err + } + return count, nil +} + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
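A small sketch of GetSASURL as described above, generating a one-hour read-only link. It assumes the client was built with a SharedKeyCredential and that sas.BlobPermissions exposes a Read flag; adjust the permissions as needed.

```go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

// readOnlySASLink returns a URL with a SAS token that grants read access to
// the blob for one hour.
func readOnlySASLink(b *blob.Client) (string, error) {
	start := time.Now().UTC()
	expiry := start.Add(time.Hour)
	return b.GetSASURL(sas.BlobPermissions{Read: true}, start, expiry)
}
```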
+func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { + downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format() + if o == nil { + o = &DownloadStreamOptions{} + } + + dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return DownloadStreamResponse{}, err + } + + return DownloadStreamResponse{ + client: b, + BlobClientDownloadResponse: dr, + getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, + ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), + cpkInfo: o.CpkInfo, + cpkScope: o.CpkScopeInfo, + }, err +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) { + if o == nil { + o = &DownloadBufferOptions{} + } + return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) { + if o == nil { + o = &DownloadFileOptions{} + } + do := (*downloadOptions)(o) + + // 1. Calculate the size of the destination file + var size int64 + + count := do.Range.Count + if count == CountToEnd { + // Try to get Azure blob's size + getBlobPropertiesOptions := do.getBlobPropertiesOptions() + props, err := b.GetProperties(ctx, getBlobPropertiesOptions) + if err != nil { + return 0, err + } + size = *props.ContentLength - do.Range.Offset + } else { + size = count + } + + // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. + stat, err := file.Stat() + if err != nil { + return 0, err + } + if stat.Size() != size { + if err = file.Truncate(size); err != nil { + return 0, err + } + } + + if size > 0 { + return b.download(ctx, file, *do) + } else { // if the blob's size is 0, there is no need in downloading it + return 0, nil + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go new file mode 100644 index 00000000000..d0e5d7d825c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -0,0 +1,241 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + CountToEnd = 0 + + SnapshotTimeFormat = exported.SnapshotTimeFormat + + // DefaultDownloadBlockSize is default block size + DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB +) + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. 
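A sketch of driving the parallel download path above via DownloadFile, with an explicit block size, concurrency, and progress callback. The local file path is a placeholder.

```go
package example

import (
	"context"
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// downloadToFile downloads the blob in 4 MiB blocks using four concurrent
// range reads and reports progress as bytes arrive.
func downloadToFile(ctx context.Context, b *blob.Client) error {
	f, err := os.Create("/tmp/blob-copy.bin")
	if err != nil {
		return err
	}
	defer f.Close()

	n, err := b.DownloadFile(ctx, f, &blob.DownloadFileOptions{
		BlockSize:   4 * 1024 * 1024,
		Concurrency: 4,
		Progress: func(bytesTransferred int64) {
			fmt.Printf("\rdownloaded %d bytes", bytesTransferred)
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("\ndone: %d bytes\n", n)
	return nil
}
```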
+func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} + +// AccessTier defines values for Blob Access Tier +type AccessTier = generated.AccessTier + +const ( + AccessTierArchive AccessTier = generated.AccessTierArchive + AccessTierCool AccessTier = generated.AccessTierCool + AccessTierHot AccessTier = generated.AccessTierHot + AccessTierP10 AccessTier = generated.AccessTierP10 + AccessTierP15 AccessTier = generated.AccessTierP15 + AccessTierP20 AccessTier = generated.AccessTierP20 + AccessTierP30 AccessTier = generated.AccessTierP30 + AccessTierP4 AccessTier = generated.AccessTierP4 + AccessTierP40 AccessTier = generated.AccessTierP40 + AccessTierP50 AccessTier = generated.AccessTierP50 + AccessTierP6 AccessTier = generated.AccessTierP6 + AccessTierP60 AccessTier = generated.AccessTierP60 + AccessTierP70 AccessTier = generated.AccessTierP70 + AccessTierP80 AccessTier = generated.AccessTierP80 + AccessTierPremium AccessTier = generated.AccessTierPremium +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleAccessTierValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// ImmutabilityPolicySetting returns the possible values for the ImmutabilityPolicySetting const type. 
+type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return generated.PossibleImmutabilityPolicySettingValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType +type EncryptionAlgorithmType = generated.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256 +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return generated.PossibleEncryptionAlgorithmTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// DeleteType defines values for DeleteType +type DeleteType = generated.DeleteType + +const ( + DeleteTypeNone DeleteType = generated.DeleteTypeNone + DeleteTypePermanent DeleteType = generated.DeleteTypePermanent +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return generated.PossibleDeleteTypeValues() +} + +// ExpiryOptions defines values for ExpiryOptions +type ExpiryOptions = generated.ExpiryOptions + +const ( + ExpiryOptionsAbsolute ExpiryOptions = generated.ExpiryOptionsAbsolute + ExpiryOptionsNeverExpire ExpiryOptions = generated.ExpiryOptionsNeverExpire + ExpiryOptionsRelativeToCreation ExpiryOptions = generated.ExpiryOptionsRelativeToCreation + ExpiryOptionsRelativeToNow ExpiryOptions = generated.ExpiryOptionsRelativeToNow +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return generated.PossibleExpiryOptionsValues() +} + +// QueryFormatType - The quick query format type. 
+type QueryFormatType = generated.QueryFormatType + +const ( + QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited + QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON + QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow + QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return generated.PossibleQueryFormatTypeValues() +} + +// LeaseDurationType defines values for LeaseDurationType +type LeaseDurationType = generated.LeaseDurationType + +const ( + LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite + LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// LeaseStateType defines values for LeaseStateType +type LeaseStateType = generated.LeaseStateType + +const ( + LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable + LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased + LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired + LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking + LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return generated.PossibleLeaseStateTypeValues() +} + +// LeaseStatusType defines values for LeaseStatusType +type LeaseStatusType = generated.LeaseStatusType + +const ( + LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked + LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return generated.PossibleLeaseStatusTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go new file mode 100644 index 00000000000..86af620c357 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -0,0 +1,492 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. 
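For completeness, a sketch of building a blob.Client from a shared key using NewSharedKeyCredential described above; the environment variable names and blob URL are placeholders.

```go
package example

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// sharedKeyClient constructs a blob.Client authenticated with the storage
// account's name and key, read here from the environment for illustration.
func sharedKeyClient() (*blob.Client, error) {
	cred, err := blob.NewSharedKeyCredential(
		os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_KEY"))
	if err != nil {
		return nil, err
	}
	return blob.NewClientWithSharedKeyCredential(
		"https://myaccount.blob.core.windows.net/mycontainer/data.bin", cred, nil)
}
```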
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Type Declarations --------------------------------------------------------------------- + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// CpkInfo contains a group of parameters for client provided encryption key. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for client provided encryption scope. +type CpkScopeInfo = generated.CpkScopeInfo + +// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type HTTPHeaders = generated.BlobHTTPHeaders + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions + +// Tags represent map of blob index tags +type Tags = generated.BlobTag + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// DownloadStreamOptions contains the optional parameters for the Client.Download method. +type DownloadStreamOptions struct { + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + AccessConditions *AccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientDownloadOptions{ + RangeGetContentMD5: o.RangeGetContentMD5, + Range: exported.FormatHTTPRange(o.Range), + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions. +type downloadOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. 
+ Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { + if o == nil { + return nil + } + return &GetPropertiesOptions{ + AccessConditions: o.AccessConditions, + CpkInfo: o.CpkInfo, + } +} + +func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions { + if o == nil { + return nil + } + return &DownloadStreamOptions{ + AccessConditions: o.AccessConditions, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + Range: rnge, + RangeGetContentMD5: rangeGetContentMD5, + } +} + +// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // CpkInfo contains a group of parameters for client provided encryption key. + CpkInfo *CpkInfo + + // CpkScopeInfo contains a group of parameters for client provided encryption scope. + CpkScopeInfo *CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// DownloadFileOptions contains the optional parameters for the DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. 
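A sketch of a ranged read with DownloadBuffer using the Range option described above; it assumes the blob is at least 1 KiB. Leaving Count at zero (CountToEnd) would instead fetch everything from the offset to the end of the blob.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// readFirstKiB pulls only the first 1024 bytes of the blob into memory.
func readFirstKiB(ctx context.Context, b *blob.Client) ([]byte, error) {
	buf := make([]byte, 1024)
	_, err := b.DownloadBuffer(ctx, buf, &blob.DownloadBufferOptions{
		Range: blob.HTTPRange{Offset: 0, Count: 1024},
	})
	if err != nil {
		return nil, err
	}
	return buf, nil
}
```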
+type DeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself + DeleteSnapshots *DeleteSnapshotsOptionType + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + } + + if o.AccessConditions == nil { + return &basics, nil, nil + } + + return &basics, o.AccessConditions.LeaseAccessConditions, o.AccessConditions.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UndeleteOptions contains the optional parameters for the Client.Undelete method. +type UndeleteOptions struct { + // placeholder for future options +} + +func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTierOptions contains the optional parameters for the Client.SetTier method. +type SetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + AccessConditions *AccessConditions +} + +func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method +type GetPropertiesOptions struct { + AccessConditions *AccessConditions + CpkInfo *CpkInfo +} + +func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, + *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. 
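+// A hedged usage sketch (assumes an already constructed *Client named "client"):
+//
+//	headers := HTTPHeaders{
+//		BlobContentType:  to.Ptr("application/json"),
+//		BlobCacheControl: to.Ptr("no-cache"),
+//	}
+//	_, err := client.SetHTTPHeaders(context.TODO(), headers, nil)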
+type SetHTTPHeadersOptions struct { + AccessConditions *AccessConditions +} + +func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions provides set of configurations for Set Metadata on blob operation +type SetMetadataOptions struct { + AccessConditions *AccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CpkInfo, + *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. +type CreateSnapshotOptions struct { + Metadata map[string]string + AccessConditions *AccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + + return &generated.BlobClientCreateSnapshotOptions{ + Metadata: o.Metadata, + }, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. +type StartCopyFromURLOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Overrides the sealed state of the destination blob. 
Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + AccessConditions *AccessConditions +} + +func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions, + *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientStartCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + Metadata: o.Metadata, + RehydratePriority: o.RehydratePriority, + SealBlob: o.SealBlob, + Tier: o.Tier, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method. +type AbortCopyFromURLOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTagsOptions contains the optional parameters for the Client.SetTags method. +type SetTagsOptions struct { + // The version id parameter is an opaque DateTime value that, when present, + // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + VersionID *string + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Optional header, Specifies the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AccessConditions *AccessConditions +} + +func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientSetTagsOptions{ + TransactionalContentMD5: o.TransactionalContentMD5, + TransactionalContentCRC64: o.TransactionalContentCRC64, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetTagsOptions contains the optional parameters for the Client.GetTags method. +type GetTagsOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. + Snapshot *string + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. 
+ VersionID *string + + BlobAccessConditions *AccessConditions +} + +func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientGetTagsOptions{ + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. +type CopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *AccessConditions +} + +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &generated.BlobClientCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + CopySourceAuthorization: o.CopySourceAuthorization, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go new file mode 100644 index 00000000000..c13d332d719 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// DownloadStreamResponse contains the response from the DownloadStream method. +// To read from the stream, read from the Body field, or call the NewRetryReader method. +type DownloadStreamResponse struct { + generated.BlobClientDownloadResponse + ObjectReplicationRules []ObjectReplicationPolicy + + client *Client + getInfo httpGetterInfo + cpkInfo *CpkInfo + cpkScope *CpkScopeInfo +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DowloadStreamResponse.Body field. +func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + accessConditions := &AccessConditions{ + ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag}, + } + options := DownloadStreamOptions{ + Range: getInfo.Range, + AccessConditions: accessConditions, + CpkInfo: r.cpkInfo, + CpkScopeInfo: r.cpkScope, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// DeleteResponse contains the response from method BlobClient.Delete. +type DeleteResponse = generated.BlobClientDeleteResponse + +// UndeleteResponse contains the response from method BlobClient.Undelete. +type UndeleteResponse = generated.BlobClientUndeleteResponse + +// SetTierResponse contains the response from method BlobClient.SetTier. +type SetTierResponse = generated.BlobClientSetTierResponse + +// GetPropertiesResponse contains the response from method BlobClient.GetProperties. +type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse + +// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse + +// SetMetadataResponse contains the response from method BlobClient.SetMetadata. +type SetMetadataResponse = generated.BlobClientSetMetadataResponse + +// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse + +// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse + +// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse + +// SetTagsResponse contains the response from method BlobClient.SetTags. +type SetTagsResponse = generated.BlobClientSetTagsResponse + +// GetTagsResponse contains the response from method BlobClient.GetTags. +type GetTagsResponse = generated.BlobClientGetTagsResponse + +// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. 
+type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse + +// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse + +// BreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse + +// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse + +// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse + +// RenewLeaseResponse contains the response from method BlobClient.RenewLease. +type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go similarity index 57% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index 3179138f111..cc1b1365d7c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -2,57 +2,46 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package blob import ( "context" "io" "net" - "net/http" "strings" "sync" -) -const CountToEnd = 0 + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) // HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. -type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) // HTTPGetterInfo is passed to an HTTPGetter function passing it parameters // that should be used to make an HTTP GET request. -type HTTPGetterInfo struct { - // Offset specifies the start offset that should be used when - // creating the HTTP GET request's Range header - Offset int64 - - // Count specifies the count of bytes that should be used to calculate - // the end offset when creating the HTTP GET request's Range header - Count int64 +type httpGetterInfo struct { + Range HTTPRange // ETag specifies the resource's etag that should be used when creating // the HTTP GET request's If-Match header - ETag string + ETag *azcore.ETag } -// FailedReadNotifier is a function type that represents the notification function called when a read fails -type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) - -// RetryReaderOptions contains properties which can help to decide when to do retry. +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. type RetryReaderOptions struct { - // MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made - // while reading from a RetryReader. A value of zero means that no additional HTTP - // GET requests will be made. 
- MaxRetryRequests int - doInjectError bool - doInjectErrorRound int - injectedError error + // MaxRetries specifies the maximum number of attempts a failed read will be retried + // before producing an error. + // The default value is three. + MaxRetries int32 - // NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging. - NotifyFailedRead FailedReadNotifier + // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging. + OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) - // TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, + // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, // retryReader has the following special behaviour: closing the response body before it is all read is treated as a // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If @@ -61,52 +50,59 @@ type RetryReaderOptions struct { // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors // which will be retried. - TreatEarlyCloseAsError bool + // The default value is false. + EarlyCloseAsError bool - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + doInjectError bool + doInjectErrorRound int32 + injectedError error } -// retryReader implements io.ReaderCloser methods. -// retryReader tries to read from response, and if there is retriable network error +// RetryReader attempts to read from response, and if there is retriable network error // returned during reading, it will retry according to retry reader option through executing // user defined action with provided data to get a new response, and continue the overall reading process // through reading from the new response. -type retryReader struct { - ctx context.Context - info HTTPGetterInfo - countWasBounded bool - o RetryReaderOptions - getter HTTPGetter +// RetryReader implements the io.ReadCloser interface. +type RetryReader struct { + ctx context.Context + info httpGetterInfo + retryReaderOptions RetryReaderOptions + getter httpGetter + countWasBounded bool // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response responseMu *sync.Mutex - response *http.Response + response io.ReadCloser } -// NewRetryReader creates a retry reader. -func NewRetryReader(ctx context.Context, initialResponse *http.Response, - info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { - return &retryReader{ - ctx: ctx, - getter: getter, - info: info, - countWasBounded: info.Count != CountToEnd, - response: initialResponse, - responseMu: &sync.Mutex{}, - o: o} +// newRetryReader creates a retry reader. 
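+// Callers normally obtain a RetryReader through DownloadStreamResponse.NewRetryReader
+// rather than through this constructor. A hedged sketch ("dl" is assumed to be a
+// DownloadStreamResponse and "dst" an io.Writer):
+//
+//	rr := dl.NewRetryReader(ctx, &RetryReaderOptions{MaxRetries: 5})
+//	defer rr.Close()
+//	_, err := io.Copy(dst, rr)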
+func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { + if o.MaxRetries < 1 { + o.MaxRetries = 3 + } + return &RetryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Range.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + retryReaderOptions: o, + } } -func (s *retryReader) setResponse(r *http.Response) { +// setResponse function +func (s *RetryReader) setResponse(r io.ReadCloser) { s.responseMu.Lock() defer s.responseMu.Unlock() s.response = r } -func (s *retryReader) Read(p []byte) (n int, err error) { - for try := 0; ; try++ { +// Read from retry reader +func (s *RetryReader) Read(p []byte) (n int, err error) { + for try := int32(0); ; try++ { //fmt.Println(try) // Comment out for debugging. - if s.countWasBounded && s.info.Count == CountToEnd { + if s.countWasBounded && s.info.Range.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF } @@ -123,12 +119,12 @@ func (s *retryReader) Read(p []byte) (n int, err error) { s.setResponse(newResponse) resp = newResponse } - n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) // Injection mechanism for testing. - if s.o.doInjectError && try == s.o.doInjectErrorRound { - if s.o.injectedError != nil { - err = s.o.injectedError + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError } else { err = &net.DNSError{IsTemporary: true} } @@ -136,9 +132,9 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // We successfully read data or end EOF. if err == nil || err == io.EOF { - s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future - if s.info.Count != CountToEnd { - s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future } return n, err // Return the return to the caller } @@ -147,15 +143,15 @@ func (s *retryReader) Read(p []byte) (n int, err error) { s.setResponse(nil) // Our stream is no longer good // Check the retry count and error code, and decide whether to retry. 
- retriesExhausted := try >= s.o.MaxRetryRequests + retriesExhausted := try >= s.retryReaderOptions.MaxRetries _, isNetError := err.(net.Error) isUnexpectedEOF := err == io.ErrUnexpectedEOF willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted // Notify, for logging purposes, of any failures - if s.o.NotifyFailedRead != nil { + if s.retryReaderOptions.OnFailedRead != nil { failureCount := try + 1 // because try is zero-based - s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) + s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry) } if willRetry { @@ -174,21 +170,23 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // then there are two different types of error that may happen - either the one one we check for here, // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine // to check for one, since the other is a net.Error, which our main Read retry loop is already handing. -func (s *retryReader) wasRetryableEarlyClose(err error) bool { - if s.o.TreatEarlyCloseAsError { +func (s *RetryReader) wasRetryableEarlyClose(err error) bool { + if s.retryReaderOptions.EarlyCloseAsError { return false // user wants all early closes to be errors, and so not retryable } // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) } +// ReadOnClosedBodyMessage of retry reader const ReadOnClosedBodyMessage = "read on closed response body" -func (s *retryReader) Close() error { +// Close retry reader +func (s *RetryReader) Close() error { s.responseMu.Lock() defer s.responseMu.Unlock() - if s.response != nil && s.response.Body != nil { - return s.response.Body.Close() + if s.response != nil { + return s.response.Close() } return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go new file mode 100644 index 00000000000..7b4c8e24899 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ObjectReplicationRules struct +type ObjectReplicationRules struct { + RuleId string + Status string +} + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy struct { + PolicyId *string + Rules *[]ObjectReplicationRules +} + +// deserializeORSPolicies is utility function to deserialize ORS Policies +func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) { + if policies == nil { + return nil + } + // For source blobs (blobs that have policy ids and rule ids applied to them), + // the header will be formatted as "x-ms-or-_: {Complete, Failed}". + // The value of this header is the status of the replication. 
+ orPolicyStatusHeader := make(map[string]string) + for key, value := range policies { + if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { + orPolicyStatusHeader[key] = value + } + } + + parsedResult := make(map[string][]ObjectReplicationRules) + for key, value := range orPolicyStatusHeader { + policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") + policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] + + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) + } + + for policyId, rules := range parsedResult { + objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ + PolicyId: &policyId, + Rules: &rules, + }) + } + return +} + +// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders +func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { + return HTTPHeaders{ + BlobContentType: resp.ContentType, + BlobContentEncoding: resp.ContentEncoding, + BlobContentLanguage: resp.ContentLanguage, + BlobContentDisposition: resp.ContentDisposition, + BlobCacheControl: resp.CacheControl, + BlobContentMD5: resp.ContentMD5, + } +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go new file mode 100644 index 00000000000..6a1db8045c5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package bloberror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + BlobArchived Code = "BlobArchived" + BlobBeingRehydrated Code = "BlobBeingRehydrated" + BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + BlobNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + BlobOverwritten Code = "BlobOverwritten" + BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + ContainerAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidBlobTier Code = "InvalidBlobTier" + InvalidBlobType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + 
InvalidSourceBlobType Code = "InvalidSourceBlobType" + InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go 
similarity index 82% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index d5ccdfb4076..16927ecf895 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package blockblob import ( "bytes" @@ -13,19 +13,19 @@ import ( "encoding/binary" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" "io" "sync" "sync/atomic" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) // blockWriter provides methods to upload blocks that represent a file to a server and commit them. // This allows us to provide a local implementation that fakes the server for hermetic testing. type blockWriter interface { - StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) - CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) + StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) + CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) } // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. @@ -36,9 +36,9 @@ type blockWriter interface { // well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can // choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). // We can even provide a utility to dial this number in for customer networks to optimize their copies. -func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { - if err := o.defaults(); err != nil { - return BlockBlobCommitBlockListResponse{}, err +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (CommitBlockListResponse, error) { + if err := o.format(); err != nil { + return CommitBlockListResponse{}, err } ctx, cancel := context.WithCancel(ctx) @@ -47,7 +47,7 @@ func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o Uploa var err error generatedUuid, err := uuid.New() if err != nil { - return BlockBlobCommitBlockListResponse{}, err + return CommitBlockListResponse{}, err } cp := &copier{ @@ -68,12 +68,12 @@ func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o Uploa } // If the error is not EOF, then we have a problem. if err != nil && !errors.Is(err, io.EOF) { - return BlockBlobCommitBlockListResponse{}, err + return CommitBlockListResponse{}, err } // Close out our upload. if err := cp.close(); err != nil { - return BlockBlobCommitBlockListResponse{}, err + return CommitBlockListResponse{}, err } return cp.result, nil @@ -109,9 +109,10 @@ type copier struct { wg sync.WaitGroup // result holds the final result from blob storage after we have submitted all chunks. 
- result BlockBlobCommitBlockListResponse + result CommitBlockListResponse } +// copierChunk contains buffer type copierChunk struct { buffer []byte id string @@ -136,17 +137,17 @@ func (c *copier) sendChunk() error { return err } - buffer := c.o.TransferManager.Get() + buffer := c.o.transferManager.Get() if len(buffer) == 0 { - return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager") + return fmt.Errorf("transferManager returned a 0 size buffer, this is a bug in the manager") } n, err := io.ReadFull(c.reader, buffer) if n > 0 { - // Some data was read, schedule the write. + // Some data was read, schedule the Write. id := c.id.next() c.wg.Add(1) - c.o.TransferManager.Run( + c.o.transferManager.Run( func() { defer c.wg.Done() c.write(copierChunk{buffer: buffer, id: id, length: n}) @@ -154,7 +155,7 @@ func (c *copier) sendChunk() error { ) } else { // Return the unused buffer to the manager. - c.o.TransferManager.Put(buffer) + c.o.transferManager.Put(buffer) } if err == nil { @@ -172,16 +173,20 @@ func (c *copier) sendChunk() error { // write uploads a chunk to blob storage. func (c *copier) write(chunk copierChunk) { - defer c.o.TransferManager.Put(chunk.buffer) + defer c.o.transferManager.Put(chunk.buffer) if err := c.ctx.Err(); err != nil { return } stageBlockOptions := c.o.getStageBlockOptions() - _, err := c.to.StageBlock(c.ctx, chunk.id, internal.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) + _, err := c.to.StageBlock(c.ctx, chunk.id, shared.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) if err != nil { - c.errCh <- fmt.Errorf("write error: %w", err) - return + select { + case c.errCh <- err: + // failed to stage block, cancel the copy + default: + // don't block the goroutine if there's a pending error + } } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go new file mode 100644 index 00000000000..4af33356e29 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -0,0 +1,481 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "io" + "os" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client defines a set of operations applicable to block blobs. +type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (bb *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) +} + +func (bb *Client) generated() *generated.BlockBlobClient { + _, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + return blockBlob +} + +// URL returns the URL endpoint used by the Client object. +func (bb *Client) URL() string { + return bb.generated().Endpoint() +} + +// BlobClient returns the embedded blob client for this AppendBlob client. +func (bb *Client) BlobClient() *blob.Client { + blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + return (*blob.Client)(blobClient) +} + +// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(bb.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (bb *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(bb.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil +} + +// Upload creates a new block blob or overwrites an existing block blob. +// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not +// supported with Upload; the content of the existing blob is overwritten with the new content. To +// perform a partial update of a block blob, use StageBlock and CommitBlockList. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
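+// A minimal usage sketch (illustrative only; "client" is assumed to be an initialized
+// *Client and "data" a []byte payload; streaming.NopCloser comes from azcore/streaming):
+//
+//	body := streaming.NopCloser(bytes.NewReader(data))
+//	_, err := client.Upload(context.TODO(), body, nil)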
+func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return UploadResponse{}, err + } + + opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) + return resp, err +} + +// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. +func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return StageBlockResponse{}, err + } + + opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() + + resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo) + return resp, err +} + +// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// If count is CountToEnd (0), then data is read from specified offset to the end. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. +func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, + contentLength int64, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { + + stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() + + resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions, + cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + + return resp, err +} + +// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written +// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob +// by uploading only those blocks that have changed, then committing the new and existing +// blocks together. Any blocks not specified in the block list and permanently deleted. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. 
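+// A hedged end-to-end sketch of the staged-block flow (illustrative only; "client" is an
+// initialized *Client and "chunks" a [][]byte; block IDs must decode to equal lengths,
+// hence the fixed-width counter):
+//
+//	var ids []string
+//	for i, chunk := range chunks {
+//		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%05d", i)))
+//		if _, err := client.StageBlock(context.TODO(), id, streaming.NopCloser(bytes.NewReader(chunk)), nil); err != nil {
+//			return err
+//		}
+//		ids = append(ids, id)
+//	}
+//	_, err := client.CommitBlockList(context.TODO(), ids, nil)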
+func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) { + // this is a code smell in the generated code + blockIds := make([]*string, len(base64BlockIDs)) + for k, v := range base64BlockIDs { + blockIds[k] = to.Ptr(v) + } + + blockLookupList := generated.BlockLookupList{Latest: blockIds} + + var commitOptions *generated.BlockBlobClientCommitBlockListOptions + var headers *generated.BlobHTTPHeaders + var leaseAccess *blob.LeaseAccessConditions + var cpkInfo *generated.CpkInfo + var cpkScope *generated.CpkScopeInfo + var modifiedAccess *generated.ModifiedAccessConditions + + if options != nil { + commitOptions = &generated.BlockBlobClientCommitBlockListOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(options.Tags), + Metadata: options.Metadata, + RequestID: options.RequestID, + Tier: options.Tier, + Timeout: options.Timeout, + TransactionalContentCRC64: options.TransactionalContentCRC64, + TransactionalContentMD5: options.TransactionalContentMD5, + } + + headers = options.HTTPHeaders + leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) + cpkInfo = options.CpkInfo + cpkScope = options.CpkScopeInfo + } + + resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) + return resp, err +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. +func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) { + o, lac, mac := options.format() + + resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac) + + return resp, err +} + +// Redeclared APIs ----- Copy over to Append blob and Page blob as well. + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return bb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return bb.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. 
+func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return bb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return bb.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return bb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return bb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return bb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return bb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return bb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
+func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return bb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in blocks to a block blob. +func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, readerSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { + if o.BlockSize == 0 { + // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error + if readerSize > MaxStageBlockBytes*MaxBlocks { + return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if readerSize <= MaxUploadBlobBytes { + o.BlockSize = MaxUploadBlobBytes // Default if unspecified + } else { + if remainder := readerSize % MaxBlocks; remainder > 0 { + // ensure readerSize is a multiple of MaxBlocks + readerSize += (MaxBlocks - remainder) + } + o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = blob.DefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). + } + } + + if readerSize <= MaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions) + + return toUploadReaderAtResponseFromUploadResponse(resp), err + } + + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + if numBlocks > MaxBlocks { + // prevent any math bugs from attempting to upload too many blocks which will always fail + return uploadFromReaderResponse{}, errors.New("block limit exceeded") + } + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: readerSize, + ChunkSize: o.BlockSize, + Concurrency: o.Concurrency, + Operation: func(offset int64, count int64, ctx context.Context) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = streaming.NewRequestProgress(shared.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. 
+ generatedUuid, err := uuid.New() + if err != nil { + return err + } + blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) + stageBlockOptions := o.getStageBlockOptions() + _, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions) + return err + }, + }) + if err != nil { + return uploadFromReaderResponse{}, err + } + // All put blocks were successful, call Put Block List to finalize the blob + commitBlockListOptions := o.getCommitBlockListOptions() + resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) + + return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) +} + +// UploadFile uploads a file in blocks to a block blob. +func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { + stat, err := file.Stat() + if err != nil { + return uploadFromReaderResponse{}, err + } + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) +} + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. +// A Context deadline or cancellation will cause this to error. +func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { + if err := o.format(); err != nil { + return CommitBlockListResponse{}, err + } + + if o == nil { + o = &UploadStreamOptions{} + } + + // If we used the default manager, we need to close it. + if o.transferMangerNotSet { + defer o.transferManager.Close() + } + + result, err := copyFromReader(ctx, body, bb, *o) + if err != nil { + return CommitBlockListResponse{}, err + } + + return result, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return bb.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. 
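Editor's sketch (not part of the vendored SDK or of this patch): minimal use of the UploadBuffer and DownloadBuffer helpers defined above. The blob URL is hypothetical, and the blockblob.NewClient constructor and azidentity credential are assumed from this SDK vintage.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical blob URL.
	bb, err := blockblob.NewClient("https://myaccount.blob.core.windows.net/mycontainer/myblob", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("hello block blob")
	// UploadBuffer splits the buffer into blocks, stages them concurrently and commits the
	// block list, or issues a single Upload call when the buffer fits in one request.
	if _, err := bb.UploadBuffer(context.TODO(), data, nil); err != nil {
		log.Fatal(err)
	}

	// DownloadBuffer reads the blob back into a pre-allocated buffer, fetching ranges in parallel.
	buf := make([]byte, len(data))
	if _, err := bb.DownloadBuffer(context.TODO(), buf, nil); err != nil {
		log.Fatal(err)
	}
}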
+func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return bb.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go new file mode 100644 index 00000000000..46de1ede7e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +// nolint +const ( + // CountToEnd specifies the end of the file + CountToEnd = 0 + + _1MiB = 1024 * 1024 + + // MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB + + // MaxBlocks indicates the maximum number of blocks allowed in a block blob. + MaxBlocks = 50000 +) + +// BlockListType defines values for BlockListType +type BlockListType = generated.BlockListType + +const ( + BlockListTypeCommitted BlockListType = generated.BlockListTypeCommitted + BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted + BlockListTypeAll BlockListType = generated.BlockListTypeAll +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return generated.PossibleBlockListTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go new file mode 100644 index 00000000000..1e93143642d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -0,0 +1,311 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block = generated.Block + +// BlockList - type of blocklist (committed/uncommitted) +type BlockList = generated.BlockList + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// UploadOptions contains the optional parameters for the Client.Upload method. +type UploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. 
Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + HTTPHeaders *blob.HTTPHeaders + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions +} + +func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + basics := generated.BlockBlobClientUploadOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +type StageBlockOptions struct { + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + LeaseAccessConditions *blob.LeaseAccessConditions + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo) { + if o == nil { + return nil, nil, nil, nil + } + + return &generated.BlockBlobClientStageBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method. +type StageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + LeaseAccessConditions *blob.LeaseAccessConditions + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + + // Range specifies a range of bytes. The default value is all bytes. 
+ Range blob.HTTPRange + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo +} + +func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &generated.BlockBlobClientStageBlockFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + SourceRange: exported.FormatHTTPRange(o.Range), + } + + return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. +type CommitBlockListOptions struct { + Tags map[string]string + Metadata map[string]string + RequestID *string + Tier *blob.AccessTier + Timeout *int32 + TransactionalContentCRC64 []byte + TransactionalContentMD5 []byte + HTTPHeaders *blob.HTTPHeaders + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method. +type GetBlockListOptions struct { + Snapshot *string + AccessConditions *blob.AccessConditions +} + +func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions +} + +// ------------------------------------------------------------ + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. +type uploadFromReaderOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // HTTPHeaders indicates the HTTP headers to be associated with the blob. + HTTPHeaders *blob.HTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata map[string]string + + // AccessConditions indicates the access conditions for the block blob. + AccessConditions *blob.AccessConditions + + // AccessTier indicates the tier of blob + AccessTier *blob.AccessTier + + // BlobTags + Tags map[string]string + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) + Concurrency uint16 + + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. 
+ TransactionalContentCRC64 *[]byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 *[]byte +} + +// UploadBufferOptions provides set of configurations for UploadBuffer operation +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for UploadFile operation +type UploadFileOptions = uploadFromReaderOptions + +func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) + return &StageBlockOptions{ + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { + return &UploadOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + AccessConditions: o.AccessConditions, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + } +} + +func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions { + return &CommitBlockListOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions struct { + // transferManager provides a transferManager that controls buffer allocation/reuse and + // concurrency. This overrides BlockSize and MaxConcurrency if set. + transferManager shared.TransferManager + transferMangerNotSet bool + + // BlockSize defines the size of the buffer used during upload. The default and mimimum value is 1 MiB. + BlockSize int + + // Concurrency defines the number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
+ Concurrency int + + HTTPHeaders *blob.HTTPHeaders + Metadata map[string]string + AccessConditions *blob.AccessConditions + AccessTier *blob.AccessTier + Tags map[string]string + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo +} + +func (u *UploadStreamOptions) format() error { + if u == nil || u.transferManager != nil { + return nil + } + + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.BlockSize < _1MiB { + u.BlockSize = _1MiB + } + + var err error + u.transferManager, err = shared.NewStaticBuffer(u.BlockSize, u.Concurrency) + if err != nil { + return fmt.Errorf("bug: default transfer manager could not be created: %s", err) + } + u.transferMangerNotSet = true + return nil +} + +func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) + return &StageBlockOptions{ + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { + options := &CommitBlockListOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + AccessConditions: u.AccessConditions, + } + + return options +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go new file mode 100644 index 00000000000..dab1873b1a9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -0,0 +1,111 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// UploadResponse contains the response from method Client.Upload. +type UploadResponse = generated.BlockBlobClientUploadResponse + +// StageBlockResponse contains the response from method Client.StageBlock. +type StageBlockResponse = generated.BlockBlobClientStageBlockResponse + +// CommitBlockListResponse contains the response from method Client.CommitBlockList. +type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse + +// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL. +type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse + +// GetBlockListResponse contains the response from method Client.GetBlockList. +type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse + +// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type uploadFromReaderResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + // Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB) + ContentCRC64 []byte +} + +func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + } +} + +func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + ContentCRC64: resp.XMSContentCRC64, + } +} + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadFileResponse = uploadFromReaderResponse + +// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadBufferResponse = uploadFromReaderResponse + +// UploadStreamResponse contains the response from method Client.CommitBlockList. +type UploadStreamResponse = CommitBlockListResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go new file mode 100644 index 00000000000..4cc9a51bb40 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -0,0 +1,171 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "context" + "io" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// ClientOptions contains the optional parameters when creating a Client. 
+type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type Client struct { + svc *service.Client +} + +// NewClient creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + var clientOptions *service.ClientOptions + if options != nil { + clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} + } + svcClient, err := service.NewClient(serviceURL, cred, clientOptions) + if err != nil { + return nil, err + } + + return &Client{ + svc: svcClient, + }, nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + var clientOptions *service.ClientOptions + if options != nil { + clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} + } + svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions) + if err != nil { + return nil, err + } + + return &Client{ + svc: svcClient, + }, nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options)) + if err != nil { + return nil, err + } + + return &Client{ + svc: svcClient, + }, nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) + if err != nil { + return nil, err + } + return &Client{ + svc: containerClient, + }, nil +} + +// URL returns the URL endpoint used by the BlobClient object. +func (c *Client) URL() string { + return c.svc.URL() +} + +// CreateContainer is a lifecycle method to creates a new container under the specified account. +// If the container with the same name already exists, a ResourceExistsError will be raised. +// This method returns a client with which to interact with the newly created container. 
+func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) { + return c.svc.CreateContainer(ctx, containerName, o) +} + +// DeleteContainer is a lifecycle method that marks the specified container for deletion. +// The container and any blobs contained within it are later deleted during garbage collection. +// If the container is not found, a ResourceNotFoundError will be raised. +func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) { + return c.svc.DeleteContainer(ctx, containerName, o) +} + +// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o) +} + +// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { + return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o) +} + +// NewListContainersPager operation returns a pager of the containers under the specified account. +// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. +func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { + return c.svc.NewListContainersPager(o) +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadBuffer(ctx, buffer, o) +} + +// UploadFile uploads a file in blocks to a block blob. +func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadFile(ctx, file, o) +} + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. +// A Context deadline or cancellation will cause this to error. +func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadStream(ctx, body, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. 
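Editor's sketch (not part of the vendored SDK or of this patch): end-to-end use of the service-level convenience client above. The connection string, container, and blob names are hypothetical, and the page.Segment.BlobItems field names are assumed from this SDK vintage.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// Hypothetical connection string.
	client, err := azblob.NewClientFromConnectionString("<connection string>", nil)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.TODO()
	// Fails with a ResourceExistsError if the container already exists.
	if _, err := client.CreateContainer(ctx, "samples", nil); err != nil {
		log.Fatal(err)
	}
	if _, err := client.UploadBuffer(ctx, "samples", "hello.txt", []byte("payload"), nil); err != nil {
		log.Fatal(err)
	}

	// List the blobs in the container page by page.
	pager := client.NewListBlobsFlatPager("samples", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range page.Segment.BlobItems {
			fmt.Println(*b.Name)
		}
	}
}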
+func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) { + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) { + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadFile(ctx, file, o) +} + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) { + o = shared.CopyOptions(o) + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadStream(ctx, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go new file mode 100644 index 00000000000..560e151d553 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go deleted file mode 100644 index c5d501c6610..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -type connection struct { - u string - p runtime.Pipeline -} - -// newConnection creates an instance of the connection type with the specified endpoint. -// Pass nil to accept the default options; this is the same as passing a zero-value options. -func newConnection(endpoint string, options *azcore.ClientOptions) *connection { - cp := azcore.ClientOptions{} - if options != nil { - cp = *options - } - return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)} -} - -// Endpoint returns the connection's endpoint. -func (c *connection) Endpoint() string { - return c.u -} - -// Pipeline returns the connection's pipeline. -func (c *connection) Pipeline() runtime.Pipeline { - return c.p -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go index c1c336ed466..d4c3262d932 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -2,45 +2,36 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. package azblob -var SASVersion = "2019-12-12" - -//nolint -const ( - // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. - BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) - // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. - BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType - // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. - BlockBlobMaxBlocks = 50000 +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) - // PageBlobPageBytes indicates the number of bytes in a page (512). - PageBlobPageBytes = 512 +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. 
+func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} - // BlobDefaultDownloadBlockSize is default block size - BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB -) +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType const ( - headerAuthorization = "Authorization" - headerXmsDate = "x-ms-date" - headerContentLength = "Content-Length" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly ) -const ( - tokenScope = "https://storage.azure.com/.default" -) +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go new file mode 100644 index 00000000000..e4afad9f84f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -0,0 +1,334 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "context" + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type Client base.Client[generated.ContainerClient] + +// NewClient creates an instance of Client with the specified values. +// - containerURL - the URL of the container e.g. 
https://.blob.core.windows.net/container +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a container or with a shared access signature (SAS) token. +// - containerURL - the URL of the container e.g. https://.blob.core.windows.net/container? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - containerURL - the URL of the container e.g. https://.blob.core.windows.net/container +// - cred - a SharedKeyCredential created with the matching container's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (c *Client) generated() *generated.ContainerClient { + return base.InnerClient((*base.Client[generated.ContainerClient])(c)) +} + +func (c *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ContainerClient])(c)) +} + +// URL returns the URL endpoint used by the Client object. +func (c *Client) URL() string { + return c.generated().Endpoint() +} + +// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of +// Client's URL. The new BlobClient uses the same request policy pipeline as the Client. +// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's +// NewBlobClient method. +func (c *Client) NewBlobClient(blobName string) *blob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of +// Client's URL. The new AppendBlobURL uses the same request policy pipeline as the Client. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's +// NewAppendBlobClient method. +func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of +// Client's URL. The new BlockBlobClient uses the same request policy pipeline as the Client. +// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's +// NewBlockBlobClient method. +func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of Client's URL. The new PageBlobURL uses the same request policy pipeline as the Client. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. 
Or, call this package's NewPageBlobClient instead of calling this object's +// NewPageBlobClient method. +func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. +func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + var opts *generated.ContainerClientCreateOptions + var cpkScopes *generated.ContainerCpkScopeInfo + if options != nil { + opts = &generated.ContainerClientCreateOptions{ + Access: options.Access, + Metadata: options.Metadata, + } + cpkScopes = options.CpkScopeInfo + } + resp, err := c.generated().Create(ctx, opts, cpkScopes) + + return resp, err +} + +// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. +func (c *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := options.format() + resp, err := c.generated().Delete(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// Restore operation restore the contents and properties of a soft deleted container to a specified container. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/restore-container. +func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, options *RestoreOptions) (RestoreResponse, error) { + urlParts, err := blob.ParseURL(c.URL()) + if err != nil { + return RestoreResponse{}, err + } + + opts := &generated.ContainerClientRestoreOptions{ + DeletedContainerName: &urlParts.ContainerName, + DeletedContainerVersion: &deletedContainerVersion, + } + resp, err := c.generated().Restore(ctx, opts) + + return resp, err +} + +// GetProperties returns the container's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. +func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { + // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. + // This allows us to not expose a GetProperties method at all simplifying the API. + // The optionals are nil, like they were in track 1.5 + opts, leaseAccessConditions := o.format() + + resp, err := c.generated().GetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetMetadata sets the container's metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. +func (c *Client) SetMetadata(ctx context.Context, o *SetMetadataOptions) (SetMetadataResponse, error) { + metadataOptions, lac, mac := o.format() + resp, err := c.generated().SetMetadata(ctx, metadataOptions, lac, mac) + + return resp, err +} + +// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. +func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { + options, ac := o.format() + resp, err := c.generated().GetAccessPolicy(ctx, options, ac) + return resp, err +} + +// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. +func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + accessPolicy, mac, lac := o.format() + resp, err := c.generated().SetAccessPolicy(ctx, containerACL, accessPolicy, mac, lac) + return resp, err +} + +// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { + listOptions := generated.ContainerClientListBlobFlatSegmentOptions{} + if o != nil { + listOptions.Include = o.Include.format() + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListBlobsFlatResponse]{ + More: func(page ListBlobsFlatResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListBlobsFlatResponse) (ListBlobsFlatResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListBlobsFlatResponse{}, err + } + resp, err := c.generated().Pipeline().Do(req) + if err != nil { + return ListBlobsFlatResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + // TOOD: storage error? + return ListBlobsFlatResponse{}, runtime.NewResponseError(resp) + } + return c.generated().ListBlobFlatSegmentHandleResponse(resp) + }, + }) +} + +// NewListBlobsHierarchyPager returns a channel of blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the +// previously-returned Marker) to get the next segment. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored. +// AutoPagerBufferSize specifies the channel's buffer size. +// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.) 
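Editor's sketch (not part of the vendored SDK or of this patch): hierarchical listing with the pager described above, using "/" as the delimiter so blob names are grouped into virtual folders. The connection string and container name are hypothetical, and the page.Segment.BlobPrefixes / BlobItems field names are assumed from this SDK vintage.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

func main() {
	// Hypothetical connection string and container name.
	client, err := container.NewClientFromConnectionString("<connection string>", "samples", nil)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.TODO()
	pager := client.NewListBlobsHierarchyPager("/", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, prefix := range page.Segment.BlobPrefixes {
			fmt.Println("virtual folder:", *prefix.Name)
		}
		for _, b := range page.Segment.BlobItems {
			fmt.Println("blob:", *b.Name)
		}
	}
}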
+func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] { + listOptions := o.format() + return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{ + More: func(page ListBlobsHierarchyResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListBlobsHierarchyResponse) (ListBlobsHierarchyResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) + } + if err != nil { + return ListBlobsHierarchyResponse{}, err + } + resp, err := c.generated().Pipeline().Do(req) + if err != nil { + return ListBlobsHierarchyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListBlobsHierarchyResponse{}, runtime.NewResponseError(resp) + } + return c.generated().ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time, expiry time.Time) (string, error) { + if c.sharedKey() == nil { + return "", errors.New("SAS can only be signed with a SharedKeyCredential") + } + + urlParts, err := blob.ParseURL(c.URL()) + if err != nil { + return "", err + } + + // Containers do not have snapshots, nor versions. + qps, err := sas.BlobSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + ContainerName: urlParts.ContainerName, + Permissions: permissions.String(), + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(c.sharedKey()) + if err != nil { + return "", err + } + + endpoint := c.URL() + "?" + qps.Encode() + + return endpoint, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go new file mode 100644 index 00000000000..ce6e67b5ed4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. 
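The container Client methods shown above (Create, the list pagers, and GetSASURL) are typically used together. Below is a minimal usage sketch, assuming a container.NewClientWithSharedKeyCredential constructor from this same vendored azblob version; the account name, key, container URL, and the Segment.BlobItems field layout are illustrative assumptions rather than a definitive recipe.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	// Placeholder account, key and container values.
	accountName, accountKey := "myaccount", "bXlrZXk="
	containerURL := fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName)

	cred, err := container.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatal(err)
	}
	// Assumed constructor from the same package; it pairs the shared key with the client.
	client, err := container.NewClientWithSharedKeyCredential(containerURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Create the container (fails if it already exists).
	if _, err := client.Create(ctx, nil); err != nil {
		log.Fatal(err)
	}

	// Enumerate blobs page by page with the flat pager.
	pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
		Include: container.ListBlobsInclude{Metadata: true},
	})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		// Segment/BlobItems field names follow the generated models in this vendored version.
		for _, b := range page.Segment.BlobItems {
			fmt.Println(*b.Name)
		}
	}

	// Generate a read+list SAS URL valid for 48 hours; this only works with a shared key credential.
	sasURL, err := client.GetSASURL(sas.ContainerPermissions{Read: true, List: true},
		time.Now(), time.Now().Add(48*time.Hour))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURL)
}
```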
+func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// LeaseStatusType defines values for LeaseStatusType +type LeaseStatusType = generated.LeaseStatusType + +const ( + LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked + LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return generated.PossibleLeaseStatusTypeValues() +} + +// LeaseDurationType defines values for LeaseDurationType +type LeaseDurationType = generated.LeaseDurationType + +const ( + LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite + LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// LeaseStateType defines values for LeaseStateType +type LeaseStateType = generated.LeaseStateType + +const ( + LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable + LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased + LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired + LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking + LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. 
+func PossibleLeaseStateTypeValues() []LeaseStateType { + return generated.PossibleLeaseStateTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go new file mode 100644 index 00000000000..eb65d5fc3c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. 
+type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CpkScopeInfo = generated.ContainerCpkScopeInfo + +// BlobProperties - Properties of a blob +type BlobProperties = generated.BlobPropertiesInternal + +// BlobItem - An Azure Storage blob +type BlobItem = generated.BlobItemInternal + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// AccessPolicy - An Access policy +type AccessPolicy = generated.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission = exported.AccessPolicyPermission + +// SignedIdentifier - signed identifier +type SignedIdentifier = generated.SignedIdentifier + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Specifies the encryption scope settings to set on the container. + CpkScopeInfo *CpkScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() (*generated.ContainerClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatContainerAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// RestoreOptions contains the optional parameters for the Client.Restore method. +type RestoreOptions struct { + // placeholder for future options +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. 
+type GetPropertiesOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetPropertiesOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListBlobsInclude indicates what additional information the service should return with each blob. +type ListBlobsInclude struct { + Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool +} + +func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { + if reflect.ValueOf(l).IsZero() { + return nil + } + + include := []generated.ListBlobsIncludeItem{} + + if l.Copy { + include = append(include, generated.ListBlobsIncludeItemCopy) + } + if l.Deleted { + include = append(include, generated.ListBlobsIncludeItemDeleted) + } + if l.DeletedWithVersions { + include = append(include, generated.ListBlobsIncludeItemDeletedwithversions) + } + if l.ImmutabilityPolicy { + include = append(include, generated.ListBlobsIncludeItemImmutabilitypolicy) + } + if l.LegalHold { + include = append(include, generated.ListBlobsIncludeItemLegalhold) + } + if l.Metadata { + include = append(include, generated.ListBlobsIncludeItemMetadata) + } + if l.Snapshots { + include = append(include, generated.ListBlobsIncludeItemSnapshots) + } + if l.Tags { + include = append(include, generated.ListBlobsIncludeItemTags) + } + if l.UncommittedBlobs { + include = append(include, generated.ListBlobsIncludeItemUncommittedblobs) + } + if l.Versions { + include = append(include, generated.ListBlobsIncludeItemVersions) + } + + return include +} + +// ListBlobsFlatOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment method. +type ListBlobsFlatOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListBlobsInclude + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. 
+ Prefix *string +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListBlobsHierarchyOptions provides set of configurations for Client.NewListBlobsHierarchyPager +type ListBlobsHierarchyOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListBlobsInclude + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment method. +func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHierarchySegmentOptions { + if o == nil { + return generated.ContainerClientListBlobHierarchySegmentOptions{} + } + + return generated.ContainerClientListBlobHierarchySegmentOptions{ + Include: o.Include.format(), + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + Metadata map[string]string + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.ContainerClientSetMetadataOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return &generated.ContainerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. 
+type GetAccessPolicyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation +type SetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + AccessConditions *AccessConditions +} + +func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions) + return &generated.ContainerClientSetAccessPolicyOptions{ + Access: o.Access, + }, lac, mac +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go new file mode 100644 index 00000000000..9d8672b135b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.ContainerClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.ContainerClientDeleteResponse + +// RestoreResponse contains the response from method Client.Restore. +type RestoreResponse = generated.ContainerClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse + +// ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse + +// ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment. +type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.ContainerClientSetMetadataResponse + +// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy. +type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse + +// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. 
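To show how these option and model aliases fit together, here is a brief sketch of installing a stored access policy with SetAccessPolicy. It assumes the to.Ptr helper from azcore and the generated AccessPolicy field names (Start, Expiry, Permission) as they appear in this vendored version; the policy ID and times are placeholders, and client construction follows the earlier sketch.

```go
package example

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// setReadOnlyPolicy grants anonymous read access to blobs and installs a single
// stored access policy allowing read+list for 48 hours.
func setReadOnlyPolicy(ctx context.Context, client *container.Client) error {
	// Build the permission string ("rl") from the convenience type declared above.
	perm := container.AccessPolicyPermission{Read: true, List: true}

	id := container.SignedIdentifier{
		ID: to.Ptr("read-list-48h"), // placeholder policy identifier
		AccessPolicy: &container.AccessPolicy{
			Start:      to.Ptr(time.Now().UTC()),
			Expiry:     to.Ptr(time.Now().UTC().Add(48 * time.Hour)),
			Permission: to.Ptr(perm.String()),
		},
	}

	_, err := client.SetAccessPolicy(ctx, []*container.SignedIdentifier{&id}, &container.SetAccessPolicyOptions{
		Access: to.Ptr(container.PublicAccessTypeBlob),
	})
	return err
}
```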
+type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go index c2426eb7005..d5b6ed6a704 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -1,9 +1,8 @@ //go:build go1.18 // +build go1.18 -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. /* @@ -167,13 +166,13 @@ Examples handle(err) // Download the blob's contents and ensure that the download worked properly - blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil) + blobDownloadResponse, err := blockBlobClient.DownloadStream(context.TODO(), nil) handle(err) // Use the bytes.Buffer object to read the downloaded data. // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. reader := blobDownloadResponse.Body(nil) - downloadData, err := ioutil.ReadAll(reader) + downloadData, err := io.ReadAll(reader) handle(err) if string(downloadData) != uploadData { handle(errors.New("Uploaded data should be same as downloaded data")) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go deleted file mode 100644 index 28725003981..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go +++ /dev/null @@ -1,316 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "encoding/base64" - "io" - "net/http" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" - - "bytes" - "errors" - "os" -) - -// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. -func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) { - if o.BlockSize == 0 { - // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error - if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { - return nil, errors.New("buffer is too large to upload to a block blob") - } - // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if readerSize <= BlockBlobMaxUploadBlobBytes { - o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified - } else { - o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks - if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB - o.BlockSize = BlobDefaultDownloadBlockSize - } - // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). 
- } - } - - if readerSize <= BlockBlobMaxUploadBlobBytes { - // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) - if o.Progress != nil { - body = streaming.NewRequestProgress(internal.NopCloser(body), o.Progress) - } - - uploadBlockBlobOptions := o.getUploadBlockBlobOptions() - resp, err := bb.Upload(ctx, internal.NopCloser(body), uploadBlockBlobOptions) - - return resp.RawResponse, err - } - - var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) - - blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs - progress := int64(0) - progressLock := &sync.Mutex{} - - err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "uploadReaderAtToBlockBlob", - TransferSize: readerSize, - ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, - Operation: func(offset int64, count int64, ctx context.Context) error { - // This function is called once per block. - // It is passed this block's offset within the buffer and its count of bytes - // Prepare to read the proper block/section of the buffer - var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) - blockNum := offset / o.BlockSize - if o.Progress != nil { - blockProgress := int64(0) - body = streaming.NewRequestProgress(internal.NopCloser(body), - func(bytesTransferred int64) { - diff := bytesTransferred - blockProgress - blockProgress = bytesTransferred - progressLock.Lock() // 1 goroutine at a time gets progress report - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - - // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks - // at the same time causing PutBlockList to get a mix of blocks from all the clients. - generatedUuid, err := uuid.New() - if err != nil { - return err - } - blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) - stageBlockOptions := o.getStageBlockOptions() - _, err = bb.StageBlock(ctx, blockIDList[blockNum], internal.NopCloser(body), stageBlockOptions) - return err - }, - }) - if err != nil { - return nil, err - } - // All put blocks were successful, call Put Block List to finalize the blob - commitBlockListOptions := o.getCommitBlockListOptions() - resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) - - return resp.RawResponse, err -} - -// UploadBuffer uploads a buffer in blocks to a block blob. -func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) { - return bb.uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), o) -} - -// UploadFile uploads a file in blocks to a block blob. -func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) { - - stat, err := file.Stat() - if err != nil { - return nil, err - } - return bb.uploadReaderAtToBlockBlob(ctx, file, stat.Size(), o) -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. -// A Context deadline or cancellation will cause this to error. -func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { - if err := o.defaults(); err != nil { - return BlockBlobCommitBlockListResponse{}, err - } - - // If we used the default manager, we need to close it. 
- if o.transferMangerNotSet { - defer o.TransferManager.Close() - } - - result, err := copyFromReader(ctx, body, bb, o) - if err != nil { - return BlockBlobCommitBlockListResponse{}, err - } - - return result, nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DownloadToWriterAt downloads an Azure blob to a WriterAt with parallel. -// Offset and count are optional, pass 0 for both to download the entire blob. -func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error { - if o.BlockSize == 0 { - o.BlockSize = BlobDefaultDownloadBlockSize - } - - if count == CountToEnd { // If size not specified, calculate it - // If we don't have the length at all, get it - downloadBlobOptions := o.getDownloadBlobOptions(0, CountToEnd, nil) - dr, err := b.Download(ctx, downloadBlobOptions) - if err != nil { - return err - } - count = *dr.ContentLength - offset - } - - if count <= 0 { - // The file is empty, there is nothing to download. - return nil - } - - // Prepare and do parallel download. - progress := int64(0) - progressLock := &sync.Mutex{} - - err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "downloadBlobToWriterAt", - TransferSize: count, - ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, - Operation: func(chunkStart int64, count int64, ctx context.Context) error { - - downloadBlobOptions := o.getDownloadBlobOptions(chunkStart+offset, count, nil) - dr, err := b.Download(ctx, downloadBlobOptions) - if err != nil { - return err - } - body := dr.Body(&o.RetryReaderOptionsPerBlock) - if o.Progress != nil { - rangeProgress := int64(0) - body = streaming.NewResponseProgress( - body, - func(bytesTransferred int64) { - diff := bytesTransferred - rangeProgress - rangeProgress = bytesTransferred - progressLock.Lock() - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - _, err = io.Copy(newSectionWriter(writer, chunkStart, count), body) - if err != nil { - return err - } - err = body.Close() - return err - }, - }) - if err != nil { - return err - } - return nil -} - -// DownloadToBuffer downloads an Azure blob to a buffer with parallel. -// Offset and count are optional, pass 0 for both to download the entire blob. -func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error { - return b.DownloadToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o) -} - -// DownloadToFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -// Offset and count are optional, pass 0 for both to download the entire blob. -func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error { - // 1. Calculate the size of the destination file - var size int64 - - if count == CountToEnd { - // Try to get Azure blob's size - getBlobPropertiesOptions := o.getBlobPropertiesOptions() - props, err := b.GetProperties(ctx, getBlobPropertiesOptions) - if err != nil { - return err - } - size = *props.ContentLength - offset - } else { - size = count - } - - // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. 
- stat, err := file.Stat() - if err != nil { - return err - } - if stat.Size() != size { - if err = file.Truncate(size); err != nil { - return err - } - } - - if size > 0 { - return b.DownloadToWriterAt(ctx, offset, size, file, o) - } else { // if the blob's size is 0, there is no need in downloading it - return nil - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DoBatchTransfer helps to execute operations in a batch manner. -// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) -func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { - if o.ChunkSize == 0 { - return errors.New("ChunkSize cannot be 0") - } - - if o.Parallelism == 0 { - o.Parallelism = 5 // default Parallelism - } - - // Prepare and do parallel operations. - numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) - operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Create the goroutines that process each operation (in parallel). - for g := uint16(0); g < o.Parallelism; g++ { - //grIndex := g - go func() { - for f := range operationChannel { - err := f() - operationResponseChannel <- err - } - }() - } - - // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { - curChunkSize := o.ChunkSize - - if chunkNum == numChunks-1 { // Last chunk - curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total - } - offset := int64(chunkNum) * o.ChunkSize - - operationChannel <- func() error { - return o.Operation(offset, curChunkSize, ctx) - } - } - close(operationChannel) - - // Wait for the operations to complete. - var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { - responseError := <-operationResponseChannel - // record the first error (the original error which should cause the other chunks to fail with canceled context) - if responseError != nil && firstErr == nil { - cancel() // As soon as any operation fails, cancel all remaining operation calls - firstErr = responseError - } - } - return firstErr -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go new file mode 100644 index 00000000000..16e6cac066d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -0,0 +1,89 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +type Client[T any] struct { + inner *T + sharedKey *exported.SharedKeyCredential +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func NewClient[T any](inner *T) *Client[T] { + return &Client[T]{inner: inner} +} + +func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(containerURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] { + return &Client[generated.ContainerClient]{ + inner: generated.NewContainerClient(containerURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] { + return &Client[generated.BlobClient]{ + inner: generated.NewBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +type CompositeClient[T, U any] struct { + innerT *T + innerU *U + sharedKey *exported.SharedKeyCredential +} + +func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { + return &Client[T]{inner: client.innerT}, client.innerU +} + +func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { + return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewAppendBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { + return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewBlockBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { + return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewPageBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { + return client.sharedKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go new file mode 100644 index 00000000000..96d188fa567 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package exported + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = generated.ModifiedAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go similarity index 88% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go index 612bc784c37..14c293cf656 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package exported import ( "bytes" @@ -19,7 +19,7 @@ type AccessPolicyPermission struct { // String produces the access policy permission string for an Azure Storage container. // Call this method to set AccessPolicy's Permission field. -func (p AccessPolicyPermission) String() string { +func (p *AccessPolicyPermission) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go new file mode 100644 index 00000000000..9bc1ca47df8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. 
An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange struct { + Offset int64 + Count int64 +} + +// FormatHTTPRange converts an HTTPRange to its string format. +func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go similarity index 73% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index 60b1e5a76b6..d1563105423 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package exported import ( "bytes" @@ -22,6 +22,7 @@ import ( azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) // NewSharedKeyCredential creates an immutable SharedKeyCredential containing the @@ -35,7 +36,6 @@ func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCr } // SharedKeyCredential contains an account's name and its primary or secondary key. -// It is immutable making it shareable and goroutine-safe. type SharedKeyCredential struct { // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only accountName string @@ -58,7 +58,7 @@ func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { } // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
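Because FormatHTTPRange above lives in the internal exported package, its behavior is easiest to see with a small worked example. The sketch below re-implements the same rule locally (it is not the SDK function itself) purely to show which Range header values result.

```go
package main

import (
	"fmt"
	"strconv"
)

// formatHTTPRange mirrors the FormatHTTPRange helper shown above: a zero-value
// range means "whole resource" (no header), and Count == 0 means "from Offset to the end".
func formatHTTPRange(offset, count int64) *string {
	if offset == 0 && count == 0 {
		return nil
	}
	end := ""
	if count > 0 {
		end = strconv.FormatInt(offset+count-1, 10)
	}
	s := fmt.Sprintf("bytes=%v-%s", offset, end)
	return &s
}

func main() {
	for _, r := range [][2]int64{{0, 0}, {100, 0}, {100, 50}} {
		if v := formatHTTPRange(r[0], r[1]); v == nil {
			fmt.Printf("offset=%d count=%d -> no Range header (entire blob)\n", r[0], r[1])
		} else {
			fmt.Printf("offset=%d count=%d -> %s\n", r[0], r[1], *v) // e.g. bytes=100- and bytes=100-149
		}
	}
}
```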
-func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) { +func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) _, err := h.Write([]byte(message)) return base64.StdEncoding.EncodeToString(h.Sum(nil)), err @@ -67,7 +67,7 @@ func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services headers := req.Header - contentLength := headers.Get(headerContentLength) + contentLength := getHeader(shared.HeaderContentLength, headers) if contentLength == "0" { contentLength = "" } @@ -79,23 +79,36 @@ func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, erro stringToSign := strings.Join([]string{ req.Method, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), "", // Empty date because x-ms-date is expected (as per web page above) - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), c.buildCanonicalizedHeader(headers), canonicalizedResource, }, "\n") return stringToSign, nil } +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { cm := map[string][]string{} for k, v := range headers { @@ -165,33 +178,41 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er return cr.String(), nil } -type sharedKeyCredPolicy struct { +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. 
+func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { cred *SharedKeyCredential } -func newSharedKeyCredPolicy(cred *SharedKeyCredential) *sharedKeyCredPolicy { - return &sharedKeyCredPolicy{cred: cred} +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} } -func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { - if d := req.Raw().Header.Get(headerXmsDate); d == "" { - req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat)) +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) } stringToSign, err := s.cred.buildStringToSign(req.Raw()) if err != nil { return nil, err } - signature, err := s.cred.ComputeHMACSHA256(stringToSign) + signature, err := s.cred.computeHMACSHA256(stringToSign) if err != nil { return nil, err } authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") - req.Raw().Header.Set(headerAuthorization, authHeader) + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) response, err := req.Next() if err != nil && response != nil && response.StatusCode == http.StatusForbidden { // Service failed to authenticate request, log it - log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n") + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") } return response, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go new file mode 100644 index 00000000000..2e2dd16e426 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go @@ -0,0 +1,64 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it +func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { + return &UserDelegationCredential{ + accountName: accountName, + userDelegationKey: udk, + } +} + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// UserDelegationCredential contains an account's name and its user delegation key. 
+type UserDelegationCredential struct { + accountName string + userDelegationKey UserDelegationKey +} + +// getAccountName returns the Storage account's Name +func (f *UserDelegationCredential) getAccountName() string { + return f.accountName +} + +// GetAccountName is a helper method for accessing the user delegation key parameters outside this package. +func GetAccountName(udc *UserDelegationCredential) string { + return udc.getAccountName() +} + +// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) { + bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value) + h := hmac.New(sha256.New, bytes) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package. +func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) { + return udc.computeHMACSHA256(message) +} + +// getUDKParams returns UserDelegationKey +func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey { + return &f.userDelegationKey +} + +// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package. +func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey { + return udc.getUDKParams() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go new file mode 100644 index 00000000000..7b8ee601dc8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +const ( + ModuleName = "azblob" + ModuleVersion = "v0.5.1" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go new file mode 100644 index 00000000000..3b6184fea67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *AppendBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *AppendBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md new file mode 100644 index 00000000000..d4e2100e197 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -0,0 +1,304 @@ +# Code Generation - Azure Blob SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: . +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.43" +``` + +### Remove pager methods and export various generated methods in container client + +``` yaml +directive: + - from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`). + replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); +``` + +### Fix BlobMetadata. + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. 
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` + +# Export various createRequest/HandleResponse methods + +``` yaml +directive: +- from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). + replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); + +- from: zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). 
+ replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` }); +``` + +### Clean up some const type names so they don't stutter + +``` yaml +directive: +- from: swagger-document + where: $.parameters['BlobDeleteType'] + transform: > + $["x-ms-enum"].name = "DeleteType"; + $["x-ms-client-name"] = "DeleteType"; + +- from: swagger-document + where: $.parameters['BlobExpiryOptions'] + transform: > + $["x-ms-enum"].name = "ExpiryOptions"; + $["x-ms-client-name"].name = "ExpiryOptions"; + +- from: swagger-document + where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"] + transform: > + $["x-ms-client-name"].name = "ImmutabilityPolicyMode"; + $.enum = [ "Mutable", "Unlocked", "Locked"]; + $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false }; + +- from: swagger-document + where: $.parameters['ImmutabilityPolicyMode'] + transform: > + $["x-ms-enum"].name = "ImmutabilityPolicySetting"; + $["x-ms-client-name"].name = "ImmutabilityPolicySetting"; + +- from: swagger-document + where: $.definitions['BlobPropertiesInternal'] + transform: > + $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). + replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). + replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). + replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). + replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); +``` + +### Unsure why this casing changed, but fixing it + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/SignedOid\s+\*string/g, `SignedOID *string`). + replace(/SignedTid\s+\*string/g, `SignedTID *string`); +``` + +### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed + +``` yaml +directive: +- from: zz_constants.go + where: $ + transform: >- + return $. 
+      replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed\t+StorageErrorCode\s+=\s+\"IncrementalCopyOfEralierVersionSnapshotNotAllowed"\n/g, `StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"\n`).
+      replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed/g, `StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed`)
+```
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
new file mode 100644
index 00000000000..c3d3c2607c8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
@@ -0,0 +1,17 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+func (client *BlobClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *BlobClient) Pipeline() runtime.Pipeline {
+	return client.pl
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
new file mode 100644
index 00000000000..a43e327ec44
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
@@ -0,0 +1,19 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package generated
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+func (client *BlockBlobClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *BlockBlobClient) Pipeline() runtime.Pipeline {
+	return client.pl
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
new file mode 100644
index 00000000000..bbbf828a07a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
@@ -0,0 +1,17 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ContainerClient) Endpoint() string { + return client.endpoint +} + +func (client *ContainerClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go new file mode 100644 index 00000000000..8a212cc3d4e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *PageBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *PageBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go new file mode 100644 index 00000000000..1f449b955e8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go new file mode 100644 index 00000000000..d0fe18c5d27 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -0,0 +1,653 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// AppendBlobClient contains the methods for the AppendBlob group. +// Don't use this type directly, use NewAppendBlobClient() instead. +type AppendBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
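+//
+// A minimal usage sketch, assuming the caller has already built a runtime.Pipeline
+// "pl" and has a context.Context "ctx"; the blob URL below is illustrative only:
+//
+//	abClient := NewAppendBlobClient("https://myaccount.blob.core.windows.net/mycontainer/myblob", pl)
+//	_, err := abClient.Create(ctx, 0, nil, nil, nil, nil, nil, nil) // create an empty append blob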
+func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { + client := &AppendBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockHandleResponse(resp) +} + +// appendBlockCreateRequest creates the AppendBlock request. 
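+// Callers reach it through AppendBlock above; a rough sketch, assuming "body" is an
+// io.ReadSeekCloser and "size" is its length in bytes:
+//
+//	_, err := abClient.AppendBlock(ctx, size, body, nil, nil, nil, nil, nil, nil)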
+func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { + result := AppendBlobClientAppendBlockResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceURL - Specify a URL to the copy source. +// contentLength - The length of the request. +// options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockFromURLHandleResponse(resp) +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. +func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && 
cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
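+// The corresponding call path is AppendBlockFromURL above; a rough sketch, assuming
+// "srcURL" points at a readable source blob and "length" is the number of bytes to append:
+//
+//	_, err := abClient.AppendBlockFromURL(ctx, srcURL, length, nil, nil, nil, nil, nil, nil, nil)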
+func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { + result := AppendBlobClientAppendBlockFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != 
nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { + result := AppendBlobClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + } + return client.sealHandleResponse(resp) +} + +// sealCreateRequest creates the Seal request. 
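+// It is reached through Seal above; a rough sketch with all optional parameters left nil:
+//
+//	_, err := abClient.Seal(ctx, nil, nil, nil, nil)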
+func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { + result := AppendBlobClientSealResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go similarity index 52% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 607c6a714dc..713fb52794b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -5,12 +5,14 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "context" "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "net/http" @@ -19,16 +21,18 @@ import ( "time" ) -type blobClient struct { +// BlobClient contains the methods for the Blob group. +// Don't use this type directly, use NewBlobClient() instead. +type BlobClient struct { endpoint string pl runtime.Pipeline } -// newBlobClient creates a new instance of blobClient with the specified values. +// NewBlobClient creates a new instance of BlobClient with the specified values. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // pl - the pipeline used for sending requests and handling responses. -func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient { - client := &blobClient{ +func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { + client := &BlobClient{ endpoint: endpoint, pl: pl, } @@ -38,27 +42,27 @@ func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient { // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 // copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. -// blobClientAbortCopyFromURLOptions - blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) AbortCopyFromURL(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (blobClientAbortCopyFromURLResponse, error) { - req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, blobClientAbortCopyFromURLOptions, leaseAccessConditions) +// options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { - return blobClientAbortCopyFromURLResponse{}, err + return BlobClientAbortCopyFromURLResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientAbortCopyFromURLResponse{}, err + return BlobClientAbortCopyFromURLResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return blobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) } return client.abortCopyFromURLHandleResponse(resp) } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. 
-func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -66,25 +70,25 @@ func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop reqQP := req.Raw().URL.Query() reqQP.Set("comp", "copy") reqQP.Set("copyid", copyID) - if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAbortCopyFromURLOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-copy-action", "abort") + req.Raw().Header["x-ms-copy-action"] = []string{"abort"} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientAbortCopyFromURLOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. -func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (blobClientAbortCopyFromURLResponse, error) { - result := blobClientAbortCopyFromURLResponse{RawResponse: resp} +func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { + result := BlobClientAbortCopyFromURLResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -97,7 +101,7 @@ func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (b if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientAbortCopyFromURLResponse{}, err + return BlobClientAbortCopyFromURLResponse{}, err } result.Date = &date } @@ -106,76 +110,76 @@ func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (b // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// blobClientAcquireLeaseOptions - blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) AcquireLease(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, blobClientAcquireLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) AcquireLease(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) } return client.acquireLeaseHandleResponse(resp) } // acquireLeaseCreateRequest creates the AcquireLease request. -func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "acquire") - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Duration != nil { - req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Duration), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + if options != nil && options.Duration != nil { + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} } - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.ProposedLeaseID != nil { - req.Raw().Header.Set("x-ms-proposed-lease-id", *blobClientAcquireLeaseOptions.ProposedLeaseID) + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientAcquireLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // acquireLeaseHandleResponse handles the AcquireLease response. -func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobClientAcquireLeaseResponse, error) { - result := blobClientAcquireLeaseResponse{RawResponse: resp} +func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { + result := BlobClientAcquireLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } result.LastModified = &lastModified } @@ -194,7 +198,7 @@ func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobC if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } result.Date = &date } @@ -203,73 +207,73 @@ func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobC // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// blobClientBreakLeaseOptions - blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) BreakLease(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientBreakLeaseResponse, error) { - req, err := client.breakLeaseCreateRequest(ctx, blobClientBreakLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return blobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) } return client.breakLeaseHandleResponse(resp) } // breakLeaseCreateRequest creates the BreakLease request. -func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "break") - if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.BreakPeriod != nil { - req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.BreakPeriod), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - 
req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientBreakLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // breakLeaseHandleResponse handles the BreakLease response. -func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobClientBreakLeaseResponse, error) { - result := blobClientBreakLeaseResponse{RawResponse: resp} +func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { + result := BlobClientBreakLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } result.LastModified = &lastModified } @@ -277,7 +281,7 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCli leaseTime32, err := strconv.ParseInt(val, 10, 32) leaseTime := int32(leaseTime32) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } result.LeaseTime = &leaseTime } @@ -293,7 +297,7 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCli if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } result.Date = &date } @@ -302,76 +306,76 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCli // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. // proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID // string formats. -// blobClientChangeLeaseOptions - blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientChangeLeaseResponse, error) { - req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, blobClientChangeLeaseOptions, modifiedAccessConditions) +// options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { - return blobClientChangeLeaseResponse{}, err + return BlobClientChangeLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientChangeLeaseResponse{}, err + return BlobClientChangeLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) } return client.changeLeaseHandleResponse(resp) } // changeLeaseCreateRequest creates the ChangeLease request. -func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientChangeLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "change") - req.Raw().Header.Set("x-ms-lease-id", leaseID) - req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientChangeLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // changeLeaseHandleResponse handles the ChangeLease response. -func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobClientChangeLeaseResponse, error) { - result := blobClientChangeLeaseResponse{RawResponse: resp} +func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { + result := BlobClientChangeLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientChangeLeaseResponse{}, err + return BlobClientChangeLeaseResponse{}, err } result.LastModified = &lastModified } @@ -390,7 +394,7 @@ func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobCl if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientChangeLeaseResponse{}, err + return BlobClientChangeLeaseResponse{}, err } result.Date = &date } @@ -400,117 +404,117 @@ func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobCl // CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. -// blobClientCopyFromURLOptions - blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL +// options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. 
-// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) CopyFromURL(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCopyFromURLResponse, error) { - req, err := client.copyFromURLCreateRequest(ctx, copySource, blobClientCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientCopyFromURLResponse{}, err + return BlobClientCopyFromURLResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientCopyFromURLResponse{}, err + return BlobClientCopyFromURLResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return blobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) } return client.copyFromURLHandleResponse(resp) } // copyFromURLCreateRequest creates the CopyFromURL request. 
-func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCopyFromURLOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-requires-sync", "true") - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Metadata != nil { - for k, v := range blobClientCopyFromURLOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*blobClientCopyFromURLOptions.Tier)) + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = 
[]string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-copy-source", copySource) + req.Raw().Header["x-ms-copy-source"] = []string{copySource} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientCopyFromURLOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blobClientCopyFromURLOptions.SourceContentMD5)) + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *blobClientCopyFromURLOptions.BlobTagsString) + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientCopyFromURLOptions.ImmutabilityPolicyMode)) + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.LegalHold != nil { - 
req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientCopyFromURLOptions.LegalHold)) + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *blobClientCopyFromURLOptions.CopySourceAuthorization) + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // copyFromURLHandleResponse handles the CopyFromURL response. -func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobClientCopyFromURLResponse, error) { - result := blobClientCopyFromURLResponse{RawResponse: resp} +func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { + result := BlobClientCopyFromURLResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientCopyFromURLResponse{}, err + return BlobClientCopyFromURLResponse{}, err } result.LastModified = &lastModified } @@ -529,7 +533,7 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobCl if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientCopyFromURLResponse{}, err + return BlobClientCopyFromURLResponse{}, err } result.Date = &date } @@ -542,14 +546,14 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobCl if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientCopyFromURLResponse{}, err + return BlobClientCopyFromURLResponse{}, err } result.ContentMD5 = contentMD5 } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientCopyFromURLResponse{}, err + return BlobClientCopyFromURLResponse{}, err } result.XMSContentCRC64 = xMSContentCRC64 } @@ -558,95 +562,95 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobCl // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. -// blobClientCreateSnapshotOptions - blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot -// method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. 
-func (client *blobClient) CreateSnapshot(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCreateSnapshotResponse, error) { - req, err := client.createSnapshotCreateRequest(ctx, blobClientCreateSnapshotOptions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientCreateSnapshotResponse{}, err + return BlobClientCreateSnapshotResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientCreateSnapshotResponse{}, err + return BlobClientCreateSnapshotResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) } return client.createSnapshotHandleResponse(resp) } // createSnapshotCreateRequest creates the CreateSnapshot request. 
-func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "snapshot") - if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCreateSnapshotOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Metadata != nil { - for k, v := range blobClientCreateSnapshotOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientCreateSnapshotOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // createSnapshotHandleResponse handles the CreateSnapshot response. -func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blobClientCreateSnapshotResponse, error) { - result := blobClientCreateSnapshotResponse{RawResponse: resp} +func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { + result := BlobClientCreateSnapshotResponse{} if val := resp.Header.Get("x-ms-snapshot"); val != "" { result.Snapshot = &val } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientCreateSnapshotResponse{}, err + return BlobClientCreateSnapshotResponse{}, err } result.LastModified = &lastModified } @@ -665,14 +669,14 @@ func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blo if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientCreateSnapshotResponse{}, err + return BlobClientCreateSnapshotResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientCreateSnapshotResponse{}, err + return BlobClientCreateSnapshotResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -691,76 +695,77 @@ func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blo // All other operations on a soft-deleted blob or snapshot causes the service to // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. -// blobClientDeleteOptions - blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) Delete(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDeleteResponse, error) { - req, err := client.deleteCreateRequest(ctx, blobClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return blobClientDeleteResponse{}, err + return BlobClientDeleteResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientDeleteResponse{}, err + return BlobClientDeleteResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return blobClientDeleteResponse{}, runtime.NewResponseError(resp) + return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) } return client.deleteHandleResponse(resp) } // deleteCreateRequest creates the Delete request. -func (client *blobClient) deleteCreateRequest(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientDeleteOptions != nil && blobClientDeleteOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientDeleteOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientDeleteOptions != nil && blobClientDeleteOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientDeleteOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if blobClientDeleteOptions != nil && blobClientDeleteOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDeleteOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if blobClientDeleteOptions != nil && blobClientDeleteOptions.BlobDeleteType != nil { - reqQP.Set("deletetype", string(*blobClientDeleteOptions.BlobDeleteType)) + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if blobClientDeleteOptions != nil && 
blobClientDeleteOptions.DeleteSnapshots != nil { - req.Raw().Header.Set("x-ms-delete-snapshots", string(*blobClientDeleteOptions.DeleteSnapshots)) + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientDeleteOptions != nil && blobClientDeleteOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientDeleteOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // deleteHandleResponse handles the Delete response. -func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientDeleteResponse, error) { - result := blobClientDeleteResponse{RawResponse: resp} +func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) { + result := BlobClientDeleteResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -773,7 +778,7 @@ func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientD if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDeleteResponse{}, err + return BlobClientDeleteResponse{}, err } result.Date = &date } @@ -782,25 +787,26 @@ func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientD // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. 
-// options - blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy +// Generated from API version 2020-10-02 +// options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. -func (client *blobClient) DeleteImmutabilityPolicy(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (blobClientDeleteImmutabilityPolicyResponse, error) { +func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { - return blobClientDeleteImmutabilityPolicyResponse{}, err + return BlobClientDeleteImmutabilityPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientDeleteImmutabilityPolicyResponse{}, err + return BlobClientDeleteImmutabilityPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) } return client.deleteImmutabilityPolicyHandleResponse(resp) } // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. -func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { +func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) if err != nil { return nil, err @@ -811,17 +817,17 @@ func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. 
-func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientDeleteImmutabilityPolicyResponse, error) { - result := blobClientDeleteImmutabilityPolicyResponse{RawResponse: resp} +func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) { + result := BlobClientDeleteImmutabilityPolicyResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -834,7 +840,7 @@ func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDeleteImmutabilityPolicyResponse{}, err + return BlobClientDeleteImmutabilityPolicyResponse{}, err } result.Date = &date } @@ -844,94 +850,95 @@ func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. -// blobClientDownloadOptions - blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) Download(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDownloadResponse, error) { - req, err := client.downloadCreateRequest(ctx, blobClientDownloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - return blobClientDownloadResponse{}, runtime.NewResponseError(resp) + return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) } return client.downloadHandleResponse(resp) } // downloadCreateRequest creates the Download request. 
-func (client *blobClient) downloadCreateRequest(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientDownloadOptions != nil && blobClientDownloadOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientDownloadOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientDownloadOptions != nil && blobClientDownloadOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientDownloadOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if blobClientDownloadOptions != nil && blobClientDownloadOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDownloadOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if blobClientDownloadOptions != nil && blobClientDownloadOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *blobClientDownloadOptions.Range) + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentMD5 != nil { - req.Raw().Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentMD5)) + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} } - if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentCRC64 != nil { - req.Raw().Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentCRC64)) + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - 
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientDownloadOptions != nil && blobClientDownloadOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientDownloadOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // downloadHandleResponse handles the Download response. 
-func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClientDownloadResponse, error) { - result := blobClientDownloadResponse{RawResponse: resp} +func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { + result := BlobClientDownloadResponse{Body: resp.Body} if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.LastModified = &lastModified } @@ -957,7 +964,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.ContentLength = &contentLength } @@ -968,12 +975,12 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien result.ContentRange = &val } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.ContentMD5 = contentMD5 } @@ -992,7 +999,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -1002,7 +1009,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.CopyCompletionTime = ©CompletionTime } @@ -1045,7 +1052,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.IsCurrentVersion = &isCurrentVersion } @@ -1055,7 +1062,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.Date = &date } @@ -1063,14 +1070,14 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -1083,52 +1090,52 @@ func 
(client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { blobContentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.BlobContentMD5 = blobContentMD5 } if val := resp.Header.Get("x-ms-tag-count"); val != "" { tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.TagCount = &tagCount } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.IsSealed = &isSealed } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.LastAccessed = &lastAccessed } if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.LegalHold = &legalHold } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientDownloadResponse{}, err + return BlobClientDownloadResponse{}, err } result.ContentCRC64 = contentCRC64 } @@ -1140,24 +1147,25 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. -// options - blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. -func (client *blobClient) GetAccountInfo(ctx context.Context, options *blobClientGetAccountInfoOptions) (blobClientGetAccountInfoResponse, error) { +// Generated from API version 2020-10-02 +// options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { - return blobClientGetAccountInfoResponse{}, err + return BlobClientGetAccountInfoResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientGetAccountInfoResponse{}, err + return BlobClientGetAccountInfoResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) } return client.getAccountInfoHandleResponse(resp) } // getAccountInfoCreateRequest creates the GetAccountInfo request. 
-func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, options *blobClientGetAccountInfoOptions) (*policy.Request, error) { +func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -1166,14 +1174,14 @@ func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, optio reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. -func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blobClientGetAccountInfoResponse, error) { - result := blobClientGetAccountInfoResponse{RawResponse: resp} +func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { + result := BlobClientGetAccountInfoResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1186,7 +1194,7 @@ func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blo if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetAccountInfoResponse{}, err + return BlobClientGetAccountInfoResponse{}, err } result.Date = &date } @@ -1202,92 +1210,92 @@ func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blo // GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. -// blobClientGetPropertiesOptions - blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) GetProperties(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientGetPropertiesResponse, error) { - req, err := client.getPropertiesCreateRequest(ctx, blobClientGetPropertiesOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.getPropertiesHandleResponse(resp) } // getPropertiesCreateRequest creates the GetProperties request. -func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientGetPropertiesOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientGetPropertiesOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetPropertiesOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", 
modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetPropertiesOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. 
-func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blobClientGetPropertiesResponse, error) { - result := blobClientGetPropertiesResponse{RawResponse: resp} +func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { + result := BlobClientGetPropertiesResponse{} if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-creation-time"); val != "" { creationTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.CreationTime = &creationTime } @@ -1316,7 +1324,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.CopyCompletionTime = ©CompletionTime } @@ -1338,7 +1346,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsIncrementalCopy = &isIncrementalCopy } @@ -1357,7 +1365,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ContentLength = &contentLength } @@ -1365,12 +1373,12 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob result.ContentType = &val } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ContentMD5 = contentMD5 } @@ -1389,7 +1397,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -1405,7 +1413,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.Date = &date } @@ -1416,14 +1424,14 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { - return blobClientGetPropertiesResponse{}, err + return 
BlobClientGetPropertiesResponse{}, err } result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -1439,7 +1447,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { accessTierInferred, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.AccessTierInferred = &accessTierInferred } @@ -1449,7 +1457,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { accessTierChangeTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.AccessTierChangeTime = &accessTierChangeTime } @@ -1459,28 +1467,28 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-tag-count"); val != "" { tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.TagCount = &tagCount } if val := resp.Header.Get("x-ms-expiry-time"); val != "" { expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ExpiresOn = &expiresOn } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsSealed = &isSealed } @@ -1490,24 +1498,24 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.LastAccessed = &lastAccessed } if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.LegalHold = &legalHold } @@ -1516,59 +1524,60 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob // GetTags 
- The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. -// blobClientGetTagsOptions - blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) GetTags(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientGetTagsResponse, error) { - req, err := client.getTagsCreateRequest(ctx, blobClientGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientGetTagsResponse{}, runtime.NewResponseError(resp) + return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) } return client.getTagsHandleResponse(resp) } // getTagsCreateRequest creates the GetTags request. 
-func (client *blobClient) getTagsCreateRequest(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetTagsOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientGetTagsOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientGetTagsOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetTagsOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getTagsHandleResponse handles the GetTags response. -func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClientGetTagsResponse, error) { - result := blobClientGetTagsResponse{RawResponse: resp} +func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) { + result := BlobClientGetTagsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1581,98 +1590,99 @@ func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClient if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } return result, nil } // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. 
// If the operation fails it returns an *azcore.ResponseError type. -// blobClientQueryOptions - blobClientQueryOptions contains the optional parameters for the blobClient.Query method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) Query(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientQueryResponse, error) { - req, err := client.queryCreateRequest(ctx, blobClientQueryOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return blobClientQueryResponse{}, runtime.NewResponseError(resp) + return BlobClientQueryResponse{}, runtime.NewResponseError(resp) } return client.queryHandleResponse(resp) } // queryCreateRequest creates the Query request. 
-func (client *blobClient) queryCreateRequest(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "query") - if blobClientQueryOptions != nil && blobClientQueryOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientQueryOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientQueryOptions != nil && blobClientQueryOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientQueryOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - 
req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientQueryOptions != nil && blobClientQueryOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientQueryOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") - if blobClientQueryOptions != nil && blobClientQueryOptions.QueryRequest != nil { - return req, runtime.MarshalAsXML(req, *blobClientQueryOptions.QueryRequest) + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.QueryRequest != nil { + return req, runtime.MarshalAsXML(req, *options.QueryRequest) } return req, nil } // queryHandleResponse handles the Query response. -func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQueryResponse, error) { - result := blobClientQueryResponse{RawResponse: resp} +func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { + result := BlobClientQueryResponse{Body: resp.Body} if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.LastModified = &lastModified } @@ -1687,7 +1697,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.ContentLength = &contentLength } @@ -1698,12 +1708,12 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu result.ContentRange = &val } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.ContentMD5 = contentMD5 } @@ -1722,7 +1732,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -1732,7 +1742,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.CopyCompletionTime = ©CompletionTime } @@ -1775,7 +1785,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.Date = &date } @@ -1783,14 +1793,14 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil 
{ - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -1803,14 +1813,14 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { blobContentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.BlobContentMD5 = blobContentMD5 } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.ContentCRC64 = contentCRC64 } @@ -1819,72 +1829,72 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// blobClientReleaseLeaseOptions - blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) ReleaseLease(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientReleaseLeaseResponse, error) { - req, err := client.releaseLeaseCreateRequest(ctx, leaseID, blobClientReleaseLeaseOptions, modifiedAccessConditions) +// options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) } return client.releaseLeaseHandleResponse(resp) } // releaseLeaseCreateRequest creates the ReleaseLease request. 
-func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientReleaseLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "release") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientReleaseLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // releaseLeaseHandleResponse handles the ReleaseLease response. 
-func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobClientReleaseLeaseResponse, error) { - result := blobClientReleaseLeaseResponse{RawResponse: resp} +func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { + result := BlobClientReleaseLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } result.LastModified = &lastModified } @@ -1900,7 +1910,7 @@ func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobC if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } result.Date = &date } @@ -1909,72 +1919,72 @@ func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobC // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// blobClientRenewLeaseOptions - blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) RenewLease(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientRenewLeaseResponse, error) { - req, err := client.renewLeaseCreateRequest(ctx, leaseID, blobClientRenewLeaseOptions, modifiedAccessConditions) +// options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) } return client.renewLeaseHandleResponse(resp) } // renewLeaseCreateRequest creates the RenewLease request. 
-func (client *blobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientRenewLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "renew") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientRenewLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // renewLeaseHandleResponse handles the RenewLease response. 
-func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobClientRenewLeaseResponse, error) { - result := blobClientRenewLeaseResponse{RawResponse: resp} +func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { + result := BlobClientRenewLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } result.LastModified = &lastModified } @@ -1993,7 +2003,7 @@ func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobCli if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } result.Date = &date } @@ -2002,25 +2012,26 @@ func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobCli // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // expiryOptions - Required. Indicates mode of the expiry time -// options - blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. -func (client *blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (blobClientSetExpiryResponse, error) { +// options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) } return client.setExpiryHandleResponse(resp) } // setExpiryCreateRequest creates the SetExpiry request. 
-func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (*policy.Request, error) { +func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2031,28 +2042,28 @@ func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("x-ms-expiry-option", string(expiryOptions)) + req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} if options != nil && options.ExpiresOn != nil { - req.Raw().Header.Set("x-ms-expiry-time", *options.ExpiresOn) + req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setExpiryHandleResponse handles the SetExpiry response. -func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClientSetExpiryResponse, error) { - result := blobClientSetExpiryResponse{RawResponse: resp} +func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { + result := BlobClientSetExpiryResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } result.LastModified = &lastModified } @@ -2068,7 +2079,7 @@ func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClie if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } result.Date = &date } @@ -2077,99 +2088,99 @@ func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClie // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetHTTPHeadersOptions - blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) SetHTTPHeaders(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetHTTPHeadersResponse, error) { - req, err := client.setHTTPHeadersCreateRequest(ctx, blobClientSetHTTPHeadersOptions, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) } return client.setHTTPHeadersHandleResponse(resp) } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
-func (client *blobClient) setHTTPHeadersCreateRequest(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "properties") - if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetHTTPHeadersOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions 
!= nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetHTTPHeadersOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. -func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blobClientSetHTTPHeadersResponse, error) { - result := blobClientSetHTTPHeadersResponse{RawResponse: resp} +func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { + result := BlobClientSetHTTPHeadersResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -2185,7 +2196,7 @@ func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blo if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } result.Date = &date } @@ -2194,56 +2205,57 @@ func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blo // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetImmutabilityPolicyOptions - blobClientSetImmutabilityPolicyOptions contains the optional parameters for the -// blobClient.SetImmutabilityPolicy method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) SetImmutabilityPolicy(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetImmutabilityPolicyResponse, error) { - req, err := client.setImmutabilityPolicyCreateRequest(ctx, blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) } return client.setImmutabilityPolicyHandleResponse(resp) } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. -func (client *blobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "immutabilityPolicies") - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetImmutabilityPolicyOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetImmutabilityPolicyOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry != nil { 
- req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} } - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode)) + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. -func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientSetImmutabilityPolicyResponse, error) { - result := blobClientSetImmutabilityPolicyResponse{RawResponse: resp} +func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) { + result := BlobClientSetImmutabilityPolicyResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2256,44 +2268,45 @@ func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry } if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } return result, nil } // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // legalHold - Specified if a legal hold should be set on the blob. -// options - blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. -func (client *blobClient) SetLegalHold(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (blobClientSetLegalHoldResponse, error) { +// options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
+func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) } return client.setLegalHoldHandleResponse(resp) } // setLegalHoldCreateRequest creates the SetLegalHold request. -func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (*policy.Request, error) { +func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2304,18 +2317,18 @@ func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setLegalHoldHandleResponse handles the SetLegalHold response. -func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobClientSetLegalHoldResponse, error) { - result := blobClientSetLegalHoldResponse{RawResponse: resp} +func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) { + result := BlobClientSetLegalHoldResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2328,14 +2341,14 @@ func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobC if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } result.LegalHold = &legalHold } @@ -2345,92 +2358,92 @@ func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobC // SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value // pairs // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetMetadataOptions - blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) SetMetadata(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetMetadataResponse, error) { - req, err := client.setMetadataCreateRequest(ctx, blobClientSetMetadataOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) } return client.setMetadataHandleResponse(resp) } // setMetadataCreateRequest creates the SetMetadata request. 
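// A hedged usage sketch of the regenerated BlobClient.SetMetadata shown above, written
// from a caller outside the generated package. Assumptions (not part of this diff):
// ctx is a context.Context, blobClient is a *generated.BlobClient built with
// NewBlobClient, and, inferred from setMetadataCreateRequest below, the Metadata
// option is a map[string]string; the remaining parameter groups are passed as nil.
opts := generated.BlobClientSetMetadataOptions{Metadata: map[string]string{"env": "dev"}}
resp, err := blobClient.SetMetadata(ctx, &opts, nil, nil, nil, nil)
if err != nil {
	return err // surfaces as an *azcore.ResponseError per the method comment
}
_ = resp.LastModified // parsed from the Last-Modified response header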
-func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "metadata") - if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetMetadataOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Metadata != nil { - for k, v := range blobClientSetMetadataOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - 
req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetMetadataOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setMetadataHandleResponse handles the SetMetadata response. -func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobClientSetMetadataResponse, error) { - result := blobClientSetMetadataResponse{RawResponse: resp} +func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { + result := BlobClientSetMetadataResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } result.LastModified = &lastModified } @@ -2449,14 +2462,14 @@ func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobCl if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -2471,65 +2484,64 @@ func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetTagsOptions - blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) SetTags(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientSetTagsResponse, error) { - req, err := client.setTagsCreateRequest(ctx, blobClientSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// tags - Blob tags +// options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientSetTagsResponse{}, err + return BlobClientSetTagsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetTagsResponse{}, err + return BlobClientSetTagsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return blobClientSetTagsResponse{}, runtime.NewResponseError(resp) + return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) } return client.setTagsHandleResponse(resp) } // setTagsCreateRequest creates the SetTags request. -func (client *blobClient) setTagsCreateRequest(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTagsOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientSetTagsOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentMD5)) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentCRC64)) + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTagsOptions.RequestID) + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("Accept", "application/xml") - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Tags != nil { - return req, runtime.MarshalAsXML(req, *blobClientSetTagsOptions.Tags) - } - return req, nil + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, tags) } // setTagsHandleResponse handles the SetTags response. -func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClientSetTagsResponse, error) { - result := blobClientSetTagsResponse{RawResponse: resp} +func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) { + result := BlobClientSetTagsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2542,7 +2554,7 @@ func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClient if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetTagsResponse{}, err + return BlobClientSetTagsResponse{}, err } result.Date = &date } @@ -2554,64 +2566,65 @@ func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClient // premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // tier - Indicates the tier to be set on the blob. -// blobClientSetTierOptions - blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) SetTier(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetTierResponse, error) { - req, err := client.setTierCreateRequest(ctx, tier, blobClientSetTierOptions, leaseAccessConditions, modifiedAccessConditions) +// options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return blobClientSetTierResponse{}, err + return BlobClientSetTierResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetTierResponse{}, err + return BlobClientSetTierResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return blobClientSetTierResponse{}, runtime.NewResponseError(resp) + return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) } return client.setTierHandleResponse(resp) } // setTierCreateRequest creates the SetTier request. -func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tier") - if blobClientSetTierOptions != nil && blobClientSetTierOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientSetTierOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientSetTierOptions != nil && blobClientSetTierOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientSetTierOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if blobClientSetTierOptions != nil && blobClientSetTierOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTierOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-access-tier", string(tier)) - if blobClientSetTierOptions != nil && blobClientSetTierOptions.RehydratePriority != nil { - req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientSetTierOptions.RehydratePriority)) + req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetTierOptions != nil && blobClientSetTierOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTierOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags 
!= nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setTierHandleResponse handles the SetTier response. -func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClientSetTierResponse, error) { - result := blobClientSetTierResponse{RawResponse: resp} +func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) { + result := BlobClientSetTierResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2626,119 +2639,119 @@ func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. -// blobClientStartCopyFromURLOptions - blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL +// options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) StartCopyFromURL(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientStartCopyFromURLResponse, error) { - req, err := client.startCopyFromURLCreateRequest(ctx, copySource, blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return blobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) } return client.startCopyFromURLHandleResponse(resp) } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. -func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientStartCopyFromURLOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Metadata != nil { - for k, v := range blobClientStartCopyFromURLOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*blobClientStartCopyFromURLOptions.Tier)) + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RehydratePriority != nil { - req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientStartCopyFromURLOptions.RehydratePriority)) + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + 
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { - req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-copy-source", copySource) + req.Raw().Header["x-ms-copy-source"] = []string{copySource} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientStartCopyFromURLOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] 
= []string{*options.RequestID} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *blobClientStartCopyFromURLOptions.BlobTagsString) + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.SealBlob != nil { - req.Raw().Header.Set("x-ms-seal-blob", strconv.FormatBool(*blobClientStartCopyFromURLOptions.SealBlob)) + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode)) + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientStartCopyFromURLOptions.LegalHold)) + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // startCopyFromURLHandleResponse handles the StartCopyFromURL response. -func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (blobClientStartCopyFromURLResponse, error) { - result := blobClientStartCopyFromURLResponse{RawResponse: resp} +func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { + result := BlobClientStartCopyFromURLResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } result.LastModified = &lastModified } @@ -2757,7 +2770,7 @@ func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (b if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } result.Date = &date } @@ -2772,24 +2785,25 @@ func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (b // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. -// options - blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. 
-func (client *blobClient) Undelete(ctx context.Context, options *blobClientUndeleteOptions) (blobClientUndeleteResponse, error) { +// Generated from API version 2020-10-02 +// options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { req, err := client.undeleteCreateRequest(ctx, options) if err != nil { - return blobClientUndeleteResponse{}, err + return BlobClientUndeleteResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientUndeleteResponse{}, err + return BlobClientUndeleteResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientUndeleteResponse{}, runtime.NewResponseError(resp) + return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) } return client.undeleteHandleResponse(resp) } // undeleteCreateRequest creates the Undelete request. -func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *blobClientUndeleteOptions) (*policy.Request, error) { +func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2800,17 +2814,17 @@ func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // undeleteHandleResponse handles the Undelete response. -func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClientUndeleteResponse, error) { - result := blobClientUndeleteResponse{RawResponse: resp} +func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) { + result := BlobClientUndeleteResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2823,7 +2837,7 @@ func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientUndeleteResponse{}, err + return BlobClientUndeleteResponse{}, err } result.Date = &date } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go new file mode 100644 index 00000000000..5f2d1101613 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -0,0 +1,960 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// BlockBlobClient contains the methods for the BlockBlob group. +// Don't use this type directly, use NewBlockBlobClient() instead. +type BlockBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { + client := &BlockBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blocks - Blob Blocks. +// options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.commitBlockListHandleResponse(resp) +} + +// commitBlockListCreateRequest creates the CommitBlockList request. 
+func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = 
[]string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, blocks) +} + +// commitBlockListHandleResponse handles the CommitBlockList response. +func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { + result := BlockBlobClientCommitBlockListResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := 
resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.getBlockListHandleResponse(resp) +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
+func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { + result := BlockBlobClientGetBlockListResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.putBlobFromURLHandleResponse(resp) +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = 
[]string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = 
[]string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { + result := BlockBlobClientPutBlobFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// body - Initial data +// options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobClientStageBlockResponse, error) { + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockHandleResponse(resp) +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// stageBlockHandleResponse handles the StageBlock response. 
+func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { + result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// sourceURL - Specify a URL to the copy source. +// options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockFromURLHandleResponse(resp) +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + 
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { + result := BlockBlobClientStageBlockFromURLResponse{} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. 
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + } + return client.uploadHandleResponse(resp) +} + +// uploadCreateRequest creates the Upload request. +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadHandleResponse handles the Upload response. 
+func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { + result := BlockBlobClientUploadResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go similarity index 84% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 2348df04a43..74e6cf1e859 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -5,15 +5,10 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated -const ( - moduleName = "azblob" - moduleVersion = "v0.4.1" -) - -// AccessTier enum type AccessTier string const ( @@ -31,6 +26,7 @@ const ( AccessTierP60 AccessTier = "P60" AccessTierP70 AccessTier = "P70" AccessTierP80 AccessTier = "P80" + AccessTierPremium AccessTier = "Premium" ) // PossibleAccessTierValues returns the possible values for the AccessTier const type. @@ -50,15 +46,10 @@ func PossibleAccessTierValues() []AccessTier { AccessTierP60, AccessTierP70, AccessTierP80, + AccessTierPremium, } } -// ToPtr returns a *AccessTier pointing to the current value. -func (c AccessTier) ToPtr() *AccessTier { - return &c -} - -// AccountKind enum type AccountKind string const ( @@ -80,12 +71,6 @@ func PossibleAccountKindValues() []AccountKind { } } -// ToPtr returns a *AccountKind pointing to the current value. 
-func (c AccountKind) ToPtr() *AccountKind { - return &c -} - -// ArchiveStatus enum type ArchiveStatus string const ( @@ -101,36 +86,6 @@ func PossibleArchiveStatusValues() []ArchiveStatus { } } -// ToPtr returns a *ArchiveStatus pointing to the current value. -func (c ArchiveStatus) ToPtr() *ArchiveStatus { - return &c -} - -// BlobExpiryOptions enum -type BlobExpiryOptions string - -const ( - BlobExpiryOptionsAbsolute BlobExpiryOptions = "Absolute" - BlobExpiryOptionsNeverExpire BlobExpiryOptions = "NeverExpire" - BlobExpiryOptionsRelativeToCreation BlobExpiryOptions = "RelativeToCreation" - BlobExpiryOptionsRelativeToNow BlobExpiryOptions = "RelativeToNow" -) - -// PossibleBlobExpiryOptionsValues returns the possible values for the BlobExpiryOptions const type. -func PossibleBlobExpiryOptionsValues() []BlobExpiryOptions { - return []BlobExpiryOptions{ - BlobExpiryOptionsAbsolute, - BlobExpiryOptionsNeverExpire, - BlobExpiryOptionsRelativeToCreation, - BlobExpiryOptionsRelativeToNow, - } -} - -// ToPtr returns a *BlobExpiryOptions pointing to the current value. -func (c BlobExpiryOptions) ToPtr() *BlobExpiryOptions { - return &c -} - // BlobGeoReplicationStatus - The status of the secondary location type BlobGeoReplicationStatus string @@ -149,35 +104,6 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { } } -// ToPtr returns a *BlobGeoReplicationStatus pointing to the current value. -func (c BlobGeoReplicationStatus) ToPtr() *BlobGeoReplicationStatus { - return &c -} - -// BlobImmutabilityPolicyMode enum -type BlobImmutabilityPolicyMode string - -const ( - BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyMode = "Mutable" - BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyMode = "Unlocked" - BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyMode = "Locked" -) - -// PossibleBlobImmutabilityPolicyModeValues returns the possible values for the BlobImmutabilityPolicyMode const type. -func PossibleBlobImmutabilityPolicyModeValues() []BlobImmutabilityPolicyMode { - return []BlobImmutabilityPolicyMode{ - BlobImmutabilityPolicyModeMutable, - BlobImmutabilityPolicyModeUnlocked, - BlobImmutabilityPolicyModeLocked, - } -} - -// ToPtr returns a *BlobImmutabilityPolicyMode pointing to the current value. -func (c BlobImmutabilityPolicyMode) ToPtr() *BlobImmutabilityPolicyMode { - return &c -} - -// BlobType enum type BlobType string const ( @@ -195,12 +121,6 @@ func PossibleBlobTypeValues() []BlobType { } } -// ToPtr returns a *BlobType pointing to the current value. -func (c BlobType) ToPtr() *BlobType { - return &c -} - -// BlockListType enum type BlockListType string const ( @@ -218,12 +138,6 @@ func PossibleBlockListTypeValues() []BlockListType { } } -// ToPtr returns a *BlockListType pointing to the current value. -func (c BlockListType) ToPtr() *BlockListType { - return &c -} - -// CopyStatusType enum type CopyStatusType string const ( @@ -243,12 +157,6 @@ func PossibleCopyStatusTypeValues() []CopyStatusType { } } -// ToPtr returns a *CopyStatusType pointing to the current value. -func (c CopyStatusType) ToPtr() *CopyStatusType { - return &c -} - -// DeleteSnapshotsOptionType enum type DeleteSnapshotsOptionType string const ( @@ -264,12 +172,21 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { } } -// ToPtr returns a *DeleteSnapshotsOptionType pointing to the current value. 
-func (c DeleteSnapshotsOptionType) ToPtr() *DeleteSnapshotsOptionType { - return &c +type DeleteType string + +const ( + DeleteTypeNone DeleteType = "None" + DeleteTypePermanent DeleteType = "Permanent" +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return []DeleteType{ + DeleteTypeNone, + DeleteTypePermanent, + } } -// EncryptionAlgorithmType enum type EncryptionAlgorithmType string const ( @@ -285,12 +202,57 @@ func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { } } -// ToPtr returns a *EncryptionAlgorithmType pointing to the current value. -func (c EncryptionAlgorithmType) ToPtr() *EncryptionAlgorithmType { - return &c +type ExpiryOptions string + +const ( + ExpiryOptionsAbsolute ExpiryOptions = "Absolute" + ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" + ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" + ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return []ExpiryOptions{ + ExpiryOptionsAbsolute, + ExpiryOptionsNeverExpire, + ExpiryOptionsRelativeToCreation, + ExpiryOptionsRelativeToNow, + } +} + +type ImmutabilityPolicyMode string + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeMutable, + ImmutabilityPolicyModeUnlocked, + ImmutabilityPolicyModeLocked, + } +} + +type ImmutabilityPolicySetting string + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return []ImmutabilityPolicySetting{ + ImmutabilityPolicySettingUnlocked, + ImmutabilityPolicySettingLocked, + } } -// LeaseDurationType enum type LeaseDurationType string const ( @@ -306,12 +268,6 @@ func PossibleLeaseDurationTypeValues() []LeaseDurationType { } } -// ToPtr returns a *LeaseDurationType pointing to the current value. -func (c LeaseDurationType) ToPtr() *LeaseDurationType { - return &c -} - -// LeaseStateType enum type LeaseStateType string const ( @@ -333,12 +289,6 @@ func PossibleLeaseStateTypeValues() []LeaseStateType { } } -// ToPtr returns a *LeaseStateType pointing to the current value. -func (c LeaseStateType) ToPtr() *LeaseStateType { - return &c -} - -// LeaseStatusType enum type LeaseStatusType string const ( @@ -354,12 +304,6 @@ func PossibleLeaseStatusTypeValues() []LeaseStatusType { } } -// ToPtr returns a *LeaseStatusType pointing to the current value. -func (c LeaseStatusType) ToPtr() *LeaseStatusType { - return &c -} - -// ListBlobsIncludeItem enum type ListBlobsIncludeItem string const ( @@ -391,12 +335,6 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { } } -// ToPtr returns a *ListBlobsIncludeItem pointing to the current value. 
-func (c ListBlobsIncludeItem) ToPtr() *ListBlobsIncludeItem { - return &c -} - -// ListContainersIncludeType enum type ListContainersIncludeType string const ( @@ -414,12 +352,6 @@ func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { } } -// ToPtr returns a *ListContainersIncludeType pointing to the current value. -func (c ListContainersIncludeType) ToPtr() *ListContainersIncludeType { - return &c -} - -// PremiumPageBlobAccessTier enum type PremiumPageBlobAccessTier string const ( @@ -453,12 +385,6 @@ func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { } } -// ToPtr returns a *PremiumPageBlobAccessTier pointing to the current value. -func (c PremiumPageBlobAccessTier) ToPtr() *PremiumPageBlobAccessTier { - return &c -} - -// PublicAccessType enum type PublicAccessType string const ( @@ -474,11 +400,6 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { } } -// ToPtr returns a *PublicAccessType pointing to the current value. -func (c PublicAccessType) ToPtr() *PublicAccessType { - return &c -} - // QueryFormatType - The quick query format type. type QueryFormatType string @@ -499,11 +420,6 @@ func PossibleQueryFormatTypeValues() []QueryFormatType { } } -// ToPtr returns a *QueryFormatType pointing to the current value. -func (c QueryFormatType) ToPtr() *QueryFormatType { - return &c -} - // RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. // Valid values are High and Standard. type RehydratePriority string @@ -521,12 +437,6 @@ func PossibleRehydratePriorityValues() []RehydratePriority { } } -// ToPtr returns a *RehydratePriority pointing to the current value. -func (c RehydratePriority) ToPtr() *RehydratePriority { - return &c -} - -// SKUName enum type SKUName string const ( @@ -548,12 +458,6 @@ func PossibleSKUNameValues() []SKUName { } } -// ToPtr returns a *SKUName pointing to the current value. -func (c SKUName) ToPtr() *SKUName { - return &c -} - -// SequenceNumberActionType enum type SequenceNumberActionType string const ( @@ -571,11 +475,6 @@ func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { } } -// ToPtr returns a *SequenceNumberActionType pointing to the current value. 
-func (c SequenceNumberActionType) ToPtr() *SequenceNumberActionType { - return &c -} - // StorageErrorCode - Error codes returned by the service type StorageErrorCode string @@ -616,7 +515,7 @@ const ( StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" - StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" @@ -734,7 +633,7 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode { StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, - StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, @@ -813,29 +712,3 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode { StorageErrorCodeUnsupportedXMLNode, } } - -// ToPtr returns a *StorageErrorCode pointing to the current value. -func (c StorageErrorCode) ToPtr() *StorageErrorCode { - return &c -} - -// BlobDeleteType enum -type BlobDeleteType string - -const ( - BlobDeleteTypeNone BlobDeleteType = "None" - BlobDeleteTypePermanent BlobDeleteType = "Permanent" -) - -// PossibleBlobDeleteTypeValues returns the possible values for the BlobDeleteType const type. -func PossibleBlobDeleteTypeValues() []BlobDeleteType { - return []BlobDeleteType{ - BlobDeleteTypeNone, - BlobDeleteTypePermanent, - } -} - -// ToPtr returns a *BlobDeleteType pointing to the current value. -func (c BlobDeleteType) ToPtr() *BlobDeleteType { - return &c -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go similarity index 50% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index c9245ce10d4..31e671f1484 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -5,13 +5,15 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
-package azblob +package generated import ( "context" "encoding/xml" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" @@ -21,16 +23,18 @@ import ( "time" ) -type containerClient struct { +// ContainerClient contains the methods for the Container group. +// Don't use this type directly, use NewContainerClient() instead. +type ContainerClient struct { endpoint string pl runtime.Pipeline } -// newContainerClient creates a new instance of containerClient with the specified values. +// NewContainerClient creates a new instance of ContainerClient with the specified values. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // pl - the pipeline used for sending requests and handling responses. -func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient { - client := &containerClient{ +func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { + client := &ContainerClient{ endpoint: endpoint, pl: pl, } @@ -40,26 +44,26 @@ func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient { // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// containerClientAcquireLeaseOptions - containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) AcquireLease(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, containerClientAcquireLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) AcquireLease(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return containerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) } return client.acquireLeaseHandleResponse(resp) } // acquireLeaseCreateRequest creates the AcquireLease request. 
-func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -67,41 +71,41 @@ func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, co reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "acquire") - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Duration != nil { - req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Duration), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + if options != nil && options.Duration != nil { + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} } - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.ProposedLeaseID != nil { - req.Raw().Header.Set("x-ms-proposed-lease-id", *containerClientAcquireLeaseOptions.ProposedLeaseID) + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientAcquireLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // acquireLeaseHandleResponse handles the AcquireLease response. 
-func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (containerClientAcquireLeaseResponse, error) { - result := containerClientAcquireLeaseResponse{RawResponse: resp} +func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { + result := ContainerClientAcquireLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } result.LastModified = &lastModified } @@ -120,7 +124,7 @@ func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } result.Date = &date } @@ -130,26 +134,26 @@ func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) ( // BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// containerClientBreakLeaseOptions - containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) BreakLease(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientBreakLeaseResponse, error) { - req, err := client.breakLeaseCreateRequest(ctx, containerClientBreakLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return containerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) } return client.breakLeaseHandleResponse(resp) } // breakLeaseCreateRequest creates the BreakLease request. 
-func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -157,38 +161,38 @@ func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, cont reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "break") - if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.BreakPeriod != nil { - req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.BreakPeriod), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientBreakLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // breakLeaseHandleResponse handles the BreakLease response. 
-func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (containerClientBreakLeaseResponse, error) { - result := containerClientBreakLeaseResponse{RawResponse: resp} +func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { + result := ContainerClientBreakLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } result.LastModified = &lastModified } @@ -196,7 +200,7 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (co leaseTime32, err := strconv.ParseInt(val, 10, 32) leaseTime := int32(leaseTime32) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } result.LeaseTime = &leaseTime } @@ -212,7 +216,7 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (co if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } result.Date = &date } @@ -222,30 +226,30 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (co // ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. // proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID // string formats. -// containerClientChangeLeaseOptions - containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientChangeLeaseResponse, error) { - req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, containerClientChangeLeaseOptions, modifiedAccessConditions) +// options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) } return client.changeLeaseHandleResponse(resp) } // changeLeaseCreateRequest creates the ChangeLease request. -func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -253,37 +257,37 @@ func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, lea reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientChangeLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "change") - req.Raw().Header.Set("x-ms-lease-id", leaseID) - req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientChangeLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - 
req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // changeLeaseHandleResponse handles the ChangeLease response. -func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (containerClientChangeLeaseResponse, error) { - result := containerClientChangeLeaseResponse{RawResponse: resp} +func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { + result := ContainerClientChangeLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } result.LastModified = &lastModified } @@ -302,7 +306,7 @@ func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (c if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } result.Date = &date } @@ -312,68 +316,68 @@ func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (c // Create - creates a new container under the specified account. If the container with the same name already exists, the operation // fails // If the operation fails it returns an *azcore.ResponseError type. -// containerClientCreateOptions - containerClientCreateOptions contains the optional parameters for the containerClient.Create -// method. -// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. -func (client *containerClient) Create(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (containerClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, containerClientCreateOptions, containerCpkScopeInfo) +// Generated from API version 2020-10-02 +// options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (ContainerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options, containerCpkScopeInfo) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return containerClientCreateResponse{}, runtime.NewResponseError(resp) + return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) } return client.createHandleResponse(resp) } // createCreateRequest creates the Create request. 
-func (client *containerClient) createCreateRequest(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") - if containerClientCreateOptions != nil && containerClientCreateOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientCreateOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if containerClientCreateOptions != nil && containerClientCreateOptions.Metadata != nil { - for k, v := range containerClientCreateOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } - if containerClientCreateOptions != nil && containerClientCreateOptions.Access != nil { - req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientCreateOptions.Access)) + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientCreateOptions != nil && containerClientCreateOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientCreateOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { - req.Raw().Header.Set("x-ms-default-encryption-scope", *containerCpkScopeInfo.DefaultEncryptionScope) + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCpkScopeInfo.DefaultEncryptionScope} } if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { - req.Raw().Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)) + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // createHandleResponse handles the Create response. 
-func (client *containerClient) createHandleResponse(resp *http.Response) (containerClientCreateResponse, error) { - result := containerClientCreateResponse{RawResponse: resp} +func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { + result := ContainerClientCreateResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } result.LastModified = &lastModified } @@ -389,7 +393,7 @@ func (client *containerClient) createHandleResponse(resp *http.Response) (contai if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } result.Date = &date } @@ -399,57 +403,57 @@ func (client *containerClient) createHandleResponse(resp *http.Response) (contai // Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. -// containerClientDeleteOptions - containerClientDeleteOptions contains the optional parameters for the containerClient.Delete -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) Delete(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientDeleteResponse, error) { - req, err := client.deleteCreateRequest(ctx, containerClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return containerClientDeleteResponse{}, err + return ContainerClientDeleteResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientDeleteResponse{}, err + return ContainerClientDeleteResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return containerClientDeleteResponse{}, runtime.NewResponseError(resp) + return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) } return client.deleteHandleResponse(resp) } // deleteCreateRequest creates the Delete request. 
-func (client *containerClient) deleteCreateRequest(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") - if containerClientDeleteOptions != nil && containerClientDeleteOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientDeleteOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientDeleteOptions != nil && containerClientDeleteOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientDeleteOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // deleteHandleResponse handles the Delete response. -func (client *containerClient) deleteHandleResponse(resp *http.Response) (containerClientDeleteResponse, error) { - result := containerClientDeleteResponse{RawResponse: resp} +func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { + result := ContainerClientDeleteResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -462,7 +466,7 @@ func (client *containerClient) deleteHandleResponse(resp *http.Response) (contai if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientDeleteResponse{}, err + return ContainerClientDeleteResponse{}, err } result.Date = &date } @@ -472,26 +476,27 @@ func (client *containerClient) deleteHandleResponse(resp *http.Response) (contai // GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. 
-// containerClientGetAccessPolicyOptions - containerClientGetAccessPolicyOptions contains the optional parameters for the -// containerClient.GetAccessPolicy method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *containerClient) GetAccessPolicy(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetAccessPolicyResponse, error) { - req, err := client.getAccessPolicyCreateRequest(ctx, containerClientGetAccessPolicyOptions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) } return client.getAccessPolicyHandleResponse(resp) } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. 
-func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -499,34 +504,34 @@ func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") reqQP.Set("comp", "acl") - if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetAccessPolicyOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetAccessPolicyOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccessPolicyHandleResponse handles the GetAccessPolicy response. -func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response) (containerClientGetAccessPolicyResponse, error) { - result := containerClientGetAccessPolicyResponse{RawResponse: resp} +func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { + result := ContainerClientGetAccessPolicyResponse{} if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } result.LastModified = &lastModified } @@ -542,37 +547,38 @@ func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } return result, nil } // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. 
-// options - containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo +// Generated from API version 2020-10-02 +// options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. -func (client *containerClient) GetAccountInfo(ctx context.Context, options *containerClientGetAccountInfoOptions) (containerClientGetAccountInfoResponse, error) { +func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { - return containerClientGetAccountInfoResponse{}, err + return ContainerClientGetAccountInfoResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientGetAccountInfoResponse{}, err + return ContainerClientGetAccountInfoResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) } return client.getAccountInfoHandleResponse(resp) } // getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, options *containerClientGetAccountInfoOptions) (*policy.Request, error) { +func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -581,14 +587,14 @@ func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. -func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) (containerClientGetAccountInfoResponse, error) { - result := containerClientGetAccountInfoResponse{RawResponse: resp} +func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { + result := ContainerClientGetAccountInfoResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -601,7 +607,7 @@ func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetAccountInfoResponse{}, err + return ContainerClientGetAccountInfoResponse{}, err } result.Date = &date } @@ -617,50 +623,50 @@ func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) // GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. -// containerClientGetPropertiesOptions - containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *containerClient) GetProperties(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetPropertiesResponse, error) { - req, err := client.getPropertiesCreateRequest(ctx, containerClientGetPropertiesOptions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.getPropertiesHandleResponse(resp) } // getPropertiesCreateRequest creates the GetProperties request. -func (client *containerClient) getPropertiesCreateRequest(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") - if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetPropertiesOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetPropertiesOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. 
-func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) (containerClientGetPropertiesResponse, error) { - result := containerClientGetPropertiesResponse{RawResponse: resp} +func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { + result := ContainerClientGetPropertiesResponse{} for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { @@ -670,12 +676,12 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) } } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.LastModified = &lastModified } @@ -700,7 +706,7 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.Date = &date } @@ -710,14 +716,14 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.HasImmutabilityPolicy = &hasImmutabilityPolicy } if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { hasLegalHold, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.HasLegalHold = &hasLegalHold } @@ -727,38 +733,27 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { denyEncryptionScopeOverride, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride } if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } return result, nil } -// ListBlobFlatSegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // If the operation fails it returns an *azcore.ResponseError type. -// options - containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment +// Generated from API version 2020-10-02 +// options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment // method. 
-func (client *containerClient) ListBlobFlatSegment(options *containerClientListBlobFlatSegmentOptions) *containerClientListBlobFlatSegmentPager { - return &containerClientListBlobFlatSegmentPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.listBlobFlatSegmentCreateRequest(ctx, options) - }, - advancer: func(ctx context.Context, resp containerClientListBlobFlatSegmentResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsFlatSegmentResponse.NextMarker) - }, - } -} - // listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. -func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Context, options *containerClientListBlobFlatSegmentOptions) (*policy.Request, error) { +func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -782,17 +777,17 @@ func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. -func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Response) (containerClientListBlobFlatSegmentResponse, error) { - result := containerClientListBlobFlatSegmentResponse{RawResponse: resp} +func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { + result := ContainerClientListBlobFlatSegmentResponse{} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } @@ -808,37 +803,54 @@ func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Resp if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientListBlobFlatSegmentResponse{}, err + return ContainerClientListBlobFlatSegmentResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { - return containerClientListBlobFlatSegmentResponse{}, err + return ContainerClientListBlobFlatSegmentResponse{}, err } return result, nil } -// ListBlobHierarchySegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. 
The delimiter may be a single character or a string. -// options - containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment +// options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment // method. -func (client *containerClient) ListBlobHierarchySegment(delimiter string, options *containerClientListBlobHierarchySegmentOptions) *containerClientListBlobHierarchySegmentPager { - return &containerClientListBlobHierarchySegmentPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, options) +func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { + return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ + More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 }, - advancer: func(ctx context.Context, resp containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsHierarchySegmentResponse.NextMarker) + Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) + } + return client.ListBlobHierarchySegmentHandleResponse(resp) }, - } + }) } -// listBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. -func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *containerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { +// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. 
+func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -863,17 +875,17 @@ func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } -// listBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. -func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http.Response) (containerClientListBlobHierarchySegmentResponse, error) { - result := containerClientListBlobHierarchySegmentResponse{RawResponse: resp} +// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. +func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { + result := ContainerClientListBlobHierarchySegmentResponse{} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } @@ -889,12 +901,12 @@ func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientListBlobHierarchySegmentResponse{}, err + return ContainerClientListBlobHierarchySegmentResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { - return containerClientListBlobHierarchySegmentResponse{}, err + return ContainerClientListBlobHierarchySegmentResponse{}, err } return result, nil } @@ -902,27 +914,27 @@ func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http // ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// containerClientReleaseLeaseOptions - containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) ReleaseLease(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientReleaseLeaseResponse, error) { - req, err := client.releaseLeaseCreateRequest(ctx, leaseID, containerClientReleaseLeaseOptions, modifiedAccessConditions) +// options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) } return client.releaseLeaseHandleResponse(resp) } // releaseLeaseCreateRequest creates the ReleaseLease request. -func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -930,36 +942,36 @@ func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, le reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientReleaseLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "release") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientReleaseLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = 
[]string{"application/xml"} return req, nil } // releaseLeaseHandleResponse handles the ReleaseLease response. -func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (containerClientReleaseLeaseResponse, error) { - result := containerClientReleaseLeaseResponse{RawResponse: resp} +func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { + result := ContainerClientReleaseLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } result.LastModified = &lastModified } @@ -975,7 +987,7 @@ func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } result.Date = &date } @@ -984,25 +996,26 @@ func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) ( // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // sourceContainerName - Required. Specifies the name of the container to rename. -// options - containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. -func (client *containerClient) Rename(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (containerClientRenameResponse, error) { +// options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { - return containerClientRenameResponse{}, err + return ContainerClientRenameResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientRenameResponse{}, err + return ContainerClientRenameResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientRenameResponse{}, runtime.NewResponseError(resp) + return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) } return client.renameHandleResponse(resp) } // renameCreateRequest creates the Rename request. 
-func (client *containerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (*policy.Request, error) { +func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1014,21 +1027,21 @@ func (client *containerClient) renameCreateRequest(ctx context.Context, sourceCo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("x-ms-source-container-name", sourceContainerName) + req.Raw().Header["x-ms-source-container-name"] = []string{sourceContainerName} if options != nil && options.SourceLeaseID != nil { - req.Raw().Header.Set("x-ms-source-lease-id", *options.SourceLeaseID) + req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // renameHandleResponse handles the Rename response. -func (client *containerClient) renameHandleResponse(resp *http.Response) (containerClientRenameResponse, error) { - result := containerClientRenameResponse{RawResponse: resp} +func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) { + result := ContainerClientRenameResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1041,7 +1054,7 @@ func (client *containerClient) renameHandleResponse(resp *http.Response) (contai if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRenameResponse{}, err + return ContainerClientRenameResponse{}, err } result.Date = &date } @@ -1051,27 +1064,27 @@ func (client *containerClient) renameHandleResponse(resp *http.Response) (contai // RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// containerClientRenewLeaseOptions - containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) RenewLease(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientRenewLeaseResponse, error) { - req, err := client.renewLeaseCreateRequest(ctx, leaseID, containerClientRenewLeaseOptions, modifiedAccessConditions) +// options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) } return client.renewLeaseHandleResponse(resp) } // renewLeaseCreateRequest creates the RenewLease request. -func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1079,36 +1092,36 @@ func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leas reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientRenewLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "renew") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientRenewLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // 
renewLeaseHandleResponse handles the RenewLease response. -func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (containerClientRenewLeaseResponse, error) { - result := containerClientRenewLeaseResponse{RawResponse: resp} +func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { + result := ContainerClientRenewLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } result.LastModified = &lastModified } @@ -1127,7 +1140,7 @@ func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (co if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } result.Date = &date } @@ -1136,24 +1149,25 @@ func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (co // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. -// options - containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. -func (client *containerClient) Restore(ctx context.Context, options *containerClientRestoreOptions) (containerClientRestoreResponse, error) { +// Generated from API version 2020-10-02 +// options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { req, err := client.restoreCreateRequest(ctx, options) if err != nil { - return containerClientRestoreResponse{}, err + return ContainerClientRestoreResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientRestoreResponse{}, err + return ContainerClientRestoreResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return containerClientRestoreResponse{}, runtime.NewResponseError(resp) + return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) } return client.restoreHandleResponse(resp) } // restoreCreateRequest creates the Restore request. 
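The response handlers above also change the ETag field from *string to *azcore.ETag, populated via the conversion (*azcore.ETag)(&val). This works because azcore.ETag is a defined string type, so a *string can be converted to a pointer of that type directly. A minimal sketch with a locally defined stand-in type (not the real azcore package) illustrating the idiom:

package main

import "fmt"

// ETag is a local stand-in; azcore.ETag is likewise a string-based defined type,
// which is what makes the pointer conversion in the generated handlers legal.
type ETag string

func main() {
	val := "\"0x8D9999999999999\"" // as it might arrive in the ETag response header
	etag := (*ETag)(&val)          // same idiom as: result.ETag = (*azcore.ETag)(&val)
	fmt.Println(*etag)
}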
-func (client *containerClient) restoreCreateRequest(ctx context.Context, options *containerClientRestoreOptions) (*policy.Request, error) { +func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1165,23 +1179,23 @@ func (client *containerClient) restoreCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.DeletedContainerName != nil { - req.Raw().Header.Set("x-ms-deleted-container-name", *options.DeletedContainerName) + req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedContainerName} } if options != nil && options.DeletedContainerVersion != nil { - req.Raw().Header.Set("x-ms-deleted-container-version", *options.DeletedContainerVersion) + req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // restoreHandleResponse handles the Restore response. -func (client *containerClient) restoreHandleResponse(resp *http.Response) (containerClientRestoreResponse, error) { - result := containerClientRestoreResponse{RawResponse: resp} +func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) { + result := ContainerClientRestoreResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1194,7 +1208,7 @@ func (client *containerClient) restoreHandleResponse(resp *http.Response) (conta if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRestoreResponse{}, err + return ContainerClientRestoreResponse{}, err } result.Date = &date } @@ -1204,27 +1218,29 @@ func (client *containerClient) restoreHandleResponse(resp *http.Response) (conta // SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. -// containerClientSetAccessPolicyOptions - containerClientSetAccessPolicyOptions contains the optional parameters for the -// containerClient.SetAccessPolicy method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *containerClient) SetAccessPolicy(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetAccessPolicyResponse, error) { - req, err := client.setAccessPolicyCreateRequest(ctx, containerClientSetAccessPolicyOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// containerACL - the acls for the container +// options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) } return client.setAccessPolicyHandleResponse(resp) } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. 
-func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1232,47 +1248,44 @@ func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") reqQP.Set("comp", "acl") - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetAccessPolicyOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Access != nil { - req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientSetAccessPolicyOptions.Access)) + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetAccessPolicyOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} type wrapper struct { XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` } - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.ContainerACL != nil { - return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerClientSetAccessPolicyOptions.ContainerACL}) - } - return req, nil + return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) } // setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
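setAccessPolicyCreateRequest now takes the ACL as a required containerACL parameter and always marshals it through the local wrapper struct, which nests the slice under a SignedIdentifiers root element. A minimal encoding/xml sketch with a simplified SignedIdentifier (the generated type also carries an AccessPolicy) showing the shape that wrapper produces:

package main

import (
	"encoding/xml"
	"fmt"
)

// SignedIdentifier is a simplified stand-in for the generated type.
type SignedIdentifier struct {
	ID *string `xml:"Id"`
}

func main() {
	id := "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI="
	containerACL := []*SignedIdentifier{{ID: &id}}

	// Same wrapper shape as in setAccessPolicyCreateRequest.
	type wrapper struct {
		XMLName      xml.Name             `xml:"SignedIdentifiers"`
		ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
	}

	out, err := xml.Marshal(wrapper{ContainerACL: &containerACL})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <SignedIdentifiers><SignedIdentifier><Id>MTIz...</Id></SignedIdentifier></SignedIdentifiers>
}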
-func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response) (containerClientSetAccessPolicyResponse, error) { - result := containerClientSetAccessPolicyResponse{RawResponse: resp} +func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { + result := ContainerClientSetAccessPolicyResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } result.LastModified = &lastModified } @@ -1288,7 +1301,7 @@ func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } result.Date = &date } @@ -1297,27 +1310,27 @@ func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. -// containerClientSetMetadataOptions - containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) SetMetadata(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetMetadataResponse, error) { - req, err := client.setMetadataCreateRequest(ctx, containerClientSetMetadataOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) } return client.setMetadataHandleResponse(resp) } // setMetadataCreateRequest creates the SetMetadata request. -func (client *containerClient) setMetadataCreateRequest(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1325,39 +1338,39 @@ func (client *containerClient) setMetadataCreateRequest(ctx context.Context, con reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") reqQP.Set("comp", "metadata") - if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetMetadataOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Metadata != nil { - for k, v := range containerClientSetMetadataOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetMetadataOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = 
[]string{"application/xml"} return req, nil } // setMetadataHandleResponse handles the SetMetadata response. -func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (containerClientSetMetadataResponse, error) { - result := containerClientSetMetadataResponse{RawResponse: resp} +func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { + result := ContainerClientSetMetadataResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } result.LastModified = &lastModified } @@ -1373,7 +1386,7 @@ func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (c if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } result.Date = &date } @@ -1382,28 +1395,29 @@ func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (c // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // contentLength - The length of the request. // multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // body - Initial data -// options - containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. -func (client *containerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (containerClientSubmitBatchResponse, error) { +// options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { - return containerClientSubmitBatchResponse{}, err + return ContainerClientSubmitBatchResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientSubmitBatchResponse{}, err + return ContainerClientSubmitBatchResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return containerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) } return client.submitBatchHandleResponse(resp) } // submitBatchCreateRequest creates the SubmitBatch request. 
-func (client *containerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (*policy.Request, error) { +func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -1416,19 +1430,19 @@ func (client *containerClient) submitBatchCreateRequest(ctx context.Context, con } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("Content-Type", multipartContentType) - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") - return req, runtime.MarshalAsXML(req, body) + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/xml") } // submitBatchHandleResponse handles the SubmitBatch response. -func (client *containerClient) submitBatchHandleResponse(resp *http.Response) (containerClientSubmitBatchResponse, error) { - result := containerClientSubmitBatchResponse{RawResponse: resp} +func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) { + result := ContainerClientSubmitBatchResponse{Body: resp.Body} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go similarity index 75% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index d40d63b1b0d..f8df7338b2f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -5,11 +5,12 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( - "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "time" ) @@ -25,40 +26,77 @@ type AccessPolicy struct { Start *time.Time `xml:"Start"` } -// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. 
-func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias AccessPolicy - aux := &struct { - *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` - }{ - alias: (*alias)(&a), - Expiry: (*timeRFC3339)(a.Expiry), - Start: (*timeRFC3339)(a.Start), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. -func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias AccessPolicy - aux := &struct { - *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` - }{ - alias: (*alias)(a), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - a.Expiry = (*time.Time)(aux.Expiry) - a.Start = (*time.Time)(aux.Start) - return nil -} - -// AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock method. +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. 
+ LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. type AppendPositionAccessConditions struct { // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. // Append Block will succeed only if the append position is equal to this number. If @@ -77,21 +115,6 @@ type ArrowConfiguration struct { Schema []*ArrowField `xml:"Schema>Field"` } -// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. -func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias ArrowConfiguration - aux := &struct { - *alias - Schema *[]*ArrowField `xml:"Schema>Field"` - }{ - alias: (*alias)(&a), - } - if a.Schema != nil { - aux.Schema = &a.Schema - } - return e.EncodeElement(aux, start) -} - // ArrowField - Groups settings regarding specific field of an arrow schema type ArrowField struct { // REQUIRED @@ -101,127 +124,449 @@ type ArrowField struct { Scale *int32 `xml:"Scale"` } -// BlobFlatListSegment struct -type BlobFlatListSegment struct { - // REQUIRED - BlobItems []*BlobItemInternal `xml:"Blob"` +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. -func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobFlatListSegment - aux := &struct { - *alias - BlobItems *[]*BlobItemInternal `xml:"Blob"` - }{ - alias: (*alias)(&b), - } - if b.BlobItems != nil { - aux.BlobItems = &b.BlobItems - } - return e.EncodeElement(aux, start) +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease duration cannot be changed using + // renew or change. + Duration *int32 + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -type BlobHTTPHeaders struct { - // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. 
+ BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// BlobHierarchyListSegment struct -type BlobHierarchyListSegment struct { - // REQUIRED - BlobItems []*BlobItemInternal `xml:"Blob"` - BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. -func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobHierarchyListSegment - aux := &struct { - *alias - BlobItems *[]*BlobItemInternal `xml:"Blob"` - BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` - }{ - alias: (*alias)(&b), - } - if b.BlobItems != nil { - aux.BlobItems = &b.BlobItems - } - if b.BlobPrefixes != nil { - aux.BlobPrefixes = &b.BlobPrefixes - } - return e.EncodeElement(aux, start) +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// BlobItemInternal - An Azure Storage blob -type BlobItemInternal struct { - // REQUIRED - Deleted *bool `xml:"Deleted"` - - // REQUIRED - Name *string `xml:"Name"` - - // REQUIRED; Properties of a blob - Properties *BlobPropertiesInternal `xml:"Properties"` - - // REQUIRED - Snapshot *string `xml:"Snapshot"` +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} - // Blob tags - BlobTags *BlobTags `xml:"Tags"` - HasVersionsOnly *bool `xml:"HasVersionsOnly"` - IsCurrentVersion *bool `xml:"IsCurrentVersion"` +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} - // Dictionary of - Metadata map[string]*string `xml:"Metadata"` +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} - // Dictionary of - OrMetadata map[string]*string `xml:"OrMetadata"` - VersionID *string `xml:"VersionId"` +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. -func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias BlobItemInternal - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - return nil -} - -// BlobPrefix struct -type BlobPrefix struct { - // REQUIRED - Name *string `xml:"Name"` +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters } -// BlobPropertiesInternal - Properties of a blob -type BlobPropertiesInternal struct { - // REQUIRED - Etag *string `xml:"Etag"` +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. 
+type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. 
+ // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + // Optional. An MD5 hash of the blob content. 
Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// BlobItemInternal - An Azure Storage blob +type BlobItemInternal struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobPropertiesInternal `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobPropertiesInternal - Properties of a blob +type BlobPropertiesInternal struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` // REQUIRED LastModified *time.Time `xml:"Last-Modified"` @@ -252,17 +597,17 @@ type BlobPropertiesInternal struct { DestinationSnapshot *string `xml:"DestinationSnapshot"` // The name of the encryption scope under which the blob is encrypted. - EncryptionScope *string `xml:"EncryptionScope"` - ExpiresOn *time.Time `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` - IncrementalCopy *bool `xml:"IncrementalCopy"` - IsSealed *bool `xml:"Sealed"` - LastAccessedOn *time.Time `xml:"LastAccessTime"` - LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` - LeaseState *LeaseStateType `xml:"LeaseState"` - LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` - LegalHold *bool `xml:"LegalHold"` + EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High // and Standard. @@ -272,69 +617,6 @@ type BlobPropertiesInternal struct { TagCount *int32 `xml:"TagCount"` } -// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. 
-func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobPropertiesInternal - aux := &struct { - *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *[]byte `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(&b), - AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), - CreationTime: (*timeRFC1123)(b.CreationTime), - DeletedTime: (*timeRFC1123)(b.DeletedTime), - ExpiresOn: (*timeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), - LastModified: (*timeRFC1123)(b.LastModified), - } - if b.ContentMD5 != nil { - aux.ContentMD5 = &b.ContentMD5 - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. -func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias BlobPropertiesInternal - aux := &struct { - *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *[]byte `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(b), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) - b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) - b.CreationTime = (*time.Time)(aux.CreationTime) - b.DeletedTime = (*time.Time)(aux.DeletedTime) - b.ExpiresOn = (*time.Time)(aux.ExpiresOn) - b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) - b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) - b.LastModified = (*time.Time)(aux.LastModified) - return nil -} - -// BlobTag struct type BlobTag struct { // REQUIRED Key *string `xml:"Key"` @@ -349,22 +631,6 @@ type BlobTags struct { BlobTagSet []*BlobTag `xml:"TagSet>Tag"` } -// MarshalXML implements the xml.Marshaller interface for type BlobTags. -func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "Tags" - type alias BlobTags - aux := &struct { - *alias - BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` - }{ - alias: (*alias)(&b), - } - if b.BlobTagSet != nil { - aux.BlobTagSet = &b.BlobTagSet - } - return e.EncodeElement(aux, start) -} - // Block - Represents a single block in a block blob. It describes the block's ID and size. type Block struct { // REQUIRED; The base64 encoded block ID. 
@@ -374,1142 +640,14 @@ type Block struct { Size *int64 `xml:"Size"` } -// BlockList struct -type BlockList struct { - CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` -} - -// MarshalXML implements the xml.Marshaller interface for type BlockList. -func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlockList - aux := &struct { - *alias - CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` - }{ - alias: (*alias)(&b), - } - if b.CommittedBlocks != nil { - aux.CommittedBlocks = &b.CommittedBlocks - } - if b.UncommittedBlocks != nil { - aux.UncommittedBlocks = &b.UncommittedBlocks - } - return e.EncodeElement(aux, start) -} - -// BlockLookupList struct -type BlockLookupList struct { - Committed []*string `xml:"Committed"` - Latest []*string `xml:"Latest"` - Uncommitted []*string `xml:"Uncommitted"` -} - -// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. -func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "BlockList" - type alias BlockLookupList - aux := &struct { - *alias - Committed *[]*string `xml:"Committed"` - Latest *[]*string `xml:"Latest"` - Uncommitted *[]*string `xml:"Uncommitted"` - }{ - alias: (*alias)(&b), - } - if b.Committed != nil { - aux.Committed = &b.Committed - } - if b.Latest != nil { - aux.Latest = &b.Latest - } - if b.Uncommitted != nil { - aux.Uncommitted = &b.Uncommitted - } - return e.EncodeElement(aux, start) -} - -// ClearRange enum -type ClearRange struct { - // REQUIRED - End *int64 `xml:"End"` - - // REQUIRED - Start *int64 `xml:"Start"` -} - -// ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. -type ContainerCpkScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - -// ContainerItem - An Azure Storage container -type ContainerItem struct { - // REQUIRED - Name *string `xml:"Name"` - - // REQUIRED; Properties of a container - Properties *ContainerProperties `xml:"Properties"` - Deleted *bool `xml:"Deleted"` - - // Dictionary of - Metadata map[string]*string `xml:"Metadata"` - Version *string `xml:"Version"` -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. -func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias ContainerItem - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - }{ - alias: (*alias)(c), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - c.Metadata = (map[string]*string)(aux.Metadata) - return nil -} - -// ContainerProperties - Properties of a container -type ContainerProperties struct { - // REQUIRED - Etag *string `xml:"Etag"` - - // REQUIRED - LastModified *time.Time `xml:"Last-Modified"` - DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` - DeletedTime *time.Time `xml:"DeletedTime"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` - - // Indicates if version level worm is enabled on this container. 
- IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` - LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` - LeaseState *LeaseStateType `xml:"LeaseState"` - LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` - PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` - PublicAccess *PublicAccessType `xml:"PublicAccess"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` -} - -// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. -func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias ContainerProperties - aux := &struct { - *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(&c), - DeletedTime: (*timeRFC1123)(c.DeletedTime), - LastModified: (*timeRFC1123)(c.LastModified), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. -func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias ContainerProperties - aux := &struct { - *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(c), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - c.DeletedTime = (*time.Time)(aux.DeletedTime) - c.LastModified = (*time.Time)(aux.LastModified) - return nil -} - -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another -// domain. Web browsers implement a security restriction known as same-origin policy that -// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin -// domain) to call APIs in another domain -type CorsRule struct { - // REQUIRED; the request headers that the origin domain may specify on the CORS request. - AllowedHeaders *string `xml:"AllowedHeaders"` - - // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) - AllowedMethods *string `xml:"AllowedMethods"` - - // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain - // is the domain from which the request originates. Note that the origin must be an exact - // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' - // to allow all origin domains to make requests via CORS. - AllowedOrigins *string `xml:"AllowedOrigins"` - - // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request - // issuer - ExposedHeaders *string `xml:"ExposedHeaders"` - - // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. - MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` -} - -// CpkInfo contains a group of parameters for the blobClient.Download method. -type CpkInfo struct { - // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. 
For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySHA256 *string -} - -// CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -type CpkScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - -// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. -type DelimitedTextConfiguration struct { - // The string used to separate columns. - ColumnSeparator *string `xml:"ColumnSeparator"` - - // The string used as an escape character. - EscapeChar *string `xml:"EscapeChar"` - - // The string used to quote a specific field. - FieldQuote *string `xml:"FieldQuote"` - - // Represents whether the data has headers. - HeadersPresent *bool `xml:"HasHeaders"` - - // The string used to separate records. - RecordSeparator *string `xml:"RecordSeparator"` -} - -// FilterBlobItem - Blob info from a Filter Blobs API call -type FilterBlobItem struct { - // REQUIRED - ContainerName *string `xml:"ContainerName"` - - // REQUIRED - Name *string `xml:"Name"` - - // Blob tags - Tags *BlobTags `xml:"Tags"` -} - -// FilterBlobSegment - The result of a Filter Blobs API call -type FilterBlobSegment struct { - // REQUIRED - Blobs []*FilterBlobItem `xml:"Blobs>Blob"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - - // REQUIRED - Where *string `xml:"Where"` - NextMarker *string `xml:"NextMarker"` -} - -// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. -func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias FilterBlobSegment - aux := &struct { - *alias - Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` - }{ - alias: (*alias)(&f), - } - if f.Blobs != nil { - aux.Blobs = &f.Blobs - } - return e.EncodeElement(aux, start) -} - -// GeoReplication - Geo-Replication information for the Secondary Storage Service -type GeoReplication struct { - // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available - // for read operations at the secondary. Primary writes after this point in time may or may - // not be available for reads. - LastSyncTime *time.Time `xml:"LastSyncTime"` - - // REQUIRED; The status of the secondary location - Status *BlobGeoReplicationStatus `xml:"Status"` -} - -// MarshalXML implements the xml.Marshaller interface for type GeoReplication. -func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias GeoReplication - aux := &struct { - *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` - }{ - alias: (*alias)(&g), - LastSyncTime: (*timeRFC1123)(g.LastSyncTime), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. 
-func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias GeoReplication - aux := &struct { - *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` - }{ - alias: (*alias)(g), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - g.LastSyncTime = (*time.Time)(aux.LastSyncTime) - return nil -} - -// JSONTextConfiguration - json text configuration -type JSONTextConfiguration struct { - // The string used to separate records. - RecordSeparator *string `xml:"RecordSeparator"` -} - -// KeyInfo - Key information -type KeyInfo struct { - // REQUIRED; The date-time the key expires in ISO 8601 UTC time - Expiry *string `xml:"Expiry"` - - // REQUIRED; The date-time the key is active in ISO 8601 UTC time - Start *string `xml:"Start"` -} - -// LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -type LeaseAccessConditions struct { - // If specified, the operation only succeeds if the resource's lease is active and matches this ID. - LeaseID *string -} - -// ListBlobsFlatSegmentResponse - An enumeration of blobs -type ListBlobsFlatSegmentResponse struct { - // REQUIRED - ContainerName *string `xml:"ContainerName,attr"` - - // REQUIRED - Segment *BlobFlatListSegment `xml:"Blobs"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// ListBlobsHierarchySegmentResponse - An enumeration of blobs -type ListBlobsHierarchySegmentResponse struct { - // REQUIRED - ContainerName *string `xml:"ContainerName,attr"` - - // REQUIRED - Segment *BlobHierarchyListSegment `xml:"Blobs"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Delimiter *string `xml:"Delimiter"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// ListContainersSegmentResponse - An enumeration of containers -type ListContainersSegmentResponse struct { - // REQUIRED - ContainerItems []*ContainerItem `xml:"Containers>Container"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. -func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias ListContainersSegmentResponse - aux := &struct { - *alias - ContainerItems *[]*ContainerItem `xml:"Containers>Container"` - }{ - alias: (*alias)(&l), - } - if l.ContainerItems != nil { - aux.ContainerItems = &l.ContainerItems - } - return e.EncodeElement(aux, start) -} - -// Logging - Azure Analytics Logging settings. -type Logging struct { - // REQUIRED; Indicates whether all delete requests should be logged. - Delete *bool `xml:"Delete"` - - // REQUIRED; Indicates whether all read requests should be logged. - Read *bool `xml:"Read"` - - // REQUIRED; the retention policy which determines how long the associated data should persist - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` - - // REQUIRED; The version of Storage Analytics to configure. - Version *string `xml:"Version"` - - // REQUIRED; Indicates whether all write requests should be logged. 
- Write *bool `xml:"Write"` -} - -// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs -type Metrics struct { - // REQUIRED; Indicates whether metrics are enabled for the Blob service. - Enabled *bool `xml:"Enabled"` - - // Indicates whether metrics should generate summary statistics for called API operations. - IncludeAPIs *bool `xml:"IncludeAPIs"` - - // the retention policy which determines how long the associated data should persist - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` - - // The version of Storage Analytics to configure. - Version *string `xml:"Version"` -} - -// ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -type ModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - IfMatch *string - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - IfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - IfNoneMatch *string - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - IfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - IfUnmodifiedSince *time.Time -} - -// PageList - the list of pages -type PageList struct { - ClearRange []*ClearRange `xml:"ClearRange"` - NextMarker *string `xml:"NextMarker"` - PageRange []*PageRange `xml:"PageRange"` -} - -// MarshalXML implements the xml.Marshaller interface for type PageList. -func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias PageList - aux := &struct { - *alias - ClearRange *[]*ClearRange `xml:"ClearRange"` - PageRange *[]*PageRange `xml:"PageRange"` - }{ - alias: (*alias)(&p), - } - if p.ClearRange != nil { - aux.ClearRange = &p.ClearRange - } - if p.PageRange != nil { - aux.PageRange = &p.PageRange - } - return e.EncodeElement(aux, start) -} - -// PageRange struct -type PageRange struct { - // REQUIRED - End *int64 `xml:"End"` - - // REQUIRED - Start *int64 `xml:"Start"` -} - -// QueryFormat struct -type QueryFormat struct { - // REQUIRED; The quick query format type. - Type *QueryFormatType `xml:"Type"` - - // Groups the settings used for formatting the response if the response should be Arrow formatted. - ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` - - // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. - DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` - - // json text configuration - JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` - - // Anything - ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` -} - -// QueryRequest - Groups the set of query request settings. -type QueryRequest struct { - // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. - Expression *string `xml:"Expression"` - - // REQUIRED; Required. The type of the provided query expression. - QueryType *string `xml:"QueryType"` - InputSerialization *QuerySerialization `xml:"InputSerialization"` - OutputSerialization *QuerySerialization `xml:"OutputSerialization"` -} - -// MarshalXML implements the xml.Marshaller interface for type QueryRequest. 
-func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "QueryRequest" - type alias QueryRequest - aux := &struct { - *alias - }{ - alias: (*alias)(&q), - } - return e.EncodeElement(aux, start) -} - -//QuerySerialization struct -type QuerySerialization struct { - // REQUIRED - Format *QueryFormat `xml:"Format"` -} - -// RetentionPolicy - the retention policy which determines how long the associated data should persist -type RetentionPolicy struct { - // REQUIRED; Indicates whether a retention policy is enabled for the storage service - Enabled *bool `xml:"Enabled"` - - // Indicates whether permanent delete is allowed on this storage account. - AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` - - // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this - // value will be deleted - Days *int32 `xml:"Days"` -} - -// SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages method. -type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. - IfSequenceNumberEqualTo *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. - IfSequenceNumberLessThan *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// SignedIdentifier - signed identifier -type SignedIdentifier struct { - // REQUIRED; An Access policy - AccessPolicy *AccessPolicy `xml:"AccessPolicy"` - - // REQUIRED; a unique id - ID *string `xml:"Id"` -} - -// SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *string - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *string - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} - -// StaticWebsite - The properties that enable an account to host a static website -type StaticWebsite struct { - // REQUIRED; Indicates whether this account is hosting a static website - Enabled *bool `xml:"Enabled"` - - // Absolute path of the default index page - DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` - - // The absolute path of the custom 404 page - ErrorDocument404Path *string `xml:"ErrorDocument404Path"` - - // The default name of the index page under each directory - IndexDocument *string `xml:"IndexDocument"` -} - -// StorageServiceProperties - Storage Service Properties. -type StorageServiceProperties struct { - // The set of CORS rules. - Cors []*CorsRule `xml:"Cors>CorsRule"` - - // The default version to use for requests to the Blob service if an incoming request's version is not specified. 
Possible - // values include version 2008-10-27 and all more recent versions - DefaultServiceVersion *string `xml:"DefaultServiceVersion"` - - // the retention policy which determines how long the associated data should persist - DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - HourMetrics *Metrics `xml:"HourMetrics"` - - // Azure Analytics Logging settings. - Logging *Logging `xml:"Logging"` - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - MinuteMetrics *Metrics `xml:"MinuteMetrics"` - - // The properties that enable an account to host a static website - StaticWebsite *StaticWebsite `xml:"StaticWebsite"` -} - -// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. -func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias StorageServiceProperties - aux := &struct { - *alias - Cors *[]*CorsRule `xml:"Cors>CorsRule"` - }{ - alias: (*alias)(&s), - } - if s.Cors != nil { - aux.Cors = &s.Cors - } - return e.EncodeElement(aux, start) -} - -// StorageServiceStats - Stats for the storage service. -type StorageServiceStats struct { - // Geo-Replication information for the Secondary Storage Service - GeoReplication *GeoReplication `xml:"GeoReplication"` -} - -// UserDelegationKey - A user delegation key -type UserDelegationKey struct { - // REQUIRED; The date-time the key expires - SignedExpiry *time.Time `xml:"SignedExpiry"` - - // REQUIRED; The Azure Active Directory object ID in GUID format. - SignedOid *string `xml:"SignedOid"` - - // REQUIRED; Abbreviation of the Azure Storage service that accepts the key - SignedService *string `xml:"SignedService"` - - // REQUIRED; The date-time the key is active - SignedStart *time.Time `xml:"SignedStart"` - - // REQUIRED; The Azure Active Directory tenant ID in GUID format - SignedTid *string `xml:"SignedTid"` - - // REQUIRED; The service version that created the key - SignedVersion *string `xml:"SignedVersion"` - - // REQUIRED; The key as a base64 string - Value *string `xml:"Value"` -} - -// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. -func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias UserDelegationKey - aux := &struct { - *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` - }{ - alias: (*alias)(&u), - SignedExpiry: (*timeRFC3339)(u.SignedExpiry), - SignedStart: (*timeRFC3339)(u.SignedStart), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. -func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias UserDelegationKey - aux := &struct { - *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` - }{ - alias: (*alias)(u), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - u.SignedExpiry = (*time.Time)(aux.SignedExpiry) - u.SignedStart = (*time.Time)(aux.SignedStart) - return nil -} - -// appendBlobClientAppendBlockFromURLOptions contains the optional parameters for the appendBlobClient.AppendBlockFromURL -// method. -type appendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
- CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock method. -type appendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create method. -type appendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal method. 
-type appendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL method. -type blobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease method. -type blobClientAcquireLeaseOptions struct { - // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease duration cannot be changed using - // renew or change. - Duration *int32 - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease method. -type blobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease method. 
-type blobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL method. -type blobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot method. -type blobClientCreateSnapshotOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy -// method. -type blobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. -type blobClientDeleteOptions struct { - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - BlobDeleteType *BlobDeleteType - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. -type blobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. -type blobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties method. -type blobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. -type blobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientQueryOptions contains the optional parameters for the blobClient.Query method. -type blobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease method. -type blobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease method. -type blobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. -type blobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders method. -type blobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetImmutabilityPolicyOptions contains the optional parameters for the blobClient.SetImmutabilityPolicy method. -type blobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. -type blobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata method. -type blobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. -type blobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Blob tags - Tags *BlobTags - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. -type blobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL method. -type blobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. -type blobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blockBlobClientCommitBlockListOptions contains the optional parameters for the blockBlobClient.CommitBlockList method. 
-type blockBlobClientCommitBlockListOptions struct { +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { // Optional. Used to set blob tags in various blob operations. BlobTagsString *string // Specifies the date time when the blobs immutability policy is set to expire. ImmutabilityPolicyExpiry *time.Time // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicySetting // Specified if a legal hold should be set on the blob. LegalHold *bool // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the @@ -1533,8 +671,8 @@ type blockBlobClientCommitBlockListOptions struct { TransactionalContentMD5 []byte } -// blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList method. -type blockBlobClientGetBlockListOptions struct { +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1547,8 +685,8 @@ type blockBlobClientGetBlockListOptions struct { Timeout *int32 } -// blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL method. -type blockBlobClientPutBlobFromURLOptions struct { +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { // Optional. Used to set blob tags in various blob operations. BlobTagsString *string // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. @@ -1576,8 +714,8 @@ type blockBlobClientPutBlobFromURLOptions struct { TransactionalContentMD5 []byte } -// blockBlobClientStageBlockFromURLOptions contains the optional parameters for the blockBlobClient.StageBlockFromURL method. -type blockBlobClientStageBlockFromURLOptions struct { +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -1594,8 +732,8 @@ type blockBlobClientStageBlockFromURLOptions struct { Timeout *int32 } -// blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock method. -type blockBlobClientStageBlockOptions struct { +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1608,14 +746,14 @@ type blockBlobClientStageBlockOptions struct { TransactionalContentMD5 []byte } -// blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload method. 
-type blockBlobClientUploadOptions struct { +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { // Optional. Used to set blob tags in various blob operations. BlobTagsString *string // Specifies the date time when the blobs immutability policy is set to expire. ImmutabilityPolicyExpiry *time.Time // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicySetting // Specified if a legal hold should be set on the blob. LegalHold *bool // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the @@ -1637,8 +775,27 @@ type blockBlobClientUploadOptions struct { TransactionalContentMD5 []byte } -// containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease method. -type containerClientAcquireLeaseOptions struct { +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease // can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. @@ -1655,8 +812,8 @@ type containerClientAcquireLeaseOptions struct { Timeout *int32 } -// containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease method. -type containerClientBreakLeaseOptions struct { +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This // break period is only used if it is shorter than the time remaining on the // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has @@ -1672,8 +829,8 @@ type containerClientBreakLeaseOptions struct { Timeout *int32 } -// containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease method. -type containerClientChangeLeaseOptions struct { +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1682,8 +839,8 @@ type containerClientChangeLeaseOptions struct { Timeout *int32 } -// containerClientCreateOptions contains the optional parameters for the containerClient.Create method. -type containerClientCreateOptions struct { +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. 
+type ContainerClientCreateOptions struct { // Specifies whether data in the container may be accessed publicly and the level of access Access *PublicAccessType // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the @@ -1701,8 +858,119 @@ type containerClientCreateOptions struct { Timeout *int32 } -// containerClientDeleteOptions contains the optional parameters for the containerClient.Delete method. -type containerClientDeleteOptions struct { +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1711,8 +979,12 @@ type containerClientDeleteOptions struct { Timeout *int32 } -// containerClientGetAccessPolicyOptions contains the optional parameters for the containerClient.GetAccessPolicy method. -type containerClientGetAccessPolicyOptions struct { +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1721,13 +993,10 @@ type containerClientGetAccessPolicyOptions struct { Timeout *int32 } -// containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo method. -type containerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties method. -type containerClientGetPropertiesOptions struct { +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1736,25 +1005,15 @@ type containerClientGetPropertiesOptions struct { Timeout *int32 } -// containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment -// method. -type containerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. 
- Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1763,25 +1022,8 @@ type containerClientListBlobFlatSegmentOptions struct { Timeout *int32 } -// containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment -// method. -type containerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. 
- Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1790,95 +1032,268 @@ type containerClientListBlobHierarchySegmentOptions struct { Timeout *int32 } -// containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease method. -type containerClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} +// ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCpkScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a container + Properties *ContainerProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Version *string `xml:"Version"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + DeletedTime *time.Time `xml:"DeletedTime"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + + // Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + PublicAccess *PublicAccessType `xml:"PublicAccess"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` +} + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CorsRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. 
+ AllowedHeaders *string `xml:"AllowedHeaders"`
+
+ // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
+ AllowedMethods *string `xml:"AllowedMethods"`
+
+ // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain
+ // is the domain from which the request originates. Note that the origin must be an exact
+ // case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*'
+ // to allow all origin domains to make requests via CORS.
+ AllowedOrigins *string `xml:"AllowedOrigins"`
+
+ // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request
+ // issuer
+ ExposedHeaders *string `xml:"ExposedHeaders"`
+
+ // REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request.
+ MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
+}
+
+// CpkInfo contains a group of parameters for the BlobClient.Download method.
+type CpkInfo struct {
+ // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+ // if the x-ms-encryption-key header is provided.
+ EncryptionAlgorithm *EncryptionAlgorithmType
+ // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
+ // is performed with the root account encryption key. For more information, see
+ // Encryption at Rest for Azure Storage Services.
+ EncryptionKey *string
+ // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+ EncryptionKeySHA256 *string
+}
+
+// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+type CpkScopeInfo struct {
+ // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
+ // in the request. If not specified, encryption is performed with the default
+ // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
+ EncryptionScope *string
+}
+
+// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
+type DelimitedTextConfiguration struct {
+ // The string used to separate columns.
+ ColumnSeparator *string `xml:"ColumnSeparator"`
+
+ // The string used as an escape character.
+ EscapeChar *string `xml:"EscapeChar"`
+
+ // The string used to quote a specific field.
+ FieldQuote *string `xml:"FieldQuote"`
+
+ // Represents whether the data has headers.
+ HeadersPresent *bool `xml:"HasHeaders"`
+
+ // The string used to separate records. 
+ RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` -// containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. 
-type containerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` -// containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease method. -type containerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` -// containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. -type containerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. - DeletedContainerName *string - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` -// containerClientSetAccessPolicyOptions contains the optional parameters for the containerClient.SetAccessPolicy method. -type containerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // the acls for the container - ContainerACL []*SignedIdentifier - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` } -// containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata method. 
-type containerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` } -// containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. -type containerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time } -// pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages method. -type pageBlobClientClearPagesOptions struct { +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { // Return only the bytes of the blob in the specified range. 
Range *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -1889,8 +1304,8 @@ type pageBlobClientClearPagesOptions struct { Timeout *int32 } -// pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental method. -type pageBlobClientCopyIncrementalOptions struct { +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1899,8 +1314,8 @@ type pageBlobClientCopyIncrementalOptions struct { Timeout *int32 } -// pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create method. -type pageBlobClientCreateOptions struct { +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of // the sequence number must be between 0 and 2^63 - 1. BlobSequenceNumber *int64 @@ -1909,7 +1324,7 @@ type pageBlobClientCreateOptions struct { // Specifies the date time when the blobs immutability policy is set to expire. ImmutabilityPolicyExpiry *time.Time // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicySetting // Specified if a legal hold should be set on the blob. LegalHold *bool // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the @@ -1929,8 +1344,8 @@ type pageBlobClientCreateOptions struct { Timeout *int32 } -// pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the pageBlobClient.GetPageRangesDiff method. -type pageBlobClientGetPageRangesDiffOptions struct { +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff method. +type PageBlobClientGetPageRangesDiffOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used @@ -1967,8 +1382,8 @@ type pageBlobClientGetPageRangesDiffOptions struct { Timeout *int32 } -// pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges method. -type pageBlobClientGetPageRangesOptions struct { +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. +type PageBlobClientGetPageRangesOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing // operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used @@ -1995,8 +1410,8 @@ type pageBlobClientGetPageRangesOptions struct { Timeout *int32 } -// pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize method. -type pageBlobClientResizeOptions struct { +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2005,9 +1420,9 @@ type pageBlobClientResizeOptions struct { Timeout *int32 } -// pageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the pageBlobClient.UpdateSequenceNumber +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber // method. -type pageBlobClientUpdateSequenceNumberOptions struct { +type PageBlobClientUpdateSequenceNumberOptions struct { // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of // the sequence number must be between 0 and 2^63 - 1. BlobSequenceNumber *int64 @@ -2019,8 +1434,8 @@ type pageBlobClientUpdateSequenceNumberOptions struct { Timeout *int32 } -// pageBlobClientUploadPagesFromURLOptions contains the optional parameters for the pageBlobClient.UploadPagesFromURL method. -type pageBlobClientUploadPagesFromURLOptions struct { +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -2035,8 +1450,8 @@ type pageBlobClientUploadPagesFromURLOptions struct { Timeout *int32 } -// pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages method. -type pageBlobClientUploadPagesOptions struct { +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { // Return only the bytes of the blob in the specified range. Range *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -2051,8 +1466,80 @@ type pageBlobClientUploadPagesOptions struct { TransactionalContentMD5 []byte } -// serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. -type serviceClientFilterBlobsOptions struct { +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. 
+ DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + + // json text configuration + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + + // parquet configuration + ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression *string `xml:"Expression"` + + // CONSTANT; Required. The type of the provided query expression. + // Field has constant value "SQL", any specified value is ignored. + QueryType *string `xml:"QueryType"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +type QuerySerialization struct { + // REQUIRED + Format *QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used @@ -2075,13 +1562,13 @@ type serviceClientFilterBlobsOptions struct { Where *string } -// serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. -type serviceClientGetAccountInfoOptions struct { +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { // placeholder for future optional parameters } -// serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. -type serviceClientGetPropertiesOptions struct { +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
+type ServiceClientGetPropertiesOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2090,8 +1577,8 @@ type serviceClientGetPropertiesOptions struct { Timeout *int32 } -// serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. -type serviceClientGetStatisticsOptions struct { +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2100,8 +1587,8 @@ type serviceClientGetStatisticsOptions struct { Timeout *int32 } -// serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey method. -type serviceClientGetUserDelegationKeyOptions struct { +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2110,9 +1597,9 @@ type serviceClientGetUserDelegationKeyOptions struct { Timeout *int32 } -// serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment // method. -type serviceClientListContainersSegmentOptions struct { +type ServiceClientListContainersSegmentOptions struct { // Include this parameter to specify that the container's metadata be returned as part of the response body. Include []ListContainersIncludeType // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The @@ -2137,8 +1624,8 @@ type serviceClientListContainersSegmentOptions struct { Timeout *int32 } -// serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. -type serviceClientSetPropertiesOptions struct { +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2147,8 +1634,8 @@ type serviceClientSetPropertiesOptions struct { Timeout *int32 } -// serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. -type serviceClientSubmitBatchOptions struct { +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. 
RequestID *string @@ -2156,3 +1643,100 @@ type serviceClientSubmitBatchOptions struct { // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 } + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +type StorageError struct { + Message *string `json:"Message,omitempty"` +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + Cors []*CorsRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. + Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. 
+ SignedOID *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTID *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go new file mode 100644 index 00000000000..770e41a080b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -0,0 +1,481 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. +func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. 
+func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobHierarchyListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + if b.BlobPrefixes != nil { + aux.BlobPrefixes = &b.BlobPrefixes + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. +func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobItemInternal + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. +func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), + CreationTime: (*timeRFC1123)(b.CreationTime), + DeletedTime: (*timeRFC1123)(b.DeletedTime), + ExpiresOn: (*timeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), + LastModified: (*timeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) + aux.ContentMD5 = &encodedContentMD5 + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. 
+func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.ContentMD5 != nil { + if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { + return err + } + } + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. +func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. +func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "BlockList" + type alias BlockLookupList + aux := &struct { + *alias + Committed *[]*string `xml:"Committed"` + Latest *[]*string `xml:"Latest"` + Uncommitted *[]*string `xml:"Uncommitted"` + }{ + alias: (*alias)(&b), + } + if b.Committed != nil { + aux.Committed = &b.Committed + } + if b.Latest != nil { + aux.Latest = &b.Latest + } + if b.Uncommitted != nil { + aux.Uncommitted = &b.Uncommitted + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. +func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias ContainerItem + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(c), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + c.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. 
+func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&c), + DeletedTime: (*timeRFC1123)(c.DeletedTime), + LastModified: (*timeRFC1123)(c.LastModified), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. +func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(c), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + c.DeletedTime = (*time.Time)(aux.DeletedTime) + c.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. +func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. +func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ListContainersSegmentResponse + aux := &struct { + *alias + ContainerItems *[]*ContainerItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.ContainerItems != nil { + aux.ContainerItems = &l.ContainerItems + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PageList. +func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias PageList + aux := &struct { + *alias + ClearRange *[]*ClearRange `xml:"ClearRange"` + PageRange *[]*PageRange `xml:"PageRange"` + }{ + alias: (*alias)(&p), + } + if p.ClearRange != nil { + aux.ClearRange = &p.ClearRange + } + if p.PageRange != nil { + aux.PageRange = &p.PageRange + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type QueryRequest. 
+func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "QueryRequest" + type alias QueryRequest + aux := &struct { + *alias + }{ + alias: (*alias)(&q), + } + return e.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. +func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + Cors *[]*CorsRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.Cors != nil { + aux.Cors = &s.Cors + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. +func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(&u), + SignedExpiry: (*timeRFC3339)(u.SignedExpiry), + SignedStart: (*timeRFC3339)(u.SignedStart), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. +func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(u), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + u.SignedStart = (*time.Time)(aux.SignedStart) + return nil +} + +func populate(m map[string]interface{}, k string, v interface{}) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v interface{}) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go new file mode 100644 index 00000000000..2747e408120 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -0,0 +1,1287 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// PageBlobClient contains the methods for the PageBlob group. +// Don't use this type directly, use NewPageBlobClient() instead. +type PageBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { + client := &PageBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + } + return client.clearPagesHandleResponse(resp) +} + +// clearPagesCreateRequest creates the ClearPages request. 
+func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && 
options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// clearPagesHandleResponse handles the ClearPages response. +func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { + result := PageBlobClientClearPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can +// be read or copied from as usual. This API is supported since REST version +// 2016-05-31. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + } + return client.copyIncrementalHandleResponse(resp) +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// copyIncrementalHandleResponse handles the CopyIncremental response. 
+func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { + result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Create - The Create operation creates a new page blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil 
&& modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { + result := PageBlobClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot +// of a page blob +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 +// options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ + More: func(page PageBlobClientGetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) + } + return client.GetPageRangesHandleResponse(resp) + }, + }) +} + +// GetPageRangesCreateRequest creates the GetPageRanges request. +func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// GetPageRangesHandleResponse handles the GetPageRanges response. +func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { + result := PageBlobClientGetPageRangesResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + return result, nil +} + +// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that +// were changed between target blob and previous snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ + More: func(page PageBlobClientGetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) + } + return client.GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request. +func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.PrevSnapshotURL != nil { + req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = 
[]string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. +func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { + result := PageBlobClientGetPageRangesDiffResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + return result, nil +} + +// Resize - Resize the Blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + } + return client.resizeHandleResponse(resp) +} + +// resizeCreateRequest creates the Resize request. +func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + 
return req, nil +} + +// resizeHandleResponse handles the Resize response. +func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { + result := PageBlobClientResizeResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UpdateSequenceNumber - Update the sequence number of the blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + } + return client.updateSequenceNumberHandleResponse(resp) +} + +// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
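As with Resize, here is a hedged usage sketch for UpdateSequenceNumber (again not part of the vendored file): the helper name and option values are illustrative, and SequenceNumberActionTypeUpdate is assumed to be the generated constant for the "update" action; adjust to the actual enum name if it differs.

// Sketch only: assumes "context" is imported and a constructed *PageBlobClient.
func updateSequenceNumberExample(ctx context.Context, client *PageBlobClient) error {
	seq := int64(42)
	opts := &PageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &seq}
	_, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionTypeUpdate, opts, nil, nil)
	return err
}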
+func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
+func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { + result := PageBlobClientUpdateSequenceNumberResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesHandleResponse(resp) +} + +// uploadPagesCreateRequest creates the UploadPages request. 
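One more illustrative sketch (not part of the vendored file), this time for UploadPages: page writes must cover whole 512-byte pages, and the Range option mirrors the x-ms-range header set in the request builder below. streaming.NopCloser from sdk/azcore/streaming is assumed for wrapping the body; the helper name is made up, other identifiers are as generated here.

// Sketch only: assumes imports "bytes", "context", and
// "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming".
func uploadPagesExample(ctx context.Context, client *PageBlobClient) error {
	data := make([]byte, 512) // exactly one 512-byte page
	pageRange := "bytes=0-511"
	opts := &PageBlobClientUploadPagesOptions{Range: &pageRange}
	body := streaming.NopCloser(bytes.NewReader(data))
	_, err := client.UploadPages(ctx, int64(len(data)), body, opts, nil, nil, nil, nil, nil)
	return err
}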
+func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { + result := PageBlobClientUploadPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceURL - Specify a URL to the copy source. +// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// contentLength - The length of the request. +// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. 
+// options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesFromURLHandleResponse(resp) +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
+func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
+func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { + result := PageBlobClientUploadPagesFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go similarity index 70% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 60c1c0c34ec..3d2d5d2f953 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -5,23 +5,18 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( - "net/http" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" "time" ) -// appendBlobClientAppendBlockFromURLResponse contains the response from method appendBlobClient.AppendBlockFromURL. -type appendBlobClientAppendBlockFromURLResponse struct { - appendBlobClientAppendBlockFromURLResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// appendBlobClientAppendBlockFromURLResult contains the result from method appendBlobClient.AppendBlockFromURL. -type appendBlobClientAppendBlockFromURLResult struct { +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. BlobAppendOffset *string @@ -35,7 +30,7 @@ type appendBlobClientAppendBlockFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -59,15 +54,8 @@ type appendBlobClientAppendBlockFromURLResult struct { XMSContentCRC64 []byte } -// appendBlobClientAppendBlockResponse contains the response from method appendBlobClient.AppendBlock. -type appendBlobClientAppendBlockResponse struct { - appendBlobClientAppendBlockResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// appendBlobClientAppendBlockResult contains the result from method appendBlobClient.AppendBlock. -type appendBlobClientAppendBlockResult struct { +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. BlobAppendOffset *string @@ -84,7 +72,7 @@ type appendBlobClientAppendBlockResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -108,15 +96,8 @@ type appendBlobClientAppendBlockResult struct { XMSContentCRC64 []byte } -// appendBlobClientCreateResponse contains the response from method appendBlobClient.Create. -type appendBlobClientCreateResponse struct { - appendBlobClientCreateResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// appendBlobClientCreateResult contains the result from method appendBlobClient.Create. -type appendBlobClientCreateResult struct { +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -127,7 +108,7 @@ type appendBlobClientCreateResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -151,15 +132,8 @@ type appendBlobClientCreateResult struct { VersionID *string } -// appendBlobClientSealResponse contains the response from method appendBlobClient.Seal. -type appendBlobClientSealResponse struct { - appendBlobClientSealResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// appendBlobClientSealResult contains the result from method appendBlobClient.Seal. 
-type appendBlobClientSealResult struct { +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -167,7 +141,7 @@ type appendBlobClientSealResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // IsSealed contains the information returned from the x-ms-blob-sealed header response. IsSealed *bool @@ -182,15 +156,8 @@ type appendBlobClientSealResult struct { Version *string } -// blobClientAbortCopyFromURLResponse contains the response from method blobClient.AbortCopyFromURL. -type blobClientAbortCopyFromURLResponse struct { - blobClientAbortCopyFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientAbortCopyFromURLResult contains the result from method blobClient.AbortCopyFromURL. -type blobClientAbortCopyFromURLResult struct { +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -204,15 +171,8 @@ type blobClientAbortCopyFromURLResult struct { Version *string } -// blobClientAcquireLeaseResponse contains the response from method blobClient.AcquireLease. -type blobClientAcquireLeaseResponse struct { - blobClientAcquireLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientAcquireLeaseResult contains the result from method blobClient.AcquireLease. -type blobClientAcquireLeaseResult struct { +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -220,7 +180,7 @@ type blobClientAcquireLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -235,15 +195,8 @@ type blobClientAcquireLeaseResult struct { Version *string } -// blobClientBreakLeaseResponse contains the response from method blobClient.BreakLease. -type blobClientBreakLeaseResponse struct { - blobClientBreakLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientBreakLeaseResult contains the result from method blobClient.BreakLease. -type blobClientBreakLeaseResult struct { +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -251,7 +204,7 @@ type blobClientBreakLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time @@ -266,15 +219,8 @@ type blobClientBreakLeaseResult struct { Version *string } -// blobClientChangeLeaseResponse contains the response from method blobClient.ChangeLease. -type blobClientChangeLeaseResponse struct { - blobClientChangeLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientChangeLeaseResult contains the result from method blobClient.ChangeLease. -type blobClientChangeLeaseResult struct { +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -282,7 +228,7 @@ type blobClientChangeLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -297,15 +243,8 @@ type blobClientChangeLeaseResult struct { Version *string } -// blobClientCopyFromURLResponse contains the response from method blobClient.CopyFromURL. -type blobClientCopyFromURLResponse struct { - blobClientCopyFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientCopyFromURLResult contains the result from method blobClient.CopyFromURL. -type blobClientCopyFromURLResult struct { +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -322,7 +261,7 @@ type blobClientCopyFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -340,15 +279,8 @@ type blobClientCopyFromURLResult struct { XMSContentCRC64 []byte } -// blobClientCreateSnapshotResponse contains the response from method blobClient.CreateSnapshot. -type blobClientCreateSnapshotResponse struct { - blobClientCreateSnapshotResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientCreateSnapshotResult contains the result from method blobClient.CreateSnapshot. -type blobClientCreateSnapshotResult struct { +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -356,7 +288,7 @@ type blobClientCreateSnapshotResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. IsServerEncrypted *bool @@ -377,15 +309,8 @@ type blobClientCreateSnapshotResult struct { VersionID *string } -// blobClientDeleteImmutabilityPolicyResponse contains the response from method blobClient.DeleteImmutabilityPolicy. -type blobClientDeleteImmutabilityPolicyResponse struct { - blobClientDeleteImmutabilityPolicyResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// blobClientDeleteImmutabilityPolicyResult contains the result from method blobClient.DeleteImmutabilityPolicy. -type blobClientDeleteImmutabilityPolicyResult struct { +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -399,15 +324,8 @@ type blobClientDeleteImmutabilityPolicyResult struct { Version *string } -// blobClientDeleteResponse contains the response from method blobClient.Delete. -type blobClientDeleteResponse struct { - blobClientDeleteResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientDeleteResult contains the result from method blobClient.Delete. -type blobClientDeleteResult struct { +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -421,15 +339,8 @@ type blobClientDeleteResult struct { Version *string } -// blobClientDownloadResponse contains the response from method blobClient.Download. -type blobClientDownloadResponse struct { - blobClientDownloadResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientDownloadResult contains the result from method blobClient.Download. -type blobClientDownloadResult struct { +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { // AcceptRanges contains the information returned from the Accept-Ranges header response. AcceptRanges *string @@ -445,6 +356,9 @@ type blobClientDownloadResult struct { // BlobType contains the information returned from the x-ms-blob-type header response. BlobType *BlobType + // Body contains the streaming response. + Body io.ReadCloser + // CacheControl contains the information returned from the Cache-Control header response. CacheControl *string @@ -497,7 +411,7 @@ type blobClientDownloadResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -512,7 +426,7 @@ type blobClientDownloadResult struct { ImmutabilityPolicyExpiresOn *time.Time // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicyMode // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. IsCurrentVersion *bool @@ -563,15 +477,8 @@ type blobClientDownloadResult struct { VersionID *string } -// blobClientGetAccountInfoResponse contains the response from method blobClient.GetAccountInfo. -type blobClientGetAccountInfoResponse struct { - blobClientGetAccountInfoResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientGetAccountInfoResult contains the result from method blobClient.GetAccountInfo. 
-type blobClientGetAccountInfoResult struct { +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { // AccountKind contains the information returned from the x-ms-account-kind header response. AccountKind *AccountKind @@ -591,15 +498,8 @@ type blobClientGetAccountInfoResult struct { Version *string } -// blobClientGetPropertiesResponse contains the response from method blobClient.GetProperties. -type blobClientGetPropertiesResponse struct { - blobClientGetPropertiesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientGetPropertiesResult contains the result from method blobClient.GetProperties. -type blobClientGetPropertiesResult struct { +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { // AcceptRanges contains the information returned from the Accept-Ranges header response. AcceptRanges *string @@ -676,7 +576,7 @@ type blobClientGetPropertiesResult struct { DestinationSnapshot *string // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -691,7 +591,7 @@ type blobClientGetPropertiesResult struct { ImmutabilityPolicyExpiresOn *time.Time // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicyMode // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. IsCurrentVersion *bool @@ -748,15 +648,8 @@ type blobClientGetPropertiesResult struct { VersionID *string } -// blobClientGetTagsResponse contains the response from method blobClient.GetTags. -type blobClientGetTagsResponse struct { - blobClientGetTagsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientGetTagsResult contains the result from method blobClient.GetTags. -type blobClientGetTagsResult struct { +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { BlobTags // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -771,15 +664,8 @@ type blobClientGetTagsResult struct { Version *string `xml:"Version"` } -// blobClientQueryResponse contains the response from method blobClient.Query. -type blobClientQueryResponse struct { - blobClientQueryResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientQueryResult contains the result from method blobClient.Query. -type blobClientQueryResult struct { +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { // AcceptRanges contains the information returned from the Accept-Ranges header response. AcceptRanges *string @@ -795,6 +681,9 @@ type blobClientQueryResult struct { // BlobType contains the information returned from the x-ms-blob-type header response. BlobType *BlobType + // Body contains the streaming response. + Body io.ReadCloser + // CacheControl contains the information returned from the Cache-Control header response. 
CacheControl *string @@ -847,7 +736,7 @@ type blobClientQueryResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -880,15 +769,8 @@ type blobClientQueryResult struct { Version *string } -// blobClientReleaseLeaseResponse contains the response from method blobClient.ReleaseLease. -type blobClientReleaseLeaseResponse struct { - blobClientReleaseLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientReleaseLeaseResult contains the result from method blobClient.ReleaseLease. -type blobClientReleaseLeaseResult struct { +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -896,7 +778,7 @@ type blobClientReleaseLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -908,15 +790,8 @@ type blobClientReleaseLeaseResult struct { Version *string } -// blobClientRenewLeaseResponse contains the response from method blobClient.RenewLease. -type blobClientRenewLeaseResponse struct { - blobClientRenewLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientRenewLeaseResult contains the result from method blobClient.RenewLease. -type blobClientRenewLeaseResult struct { +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobClientRenewLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -924,7 +799,7 @@ type blobClientRenewLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -939,15 +814,8 @@ type blobClientRenewLeaseResult struct { Version *string } -// blobClientSetExpiryResponse contains the response from method blobClient.SetExpiry. -type blobClientSetExpiryResponse struct { - blobClientSetExpiryResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetExpiryResult contains the result from method blobClient.SetExpiry. -type blobClientSetExpiryResult struct { +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -955,7 +823,7 @@ type blobClientSetExpiryResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time @@ -967,15 +835,8 @@ type blobClientSetExpiryResult struct { Version *string } -// blobClientSetHTTPHeadersResponse contains the response from method blobClient.SetHTTPHeaders. -type blobClientSetHTTPHeadersResponse struct { - blobClientSetHTTPHeadersResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetHTTPHeadersResult contains the result from method blobClient.SetHTTPHeaders. -type blobClientSetHTTPHeadersResult struct { +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -986,7 +847,7 @@ type blobClientSetHTTPHeadersResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -998,15 +859,8 @@ type blobClientSetHTTPHeadersResult struct { Version *string } -// blobClientSetImmutabilityPolicyResponse contains the response from method blobClient.SetImmutabilityPolicy. -type blobClientSetImmutabilityPolicyResponse struct { - blobClientSetImmutabilityPolicyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetImmutabilityPolicyResult contains the result from method blobClient.SetImmutabilityPolicy. -type blobClientSetImmutabilityPolicyResult struct { +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1017,7 +871,7 @@ type blobClientSetImmutabilityPolicyResult struct { ImmutabilityPolicyExpiry *time.Time // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicyMode // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1026,15 +880,8 @@ type blobClientSetImmutabilityPolicyResult struct { Version *string } -// blobClientSetLegalHoldResponse contains the response from method blobClient.SetLegalHold. -type blobClientSetLegalHoldResponse struct { - blobClientSetLegalHoldResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetLegalHoldResult contains the result from method blobClient.SetLegalHold. -type blobClientSetLegalHoldResult struct { +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1051,15 +898,8 @@ type blobClientSetLegalHoldResult struct { Version *string } -// blobClientSetMetadataResponse contains the response from method blobClient.SetMetadata. -type blobClientSetMetadataResponse struct { - blobClientSetMetadataResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetMetadataResult contains the result from method blobClient.SetMetadata. 
-type blobClientSetMetadataResult struct { +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1067,7 +907,7 @@ type blobClientSetMetadataResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1091,15 +931,8 @@ type blobClientSetMetadataResult struct { VersionID *string } -// blobClientSetTagsResponse contains the response from method blobClient.SetTags. -type blobClientSetTagsResponse struct { - blobClientSetTagsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetTagsResult contains the result from method blobClient.SetTags. -type blobClientSetTagsResult struct { +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1113,15 +946,8 @@ type blobClientSetTagsResult struct { Version *string } -// blobClientSetTierResponse contains the response from method blobClient.SetTier. -type blobClientSetTierResponse struct { - blobClientSetTierResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetTierResult contains the result from method blobClient.SetTier. -type blobClientSetTierResult struct { +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1132,15 +958,8 @@ type blobClientSetTierResult struct { Version *string } -// blobClientStartCopyFromURLResponse contains the response from method blobClient.StartCopyFromURL. -type blobClientStartCopyFromURLResponse struct { - blobClientStartCopyFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientStartCopyFromURLResult contains the result from method blobClient.StartCopyFromURL. -type blobClientStartCopyFromURLResult struct { +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1154,7 +973,7 @@ type blobClientStartCopyFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1169,15 +988,8 @@ type blobClientStartCopyFromURLResult struct { VersionID *string } -// blobClientUndeleteResponse contains the response from method blobClient.Undelete. -type blobClientUndeleteResponse struct { - blobClientUndeleteResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientUndeleteResult contains the result from method blobClient.Undelete. 
-type blobClientUndeleteResult struct { +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1191,15 +1003,8 @@ type blobClientUndeleteResult struct { Version *string } -// blockBlobClientCommitBlockListResponse contains the response from method blockBlobClient.CommitBlockList. -type blockBlobClientCommitBlockListResponse struct { - blockBlobClientCommitBlockListResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientCommitBlockListResult contains the result from method blockBlobClient.CommitBlockList. -type blockBlobClientCommitBlockListResult struct { +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1210,7 +1015,7 @@ type blockBlobClientCommitBlockListResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1237,15 +1042,8 @@ type blockBlobClientCommitBlockListResult struct { XMSContentCRC64 []byte } -// blockBlobClientGetBlockListResponse contains the response from method blockBlobClient.GetBlockList. -type blockBlobClientGetBlockListResponse struct { - blockBlobClientGetBlockListResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientGetBlockListResult contains the result from method blockBlobClient.GetBlockList. -type blockBlobClientGetBlockListResult struct { +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { BlockList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. BlobContentLength *int64 `xml:"BlobContentLength"` @@ -1260,7 +1058,7 @@ type blockBlobClientGetBlockListResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time `xml:"LastModified"` @@ -1272,15 +1070,8 @@ type blockBlobClientGetBlockListResult struct { Version *string `xml:"Version"` } -// blockBlobClientPutBlobFromURLResponse contains the response from method blockBlobClient.PutBlobFromURL. -type blockBlobClientPutBlobFromURLResponse struct { - blockBlobClientPutBlobFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientPutBlobFromURLResult contains the result from method blockBlobClient.PutBlobFromURL. -type blockBlobClientPutBlobFromURLResult struct { +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
ClientRequestID *string @@ -1291,7 +1082,7 @@ type blockBlobClientPutBlobFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1315,15 +1106,8 @@ type blockBlobClientPutBlobFromURLResult struct { VersionID *string } -// blockBlobClientStageBlockFromURLResponse contains the response from method blockBlobClient.StageBlockFromURL. -type blockBlobClientStageBlockFromURLResponse struct { - blockBlobClientStageBlockFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientStageBlockFromURLResult contains the result from method blockBlobClient.StageBlockFromURL. -type blockBlobClientStageBlockFromURLResult struct { +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1352,15 +1136,8 @@ type blockBlobClientStageBlockFromURLResult struct { XMSContentCRC64 []byte } -// blockBlobClientStageBlockResponse contains the response from method blockBlobClient.StageBlock. -type blockBlobClientStageBlockResponse struct { - blockBlobClientStageBlockResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientStageBlockResult contains the result from method blockBlobClient.StageBlock. -type blockBlobClientStageBlockResult struct { +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobClientStageBlockResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1389,15 +1166,8 @@ type blockBlobClientStageBlockResult struct { XMSContentCRC64 []byte } -// blockBlobClientUploadResponse contains the response from method blockBlobClient.Upload. -type blockBlobClientUploadResponse struct { - blockBlobClientUploadResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientUploadResult contains the result from method blockBlobClient.Upload. -type blockBlobClientUploadResult struct { +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1408,7 +1178,7 @@ type blockBlobClientUploadResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1432,15 +1202,8 @@ type blockBlobClientUploadResult struct { VersionID *string } -// containerClientAcquireLeaseResponse contains the response from method containerClient.AcquireLease. -type containerClientAcquireLeaseResponse struct { - containerClientAcquireLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientAcquireLeaseResult contains the result from method containerClient.AcquireLease. 
-type containerClientAcquireLeaseResult struct { +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1448,7 +1211,7 @@ type containerClientAcquireLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1463,15 +1226,8 @@ type containerClientAcquireLeaseResult struct { Version *string } -// containerClientBreakLeaseResponse contains the response from method containerClient.BreakLease. -type containerClientBreakLeaseResponse struct { - containerClientBreakLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientBreakLeaseResult contains the result from method containerClient.BreakLease. -type containerClientBreakLeaseResult struct { +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1479,7 +1235,7 @@ type containerClientBreakLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1494,15 +1250,8 @@ type containerClientBreakLeaseResult struct { Version *string } -// containerClientChangeLeaseResponse contains the response from method containerClient.ChangeLease. -type containerClientChangeLeaseResponse struct { - containerClientChangeLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientChangeLeaseResult contains the result from method containerClient.ChangeLease. -type containerClientChangeLeaseResult struct { +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1510,7 +1259,7 @@ type containerClientChangeLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1525,15 +1274,8 @@ type containerClientChangeLeaseResult struct { Version *string } -// containerClientCreateResponse contains the response from method containerClient.Create. -type containerClientCreateResponse struct { - containerClientCreateResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientCreateResult contains the result from method containerClient.Create. -type containerClientCreateResult struct { +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
ClientRequestID *string @@ -1541,7 +1283,7 @@ type containerClientCreateResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1553,15 +1295,8 @@ type containerClientCreateResult struct { Version *string } -// containerClientDeleteResponse contains the response from method containerClient.Delete. -type containerClientDeleteResponse struct { - containerClientDeleteResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientDeleteResult contains the result from method containerClient.Delete. -type containerClientDeleteResult struct { +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1575,15 +1310,8 @@ type containerClientDeleteResult struct { Version *string } -// containerClientGetAccessPolicyResponse contains the response from method containerClient.GetAccessPolicy. -type containerClientGetAccessPolicyResponse struct { - containerClientGetAccessPolicyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientGetAccessPolicyResult contains the result from method containerClient.GetAccessPolicy. -type containerClientGetAccessPolicyResult struct { +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` @@ -1594,7 +1322,7 @@ type containerClientGetAccessPolicyResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time `xml:"LastModified"` @@ -1609,15 +1337,8 @@ type containerClientGetAccessPolicyResult struct { Version *string `xml:"Version"` } -// containerClientGetAccountInfoResponse contains the response from method containerClient.GetAccountInfo. -type containerClientGetAccountInfoResponse struct { - containerClientGetAccountInfoResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientGetAccountInfoResult contains the result from method containerClient.GetAccountInfo. -type containerClientGetAccountInfoResult struct { +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { // AccountKind contains the information returned from the x-ms-account-kind header response. AccountKind *AccountKind @@ -1637,15 +1358,8 @@ type containerClientGetAccountInfoResult struct { Version *string } -// containerClientGetPropertiesResponse contains the response from method containerClient.GetProperties. -type containerClientGetPropertiesResponse struct { - containerClientGetPropertiesResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// containerClientGetPropertiesResult contains the result from method containerClient.GetProperties. -type containerClientGetPropertiesResult struct { +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. BlobPublicAccess *PublicAccessType @@ -1662,7 +1376,7 @@ type containerClientGetPropertiesResult struct { DenyEncryptionScopeOverride *bool // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. HasImmutabilityPolicy *bool @@ -1696,15 +1410,8 @@ type containerClientGetPropertiesResult struct { Version *string } -// containerClientListBlobFlatSegmentResponse contains the response from method containerClient.ListBlobFlatSegment. -type containerClientListBlobFlatSegmentResponse struct { - containerClientListBlobFlatSegmentResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientListBlobFlatSegmentResult contains the result from method containerClient.ListBlobFlatSegment. -type containerClientListBlobFlatSegmentResult struct { +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.ListBlobFlatSegment. +type ContainerClientListBlobFlatSegmentResponse struct { ListBlobsFlatSegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -1722,15 +1429,8 @@ type containerClientListBlobFlatSegmentResult struct { Version *string `xml:"Version"` } -// containerClientListBlobHierarchySegmentResponse contains the response from method containerClient.ListBlobHierarchySegment. -type containerClientListBlobHierarchySegmentResponse struct { - containerClientListBlobHierarchySegmentResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientListBlobHierarchySegmentResult contains the result from method containerClient.ListBlobHierarchySegment. -type containerClientListBlobHierarchySegmentResult struct { +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.ListBlobHierarchySegment. +type ContainerClientListBlobHierarchySegmentResponse struct { ListBlobsHierarchySegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -1748,15 +1448,8 @@ type containerClientListBlobHierarchySegmentResult struct { Version *string `xml:"Version"` } -// containerClientReleaseLeaseResponse contains the response from method containerClient.ReleaseLease. -type containerClientReleaseLeaseResponse struct { - containerClientReleaseLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientReleaseLeaseResult contains the result from method containerClient.ReleaseLease. -type containerClientReleaseLeaseResult struct { +// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. 
+type ContainerClientReleaseLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1764,7 +1457,7 @@ type containerClientReleaseLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1776,15 +1469,8 @@ type containerClientReleaseLeaseResult struct { Version *string } -// containerClientRenameResponse contains the response from method containerClient.Rename. -type containerClientRenameResponse struct { - containerClientRenameResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientRenameResult contains the result from method containerClient.Rename. -type containerClientRenameResult struct { +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. +type ContainerClientRenameResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1798,15 +1484,8 @@ type containerClientRenameResult struct { Version *string } -// containerClientRenewLeaseResponse contains the response from method containerClient.RenewLease. -type containerClientRenewLeaseResponse struct { - containerClientRenewLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientRenewLeaseResult contains the result from method containerClient.RenewLease. -type containerClientRenewLeaseResult struct { +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. +type ContainerClientRenewLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1814,7 +1493,7 @@ type containerClientRenewLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1829,15 +1508,8 @@ type containerClientRenewLeaseResult struct { Version *string } -// containerClientRestoreResponse contains the response from method containerClient.Restore. -type containerClientRestoreResponse struct { - containerClientRestoreResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientRestoreResult contains the result from method containerClient.Restore. -type containerClientRestoreResult struct { +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1851,15 +1523,8 @@ type containerClientRestoreResult struct { Version *string } -// containerClientSetAccessPolicyResponse contains the response from method containerClient.SetAccessPolicy. -type containerClientSetAccessPolicyResponse struct { - containerClientSetAccessPolicyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientSetAccessPolicyResult contains the result from method containerClient.SetAccessPolicy. 
-type containerClientSetAccessPolicyResult struct { +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1867,7 +1532,7 @@ type containerClientSetAccessPolicyResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1879,15 +1544,8 @@ type containerClientSetAccessPolicyResult struct { Version *string } -// containerClientSetMetadataResponse contains the response from method containerClient.SetMetadata. -type containerClientSetMetadataResponse struct { - containerClientSetMetadataResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientSetMetadataResult contains the result from method containerClient.SetMetadata. -type containerClientSetMetadataResult struct { +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1895,7 +1553,7 @@ type containerClientSetMetadataResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1907,15 +1565,11 @@ type containerClientSetMetadataResult struct { Version *string } -// containerClientSubmitBatchResponse contains the response from method containerClient.SubmitBatch. -type containerClientSubmitBatchResponse struct { - containerClientSubmitBatchResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. +type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser -// containerClientSubmitBatchResult contains the result from method containerClient.SubmitBatch. -type containerClientSubmitBatchResult struct { // ContentType contains the information returned from the Content-Type header response. ContentType *string @@ -1926,15 +1580,8 @@ type containerClientSubmitBatchResult struct { Version *string } -// pageBlobClientClearPagesResponse contains the response from method pageBlobClient.ClearPages. -type pageBlobClientClearPagesResponse struct { - pageBlobClientClearPagesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientClearPagesResult contains the result from method pageBlobClient.ClearPages. -type pageBlobClientClearPagesResult struct { +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -1948,7 +1595,7 @@ type pageBlobClientClearPagesResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. 
- ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1963,15 +1610,8 @@ type pageBlobClientClearPagesResult struct { XMSContentCRC64 []byte } -// pageBlobClientCopyIncrementalResponse contains the response from method pageBlobClient.CopyIncremental. -type pageBlobClientCopyIncrementalResponse struct { - pageBlobClientCopyIncrementalResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientCopyIncrementalResult contains the result from method pageBlobClient.CopyIncremental. -type pageBlobClientCopyIncrementalResult struct { +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1985,7 +1625,7 @@ type pageBlobClientCopyIncrementalResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1997,15 +1637,8 @@ type pageBlobClientCopyIncrementalResult struct { Version *string } -// pageBlobClientCreateResponse contains the response from method pageBlobClient.Create. -type pageBlobClientCreateResponse struct { - pageBlobClientCreateResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientCreateResult contains the result from method pageBlobClient.Create. -type pageBlobClientCreateResult struct { +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. +type PageBlobClientCreateResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -2016,7 +1649,7 @@ type pageBlobClientCreateResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -2040,15 +1673,8 @@ type pageBlobClientCreateResult struct { VersionID *string } -// pageBlobClientGetPageRangesDiffResponse contains the response from method pageBlobClient.GetPageRangesDiff. -type pageBlobClientGetPageRangesDiffResponse struct { - pageBlobClientGetPageRangesDiffResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientGetPageRangesDiffResult contains the result from method pageBlobClient.GetPageRangesDiff. -type pageBlobClientGetPageRangesDiffResult struct { +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.GetPageRangesDiff. +type PageBlobClientGetPageRangesDiffResponse struct { PageList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. BlobContentLength *int64 `xml:"BlobContentLength"` @@ -2060,7 +1686,7 @@ type pageBlobClientGetPageRangesDiffResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time `xml:"LastModified"` @@ -2072,15 +1698,8 @@ type pageBlobClientGetPageRangesDiffResult struct { Version *string `xml:"Version"` } -// pageBlobClientGetPageRangesResponse contains the response from method pageBlobClient.GetPageRanges. -type pageBlobClientGetPageRangesResponse struct { - pageBlobClientGetPageRangesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientGetPageRangesResult contains the result from method pageBlobClient.GetPageRanges. -type pageBlobClientGetPageRangesResult struct { +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.GetPageRanges. +type PageBlobClientGetPageRangesResponse struct { PageList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. BlobContentLength *int64 `xml:"BlobContentLength"` @@ -2092,7 +1711,7 @@ type pageBlobClientGetPageRangesResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time `xml:"LastModified"` @@ -2104,15 +1723,8 @@ type pageBlobClientGetPageRangesResult struct { Version *string `xml:"Version"` } -// pageBlobClientResizeResponse contains the response from method pageBlobClient.Resize. -type pageBlobClientResizeResponse struct { - pageBlobClientResizeResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientResizeResult contains the result from method pageBlobClient.Resize. -type pageBlobClientResizeResult struct { +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. +type PageBlobClientResizeResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2123,7 +1735,7 @@ type pageBlobClientResizeResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -2135,15 +1747,8 @@ type pageBlobClientResizeResult struct { Version *string } -// pageBlobClientUpdateSequenceNumberResponse contains the response from method pageBlobClient.UpdateSequenceNumber. -type pageBlobClientUpdateSequenceNumberResponse struct { - pageBlobClientUpdateSequenceNumberResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientUpdateSequenceNumberResult contains the result from method pageBlobClient.UpdateSequenceNumber. -type pageBlobClientUpdateSequenceNumberResult struct { +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2154,7 +1759,7 @@ type pageBlobClientUpdateSequenceNumberResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time @@ -2166,15 +1771,8 @@ type pageBlobClientUpdateSequenceNumberResult struct { Version *string } -// pageBlobClientUploadPagesFromURLResponse contains the response from method pageBlobClient.UploadPagesFromURL. -type pageBlobClientUploadPagesFromURLResponse struct { - pageBlobClientUploadPagesFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientUploadPagesFromURLResult contains the result from method pageBlobClient.UploadPagesFromURL. -type pageBlobClientUploadPagesFromURLResult struct { +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2185,7 +1783,7 @@ type pageBlobClientUploadPagesFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -2209,15 +1807,8 @@ type pageBlobClientUploadPagesFromURLResult struct { XMSContentCRC64 []byte } -// pageBlobClientUploadPagesResponse contains the response from method pageBlobClient.UploadPages. -type pageBlobClientUploadPagesResponse struct { - pageBlobClientUploadPagesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientUploadPagesResult contains the result from method pageBlobClient.UploadPages. -type pageBlobClientUploadPagesResult struct { +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. +type PageBlobClientUploadPagesResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2231,7 +1822,7 @@ type pageBlobClientUploadPagesResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -2255,15 +1846,8 @@ type pageBlobClientUploadPagesResult struct { XMSContentCRC64 []byte } -// serviceClientFilterBlobsResponse contains the response from method serviceClient.FilterBlobs. -type serviceClientFilterBlobsResponse struct { - serviceClientFilterBlobsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientFilterBlobsResult contains the result from method serviceClient.FilterBlobs. -type serviceClientFilterBlobsResult struct { +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. +type ServiceClientFilterBlobsResponse struct { FilterBlobSegment // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2278,15 +1862,8 @@ type serviceClientFilterBlobsResult struct { Version *string `xml:"Version"` } -// serviceClientGetAccountInfoResponse contains the response from method serviceClient.GetAccountInfo. -type serviceClientGetAccountInfoResponse struct { - serviceClientGetAccountInfoResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// serviceClientGetAccountInfoResult contains the result from method serviceClient.GetAccountInfo. -type serviceClientGetAccountInfoResult struct { +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { // AccountKind contains the information returned from the x-ms-account-kind header response. AccountKind *AccountKind @@ -2309,15 +1886,8 @@ type serviceClientGetAccountInfoResult struct { Version *string } -// serviceClientGetPropertiesResponse contains the response from method serviceClient.GetProperties. -type serviceClientGetPropertiesResponse struct { - serviceClientGetPropertiesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientGetPropertiesResult contains the result from method serviceClient.GetProperties. -type serviceClientGetPropertiesResult struct { +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { StorageServiceProperties // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2329,15 +1899,8 @@ type serviceClientGetPropertiesResult struct { Version *string `xml:"Version"` } -// serviceClientGetStatisticsResponse contains the response from method serviceClient.GetStatistics. -type serviceClientGetStatisticsResponse struct { - serviceClientGetStatisticsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientGetStatisticsResult contains the result from method serviceClient.GetStatistics. -type serviceClientGetStatisticsResult struct { +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { StorageServiceStats // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2352,15 +1915,8 @@ type serviceClientGetStatisticsResult struct { Version *string `xml:"Version"` } -// serviceClientGetUserDelegationKeyResponse contains the response from method serviceClient.GetUserDelegationKey. -type serviceClientGetUserDelegationKeyResponse struct { - serviceClientGetUserDelegationKeyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientGetUserDelegationKeyResult contains the result from method serviceClient.GetUserDelegationKey. -type serviceClientGetUserDelegationKeyResult struct { +// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type ServiceClientGetUserDelegationKeyResponse struct { UserDelegationKey // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2375,15 +1931,8 @@ type serviceClientGetUserDelegationKeyResult struct { Version *string `xml:"Version"` } -// serviceClientListContainersSegmentResponse contains the response from method serviceClient.ListContainersSegment. -type serviceClientListContainersSegmentResponse struct { - serviceClientListContainersSegmentResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// serviceClientListContainersSegmentResult contains the result from method serviceClient.ListContainersSegment. -type serviceClientListContainersSegmentResult struct { +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.ListContainersSegment. +type ServiceClientListContainersSegmentResponse struct { ListContainersSegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2395,15 +1944,8 @@ type serviceClientListContainersSegmentResult struct { Version *string `xml:"Version"` } -// serviceClientSetPropertiesResponse contains the response from method serviceClient.SetProperties. -type serviceClientSetPropertiesResponse struct { - serviceClientSetPropertiesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientSetPropertiesResult contains the result from method serviceClient.SetProperties. -type serviceClientSetPropertiesResult struct { +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -2414,15 +1956,11 @@ type serviceClientSetPropertiesResult struct { Version *string } -// serviceClientSubmitBatchResponse contains the response from method serviceClient.SubmitBatch. -type serviceClientSubmitBatchResponse struct { - serviceClientSubmitBatchResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} +// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. +type ServiceClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser -// serviceClientSubmitBatchResult contains the result from method serviceClient.SubmitBatch. -type serviceClientSubmitBatchResult struct { // ContentType contains the information returned from the Content-Type header response. ContentType *string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go similarity index 62% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index 7dcf6ef13e3..1cb779d8481 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "context" @@ -20,16 +21,18 @@ import ( "time" ) -type serviceClient struct { +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use NewServiceClient() instead. +type ServiceClient struct { endpoint string pl runtime.Pipeline } -// newServiceClient creates a new instance of serviceClient with the specified values. 
+// NewServiceClient creates a new instance of ServiceClient with the specified values. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // pl - the pipeline used for sending requests and handling responses. -func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { - client := &serviceClient{ +func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { + client := &ServiceClient{ endpoint: endpoint, pl: pl, } @@ -40,24 +43,25 @@ func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { // expression. Filter blobs searches across all containers within a storage account but can // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. -func (client *serviceClient) FilterBlobs(ctx context.Context, options *serviceClientFilterBlobsOptions) (serviceClientFilterBlobsResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { req, err := client.filterBlobsCreateRequest(ctx, options) if err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) } return client.filterBlobsHandleResponse(resp) } // filterBlobsCreateRequest creates the FilterBlobs request. -func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *serviceClientFilterBlobsOptions) (*policy.Request, error) { +func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -77,17 +81,17 @@ func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, optio reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // filterBlobsHandleResponse handles the FilterBlobs response. 
-func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (serviceClientFilterBlobsResponse, error) { - result := serviceClientFilterBlobsResponse{RawResponse: resp} +func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) { + result := ServiceClientFilterBlobsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -100,36 +104,37 @@ func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (ser if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } return result, nil } // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. -func (client *serviceClient) GetAccountInfo(ctx context.Context, options *serviceClientGetAccountInfoOptions) (serviceClientGetAccountInfoResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) } return client.getAccountInfoHandleResponse(resp) } // getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *serviceClientGetAccountInfoOptions) (*policy.Request, error) { +func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -138,14 +143,14 @@ func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, op reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. 
-func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (serviceClientGetAccountInfoResponse, error) { - result := serviceClientGetAccountInfoResponse{RawResponse: resp} +func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { + result := ServiceClientGetAccountInfoResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -158,7 +163,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) ( if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } result.Date = &date } @@ -171,7 +176,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } @@ -181,24 +186,25 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) ( // GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. -func (client *serviceClient) GetProperties(ctx context.Context, options *serviceClientGetPropertiesOptions) (serviceClientGetPropertiesResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { - return serviceClientGetPropertiesResponse{}, err + return ServiceClientGetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetPropertiesResponse{}, err + return ServiceClientGetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.getPropertiesHandleResponse(resp) } // getPropertiesCreateRequest creates the GetProperties request. 
-func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *serviceClientGetPropertiesOptions) (*policy.Request, error) { +func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -210,17 +216,17 @@ func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. -func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (serviceClientGetPropertiesResponse, error) { - result := serviceClientGetPropertiesResponse{RawResponse: resp} +func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -231,7 +237,7 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (s result.Version = &val } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { - return serviceClientGetPropertiesResponse{}, err + return ServiceClientGetPropertiesResponse{}, err } return result, nil } @@ -239,24 +245,25 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (s // GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. -func (client *serviceClient) GetStatistics(ctx context.Context, options *serviceClientGetStatisticsOptions) (serviceClientGetStatisticsResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. 
+func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) } return client.getStatisticsHandleResponse(resp) } // getStatisticsCreateRequest creates the GetStatistics request. -func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *serviceClientGetStatisticsOptions) (*policy.Request, error) { +func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -268,17 +275,17 @@ func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getStatisticsHandleResponse handles the GetStatistics response. -func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (serviceClientGetStatisticsResponse, error) { - result := serviceClientGetStatisticsResponse{RawResponse: resp} +func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { + result := ServiceClientGetStatisticsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -291,12 +298,12 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (s if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } return result, nil } @@ -304,26 +311,27 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (s // GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 // keyInfo - Key information -// options - serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey +// options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. -func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (serviceClientGetUserDelegationKeyResponse, error) { +func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) } return client.getUserDelegationKeyHandleResponse(resp) } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. -func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (*policy.Request, error) { +func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -335,17 +343,17 @@ func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, runtime.MarshalAsXML(req, keyInfo) } // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. 
-func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (serviceClientGetUserDelegationKeyResponse, error) { - result := serviceClientGetUserDelegationKeyResponse{RawResponse: resp} +func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) { + result := ServiceClientGetUserDelegationKeyResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -358,34 +366,24 @@ func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } return result, nil } -// ListContainersSegment - The List Containers Segment operation returns a list of the containers under the specified account +// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified +// account // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// Generated from API version 2020-10-02 +// options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment // method. -func (client *serviceClient) ListContainersSegment(options *serviceClientListContainersSegmentOptions) *serviceClientListContainersSegmentPager { - return &serviceClientListContainersSegmentPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.listContainersSegmentCreateRequest(ctx, options) - }, - advancer: func(ctx context.Context, resp serviceClientListContainersSegmentResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.ListContainersSegmentResponse.NextMarker) - }, - } -} - // listContainersSegmentCreateRequest creates the ListContainersSegment request. -func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *serviceClientListContainersSegmentOptions) (*policy.Request, error) { +func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -408,17 +406,17 @@ func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // listContainersSegmentHandleResponse handles the ListContainersSegment response. 
-func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (serviceClientListContainersSegmentResponse, error) { - result := serviceClientListContainersSegmentResponse{RawResponse: resp} +func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) { + result := ServiceClientListContainersSegmentResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -429,7 +427,7 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp result.Version = &val } if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { - return serviceClientListContainersSegmentResponse{}, err + return ServiceClientListContainersSegmentResponse{}, err } return result, nil } @@ -437,25 +435,26 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp // SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // storageServiceProperties - The StorageService properties. -// options - serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. -func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (serviceClientSetPropertiesResponse, error) { +// options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { - return serviceClientSetPropertiesResponse{}, err + return ServiceClientSetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientSetPropertiesResponse{}, err + return ServiceClientSetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return serviceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.setPropertiesHandleResponse(resp) } // setPropertiesCreateRequest creates the SetProperties request. 
-func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (*policy.Request, error) { +func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -467,17 +466,17 @@ func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, sto reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, runtime.MarshalAsXML(req, storageServiceProperties) } // setPropertiesHandleResponse handles the SetProperties response. -func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (serviceClientSetPropertiesResponse, error) { - result := serviceClientSetPropertiesResponse{RawResponse: resp} +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -492,28 +491,29 @@ func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (s // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // contentLength - The length of the request. // multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // body - Initial data -// options - serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. -func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (serviceClientSubmitBatchResponse, error) { +// options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
+func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { - return serviceClientSubmitBatchResponse{}, err + return ServiceClientSubmitBatchResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientSubmitBatchResponse{}, err + return ServiceClientSubmitBatchResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) } return client.submitBatchHandleResponse(resp) } // submitBatchCreateRequest creates the SubmitBatch request. -func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (*policy.Request, error) { +func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -525,19 +525,19 @@ func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, conte } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("Content-Type", multipartContentType) - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") - return req, runtime.MarshalAsXML(req, body) + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/xml") } // submitBatchHandleResponse handles the SubmitBatch response. 
-func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (serviceClientSubmitBatchResponse, error) { - result := serviceClientSubmitBatchResponse{RawResponse: resp} +func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) { + result := ServiceClientSubmitBatchResponse{Body: resp.Body} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 42726159b6f..4b4d51aa399 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "strings" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index c51d8d78c12..1ce9d621164 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "regexp" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 1cf97387de2..144ea18e1ab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
-package azblob +package generated import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go new file mode 100644 index 00000000000..a86fc582c52 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -0,0 +1,78 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "context" + "errors" +) + +// BatchTransferOptions identifies options used by doBatchTransfer. +type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + Concurrency uint16 + Operation func(offset int64, chunkSize int64, ctx context.Context) error + OperationName string +} + +// DoBatchTransfer helps to execute operations in a batch manner. +// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Concurrency == 0 { + o.Concurrency = 5 // default concurrency + } + + // Prepare and do parallel operations. + numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + for g := uint16(0); g < o.Concurrency; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + + operationChannel <- func() error { + return o.Operation(offset, curChunkSize, ctx) + } + } + close(operationChannel) + + // Wait for the operations to complete. 
+ var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go similarity index 74% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go index 14c7feda110..8d4d35bdeff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package shared import ( "errors" @@ -12,7 +12,7 @@ import ( type bytesWriter []byte -func newBytesWriter(b []byte) bytesWriter { +func NewBytesWriter(b []byte) bytesWriter { return b } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go new file mode 100644 index 00000000000..c8528a2e3ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go new file mode 100644 index 00000000000..48a5ad842ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -0,0 +1,238 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "fmt" + "io" + "net" + "net/url" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" +) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +type ParsedConnectionString struct { + ServiceURL string + AccountName string + AccountKey string +} + +func ParseConnectionString(connectionString string) (ParsedConnectionString, error) { + const ( + defaultScheme = "https" + defaultSuffix = "core.windows.net" + ) + + connStrMap := make(map[string]string) + connectionString = strings.TrimRight(connectionString, ";") + + splitString := strings.Split(connectionString, ";") + if len(splitString) == 0 { + return ParsedConnectionString{}, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ParsedConnectionString{}, errConnectionString + } + connStrMap[parts[0]] = parts[1] + } + + accountName, ok := connStrMap["AccountName"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountName") + } + + accountKey, ok := connStrMap["AccountKey"] + if !ok { + sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") + } + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), + }, nil + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok { + return ParsedConnectionString{ + ServiceURL: blobEndpoint, + AccountName: accountName, + AccountKey: accountKey, + }, nil + } + + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), + AccountName: accountName, + AccountKey: accountKey, + }, nil +} + +// SerializeBlobTags converts tags to generated.BlobTags +func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { + if tagsMap == nil { + return nil + } + blobTagSet := make([]*generated.BlobTag, 0) + for key, val := range tagsMap { + newKey, newVal := key, val + blobTagSet = append(blobTagSet, &generated.BlobTag{Key: &newKey, Value: 
&newVal}) + } + return &generated.BlobTags{BlobTagSet: blobTagSet} +} + +func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { + if tagsMap == nil { + return nil + } + tags := make([]string, 0) + for key, val := range tagsMap { + tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) + } + blobTagsString := strings.Join(tags, "&") + return &blobTagsString +} + +func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { + if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long + return 0, nil + } + + err := validateSeekableStreamAt0(body) + if err != nil { + return 0, err + } + + count, err := body.Seek(0, io.SeekEnd) + if err != nil { + return 0, errors.New("body stream must be seekable") + } + + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + return count, nil +} + +// return an error if body is not a valid seekable stream at 0 +func validateSeekableStreamAt0(body io.ReadSeeker) error { + if body == nil { // nil body's are "logically" seekable to 0 + return nil + } + if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { + // Help detect programmer error + if err != nil { + return errors.New("body stream must be seekable") + } + return errors.New("body stream must be set to position 0") + } + return nil +} + +func RangeToString(offset, count int64) string { + return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) +} + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +func GenerateLeaseID(leaseID *string) (*string, error) { + if leaseID == nil { + generatedUuid, err := uuid.New() + if err != nil { + return nil, err + } + leaseID = to.Ptr(generatedUuid.String()) + } + return leaseID, nil +} + +func GetClientOptions[T any](o *T) *T { + if o == nil { + return new(T) + } + return o +} + +// IsIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: +// http(s)://IP(:port)/storageaccount/container/... +// As url's Host property, host could be both host or host:port +func IsIPEndpointStyle(host string) bool { + if host == "" { + return false + } + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + // For IPv6, there could be case where SplitHostPort fails for cannot finding port. + // In this case, eliminate the '[' and ']' in the URL. + // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + return net.ParseIP(host) != nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go index 5c40e9bc2ab..f3c9d4be7e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go @@ -2,15 +2,17 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. 
All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package shared import ( "fmt" "sync" ) +const _1MiB = 1024 * 1024 + // TransferManager provides a buffer and thread pool manager for certain transfer options. // It is undefined behavior if code outside this package call any of these methods. type TransferManager interface { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go deleted file mode 100644 index cd2ada9b5db..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go +++ /dev/null @@ -1,150 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "strconv" - "time" -) - -// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. -type CtxWithHTTPHeaderKey struct{} - -// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. -type CtxWithRetryOptionsKey struct{} - -type nopCloser struct { - io.ReadSeeker -} - -func (n nopCloser) Close() error { - return nil -} - -// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. -func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { - return nopCloser{rs} -} - -// BodyDownloadPolicyOpValues is the struct containing the per-operation values -type BodyDownloadPolicyOpValues struct { - Skip bool -} - -func NewResponseError(inner error, resp *http.Response) error { - return &ResponseError{inner: inner, resp: resp} -} - -type ResponseError struct { - inner error - resp *http.Response -} - -// Error implements the error interface for type ResponseError. -func (e *ResponseError) Error() string { - return e.inner.Error() -} - -// Unwrap returns the inner error. -func (e *ResponseError) Unwrap() error { - return e.inner -} - -// RawResponse returns the HTTP response associated with this error. -func (e *ResponseError) RawResponse() *http.Response { - return e.resp -} - -// NonRetriable indicates this error is non-transient. -func (e *ResponseError) NonRetriable() { - // marker method -} - -// Delay waits for the duration to elapse or the context to be cancelled. -func Delay(ctx context.Context, delay time.Duration) error { - select { - case <-time.After(delay): - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// ErrNoBody is returned if the response didn't contain a body. -var ErrNoBody = errors.New("the response did not contain a body") - -// GetJSON reads the response body into a raw JSON object. -// It returns ErrNoBody if there was no content. 
-func GetJSON(resp *http.Response) (map[string]interface{}, error) { - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - return nil, err - } - if len(body) == 0 { - return nil, ErrNoBody - } - // put the body back so it's available to others - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - // unmarshall the body to get the value - var jsonBody map[string]interface{} - if err = json.Unmarshal(body, &jsonBody); err != nil { - return nil, err - } - return jsonBody, nil -} - -const HeaderRetryAfter = "Retry-After" - -// RetryAfter returns non-zero if the response contains a Retry-After header value. -func RetryAfter(resp *http.Response) time.Duration { - if resp == nil { - return 0 - } - ra := resp.Header.Get(HeaderRetryAfter) - if ra == "" { - return 0 - } - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - return time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - return time.Until(t) - } - return 0 -} - -// HasStatusCode returns true if the Response's status code is one of the specified values. -func HasStatusCode(resp *http.Response, statusCodes ...int) bool { - if resp == nil { - return false - } - for _, sc := range statusCodes { - if resp.StatusCode == sc { - return true - } - } - return false -} - -const defaultScope = "/.default" - -// EndpointToScope converts the provided URL endpoint to its default scope. -func EndpointToScope(endpoint string) string { - if endpoint[len(endpoint)-1] != '/' { - endpoint += "/" - } - return endpoint + defaultScope -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md new file mode 100644 index 00000000000..1b1a4b45d54 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md @@ -0,0 +1,76 @@ +# Guide to migrate from `azure-storage-blob-go` to `azblob` + +This guide is intended to assist in the migration from the `azure-storage-blob-go` module, or previous betas of `azblob`, to the latest releases of the `azblob` module. + +## Simplified API surface area + +The redesign of the `azblob` module separates clients into various sub-packages. +In previous versions, the public surface area was "flat", so all clients and supporting types were in the `azblob` package. +This made it difficult to navigate the public surface area. + +## Clients + +In `azure-storage-blob-go` a client constructor always requires a `url.URL` and `Pipeline` parameters. + +In `azblob` a client constructor always requires a `string` URL, any specified credential type, and a `*ClientOptions` for optional values. You pass `nil` to accept default options. + +```go +// new code +client, err := azblob.NewClient("", cred, nil) +``` + +## Authentication + +In `azure-storage-blob-go` you created a `Pipeline` with the required credential type. This pipeline was then passed to the client constructor. + +In `azblob`, you pass the required credential directly to the client constructor. + +```go +// new code. cred is an AAD token credential created from the azidentity module +client, err := azblob.NewClient("", cred, nil) +``` + +The `azure-storage-blob-go` module provided limited support for OAuth token authentication via `NewTokenCredential`. 
+This been replaced by using Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme). + +Authentication with a shared key via `NewSharedKeyCredential` remains unchanged. + +In `azure-storage-blob-go` you created a `Pipeline` with `NewAnonymousCredential` to support anonymous or SAS authentication. + +In `azblob` you use the construtor `NewClientWithNoCredential()` instead. + +```go +// new code +client, err := azblob.NewClientWithNoCredential("", nil) +``` + +## Listing blobs/containers + +In `azure-storage-blob-go` you explicitly created a `Marker` type that was used to page over results ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-package)). + +In `azblob`, operations that return paginated values return a `*runtime.Pager[T]`. + +```go +// new code +pager := client.NewListBlobsFlatPager("my-container", nil) +for pager.More() { + page, err := pager.NextPage(context.TODO()) + // process results +} +``` + +## Configuring the HTTP pipeline + +In `azure-storage-blob-go` you explicitly created a HTTP pipeline with configuration before creating a client. +This pipeline instance was then passed as an argument to the client constructor ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-NewPipeline)). + +In `azblob` a HTTP pipeline is created during client construction. The pipeline is configured through the `azcore.ClientOptions` type. + +```go +// new code +client, err := azblob.NewClient(account, cred, &azblob.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + // configure HTTP pipeline options here + }, +}) +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go new file mode 100644 index 00000000000..099f48d1721 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// CreateContainerOptions contains the optional parameters for the ContainerClient.Create method. +type CreateContainerOptions = service.CreateContainerOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = service.DeleteContainerOptions + +// DeleteBlobOptions contains the optional parameters for the Client.Delete method. +type DeleteBlobOptions = blob.DeleteOptions + +// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. +type DownloadStreamOptions = blob.DownloadStreamOptions + +// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method. 
+type ListBlobsFlatOptions = container.ListBlobsFlatOptions + +// ListBlobsInclude indicates what additional information the service should return with each blob. +type ListBlobsInclude = container.ListBlobsInclude + +// ListContainersOptions contains the optional parameters for the container.Client.ListContainers operation +type ListContainersOptions = service.ListContainersOptions + +// UploadBufferOptions provides set of configurations for UploadBuffer operation +type UploadBufferOptions = blockblob.UploadBufferOptions + +// UploadFileOptions provides set of configurations for UploadFile operation +type UploadFileOptions = blockblob.UploadFileOptions + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions = blockblob.UploadStreamOptions + +// DownloadBufferOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadBufferOptions = blob.DownloadBufferOptions + +// DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadFileOptions = blob.DownloadFileOptions + +// CpkInfo contains a group of parameters for client provided encryption key. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CpkScopeInfo = generated.ContainerCpkScopeInfo + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// ListContainersInclude indicates what additional information the service should return with each container. +type ListContainersInclude = service.ListContainersInclude + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy = blob.ObjectReplicationPolicy + +// RetryReaderOptions contains properties which can help to decide when to do retry. +type RetryReaderOptions = blob.RetryReaderOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go new file mode 100644 index 00000000000..e210a76e6e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -0,0 +1,403 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "context" + "io" + "net/http" + "net/url" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a client to an Azure Storage page blob; +type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (pb *Client) generated() *generated.PageBlobClient { + _, pageBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) + return pageBlob +} + +// URL returns the URL endpoint used by the Client object. +func (pb *Client) URL() string { + return pb.generated().Endpoint() +} + +// BlobClient returns the embedded blob client for this AppendBlob client. +func (pb *Client) BlobClient() *blob.Client { + innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) + return (*blob.Client)(innerBlob) +} + +func (pb *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) +} + +// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (pb *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(pb.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil +} + +// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the version returning a URL to the base blob. +func (pb *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(pb.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil +} + +// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (CreateResponse, error) { + createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + + resp, err := pb.generated().Create(ctx, 0, size, createOptions, HTTPHeaders, + leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + return resp, err +} + +// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (UploadPagesResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + + if err != nil { + return UploadPagesResponse{}, err + } + + uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, + cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. +// The sourceOffset specifies the start offset of source data to copy from. +// The destOffset specifies the start offset of data in page blob will be written to. +// The count must be a multiple of 512 bytes. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. +func (pb *Client) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, + o *UploadPagesFromURLOptions) (UploadPagesFromURLResponse, error) { + + uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, + modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + + resp, err := pb.generated().UploadPagesFromURL(ctx, source, shared.RangeToString(sourceOffset, count), 0, + shared.RangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, + sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + + return resp, err +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *Client) ClearPages(ctx context.Context, rnge blob.HTTPRange, options *ClearPagesOptions) (ClearPagesResponse, error) { + clearOptions := &generated.PageBlobClientClearPagesOptions{ + Range: exported.FormatHTTPRange(rnge), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.generated().ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, + cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// NewGetPageRangesPager returns the list of valid page ranges for a page blob or snapshot of a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. 
+func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[GetPageRangesResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesResponse]{ + More: func(page GetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesResponse) (GetPageRangesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesResponse{}, err + } + resp, err := pb.generated().Pipeline().Do(req) + if err != nil { + return GetPageRangesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesHandleResponse(resp) + }, + }) +} + +// NewGetPageRangesDiffPager gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtime.Pager[GetPageRangesDiffResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesDiffResponse]{ + More: func(page GetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesDiffResponse) (GetPageRangesDiffResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesDiffResponse{}, err + } + resp, err := pb.generated().Pipeline().Do(req) + if err != nil { + return GetPageRangesDiffResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesDiffResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) { + resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() + + resp, err := pb.generated().Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return resp, err +} + +// UpdateSequenceNumber sets the page blob's sequence number. 
+func (pb *Client) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberOptions) (UpdateSequenceNumberResponse, error) { + actionType, updateOptions, lac, mac := options.format() + resp, err := pb.generated().UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) + + return resp, err +} + +// StartCopyIncremental begins an operation to start an incremental copy from one-page blob's snapshot to this page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. +// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and +// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. +func (pb *Client) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *CopyIncrementalOptions) (CopyIncrementalResponse, error) { + copySourceURL, err := url.Parse(copySource) + if err != nil { + return CopyIncrementalResponse{}, err + } + + queryParams := copySourceURL.Query() + queryParams.Set("snapshot", prevSnapshot) + copySourceURL.RawQuery = queryParams.Encode() + + pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format() + resp, err := pb.generated().CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions) + + return resp, err +} + +// Redeclared APIs + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (pb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return pb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return pb.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (pb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return pb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return pb.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return pb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (pb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return pb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (pb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return pb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (pb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return pb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (pb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return pb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (pb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return pb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return pb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
+func (pb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return pb.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (pb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return pb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (pb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return pb.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go new file mode 100644 index 00000000000..646587cec8d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go @@ -0,0 +1,65 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const ( + // PageBytes indicates the number of bytes in a page (512). + PageBytes = 512 +) + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier +type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP10 + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP15 + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP20 + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP30 + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP4 + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP40 + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP50 + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP6 + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP60 + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP70 + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP80 +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. 
+func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return generated.PossiblePremiumPageBlobAccessTierValues() +} + +// SequenceNumberActionType defines values for SequenceNumberActionType +type SequenceNumberActionType = generated.SequenceNumberActionType + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = generated.SequenceNumberActionTypeMax + SequenceNumberActionTypeUpdate SequenceNumberActionType = generated.SequenceNumberActionTypeUpdate + SequenceNumberActionTypeIncrement SequenceNumberActionType = generated.SequenceNumberActionTypeIncrement +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return generated.PossibleSequenceNumberActionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go similarity index 51% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index 2be2758736a..c1b1194ff83 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -2,126 +2,123 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package pageblob import ( - "strconv" "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) -// --------------------------------------------------------------------------------------------------------------------- +// Type Declarations --------------------------------------------------------------------- -func rangeToString(offset, count int64) string { - return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) -} +// PageList - the list of pages +type PageList = generated.PageList -// --------------------------------------------------------------------------------------------------------------------- +// ClearRange defines a range of pages. +type ClearRange = generated.ClearRange + +// PageRange defines a range of pages. +type PageRange = generated.PageRange + +// SequenceNumberAccessConditions contains a group of parameters for the Client.UploadPages method. +type SequenceNumberAccessConditions = generated.SequenceNumberAccessConditions -// PageBlobCreateOptions provides set of configurations for CreatePageBlob operation -type PageBlobCreateOptions struct { +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 + SequenceNumber *int64 + // Optional. 
Used to set blob tags in various blob operations. - BlobTagsMap map[string]string + Tags map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. Metadata map[string]string + // Optional. Indicates the tier to be set on the page blob. Tier *PremiumPageBlobAccessTier - HTTPHeaders *BlobHTTPHeaders + HTTPHeaders *blob.HTTPHeaders - CpkInfo *CpkInfo + CpkInfo *blob.CpkInfo - CpkScopeInfo *CpkScopeInfo + CpkScopeInfo *blob.CpkScopeInfo - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions // Specifies the date time when the blobs immutability policy is set to expire. ImmutabilityPolicyExpiry *time.Time // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *blob.ImmutabilityPolicyMode // Specified if a legal hold should be set on the blob. LegalHold *bool } -func (o *PageBlobCreateOptions) format() (*pageBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { +func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } - options := &pageBlobClientCreateOptions{ - BlobSequenceNumber: o.BlobSequenceNumber, - BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + options := &generated.PageBlobClientCreateOptions{ + BlobSequenceNumber: o.SequenceNumber, + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), Metadata: o.Metadata, Tier: o.Tier, } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions } -// PageBlobCreateResponse contains the response from method PageBlobClient.Create. -type PageBlobCreateResponse struct { - pageBlobClientCreateResponse -} - -func toPageBlobCreateResponse(resp pageBlobClientCreateResponse) PageBlobCreateResponse { - return PageBlobCreateResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobUploadPagesOptions provides set of configurations for UploadPages operation -type PageBlobUploadPagesOptions struct { - // Specify the transactional crc64 for the body, to be validated by the service. - PageRange *HttpRange +// UploadPagesOptions contains the optional parameters for the Client.UploadPages method. +type UploadPagesOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + TransactionalContentCRC64 []byte // Specify the transactional md5 for the body, to be validated by the service. 
TransactionalContentMD5 []byte - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobUploadPagesOptions) format() (*pageBlobClientUploadPagesOptions, *LeaseAccessConditions, - *CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { +func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptions, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } - options := &pageBlobClientUploadPagesOptions{ + options := &generated.PageBlobClientUploadPagesOptions{ TransactionalContentCRC64: o.TransactionalContentCRC64, TransactionalContentMD5: o.TransactionalContentMD5, + Range: exported.FormatHTTPRange(o.Range), } - if o.PageRange != nil { - options.Range = o.PageRange.format() - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } -// PageBlobUploadPagesResponse contains the response from method PageBlobClient.UploadPages. -type PageBlobUploadPagesResponse struct { - pageBlobClientUploadPagesResponse -} - -func toPageBlobUploadPagesResponse(resp pageBlobClientUploadPagesResponse) PageBlobUploadPagesResponse { - return PageBlobUploadPagesResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobUploadPagesFromURLOptions provides set of configurations for UploadPagesFromURL operation -type PageBlobUploadPagesFromURLOptions struct { +// UploadPagesFromURLOptions contains the optional parameters for the Client.UploadPagesFromURL method. +type UploadPagesFromURLOptions struct { // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string // Specify the md5 calculated for the range of bytes that must be read from the copy source. @@ -129,75 +126,57 @@ type PageBlobUploadPagesFromURLOptions struct { // Specify the crc64 calculated for the range of bytes that must be read from the copy source. 
SourceContentCRC64 []byte - CpkInfo *CpkInfo + CpkInfo *blob.CpkInfo - CpkScopeInfo *CpkScopeInfo + CpkScopeInfo *blob.CpkScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions - SourceModifiedAccessConditions *SourceModifiedAccessConditions + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobUploadPagesFromURLOptions) format() (*pageBlobClientUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo, - *LeaseAccessConditions, *SequenceNumberAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { +func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, + *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil, nil } - options := &pageBlobClientUploadPagesFromURLOptions{ + options := &generated.PageBlobClientUploadPagesFromURLOptions{ SourceContentMD5: o.SourceContentMD5, SourceContentcrc64: o.SourceContentCRC64, CopySourceAuthorization: o.CopySourceAuthorization, } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions } -// PageBlobUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL -type PageBlobUploadPagesFromURLResponse struct { - pageBlobClientUploadPagesFromURLResponse -} - -func toPageBlobUploadPagesFromURLResponse(resp pageBlobClientUploadPagesFromURLResponse) PageBlobUploadPagesFromURLResponse { - return PageBlobUploadPagesFromURLResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobClearPagesOptions provides set of configurations for PageBlobClient.ClearPages operation -type PageBlobClearPagesOptions struct { - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo +// ClearPagesOptions contains the optional parameters for the Client.ClearPages operation +type ClearPagesOptions struct { + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobClearPagesOptions) format() (*LeaseAccessConditions, *CpkInfo, - *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { +func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } -// PageBlobClearPagesResponse contains the response from method PageBlobClient.ClearPages -type PageBlobClearPagesResponse struct { - pageBlobClientClearPagesResponse 
-} - -func toPageBlobClearPagesResponse(resp pageBlobClientClearPagesResponse) PageBlobClearPagesResponse { - return PageBlobClearPagesResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobGetPageRangesOptions provides set of configurations for GetPageRanges operation -type PageBlobGetPageRangesOptions struct { +// GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. +type GetPageRangesOptions struct { Marker *string // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the @@ -215,43 +194,34 @@ type PageBlobGetPageRangesOptions struct { // specified by prevsnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string - // Optional, you can specify whether a particular range of the blob is read - PageRange *HttpRange + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more // information on working with blob snapshots, see Creating a Snapshot of a Blob. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] Snapshot *string - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobGetPageRangesOptions) format() (*pageBlobClientGetPageRangesOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &pageBlobClientGetPageRangesOptions{ + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.PageBlobClientGetPageRangesOptions{ Marker: o.Marker, Maxresults: o.MaxResults, - Range: o.PageRange.format(), + Range: exported.FormatHTTPRange(o.Range), Snapshot: o.Snapshot, }, leaseAccessConditions, modifiedAccessConditions } -// PageBlobGetPageRangesPager provides operations for iterating over paged responses -type PageBlobGetPageRangesPager struct { - *pageBlobClientGetPageRangesPager -} - -func toPageBlobGetPageRangesPager(resp *pageBlobClientGetPageRangesPager) *PageBlobGetPageRangesPager { - return &PageBlobGetPageRangesPager{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobGetPageRangesDiffOptions provides set of configurations for PageBlobClient.GetPageRangesDiff operation -type PageBlobGetPageRangesDiffOptions struct { +// GetPageRangesDiffOptions contains the optional parameters for the Client.NewGetPageRangesDiffPager method. +type GetPageRangesDiffOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing // operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used @@ -274,115 +244,90 @@ type PageBlobGetPageRangesDiffOptions struct { // specified by prevsnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string - // Optional, you can specify whether a particular range of the blob is read - PageRange *HttpRange + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more // information on working with blob snapshots, see Creating a Snapshot of a Blob. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] Snapshot *string - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobGetPageRangesDiffOptions) format() (*pageBlobClientGetPageRangesDiffOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRangesDiffOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &pageBlobClientGetPageRangesDiffOptions{ + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.PageBlobClientGetPageRangesDiffOptions{ Marker: o.Marker, Maxresults: o.MaxResults, PrevSnapshotURL: o.PrevSnapshotURL, Prevsnapshot: o.PrevSnapshot, - Range: o.PageRange.format(), + Range: exported.FormatHTTPRange(o.Range), Snapshot: o.Snapshot, }, leaseAccessConditions, modifiedAccessConditions } -// PageBlobGetPageRangesDiffPager provides operations for iterating over paged responses -type PageBlobGetPageRangesDiffPager struct { - *pageBlobClientGetPageRangesDiffPager -} - -func toPageBlobGetPageRangesDiffPager(resp *pageBlobClientGetPageRangesDiffPager) *PageBlobGetPageRangesDiffPager { - return &PageBlobGetPageRangesDiffPager{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobResizeOptions provides set of configurations for PageBlobClient.Resize operation -type PageBlobResizeOptions struct { - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - BlobAccessConditions *BlobAccessConditions +// ResizeOptions contains the optional parameters for the Client.Resize method. 
+type ResizeOptions struct { + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions } -func (o *PageBlobResizeOptions) format() (*pageBlobClientResizeOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { +func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions } -// PageBlobResizeResponse contains the response from method PageBlobClient.Resize -type PageBlobResizeResponse struct { - pageBlobClientResizeResponse -} - -func toPageBlobResizeResponse(resp pageBlobClientResizeResponse) PageBlobResizeResponse { - return PageBlobResizeResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobUpdateSequenceNumberOptions provides set of configurations for PageBlobClient.UpdateSequenceNumber operation -type PageBlobUpdateSequenceNumberOptions struct { +// UpdateSequenceNumberOptions contains the optional parameters for the Client.UpdateSequenceNumber method. +type UpdateSequenceNumberOptions struct { ActionType *SequenceNumberActionType - BlobSequenceNumber *int64 + SequenceNumber *int64 - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobUpdateSequenceNumberOptions) format() (*SequenceNumberActionType, *pageBlobClientUpdateSequenceNumberOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *UpdateSequenceNumberOptions) format() (*generated.SequenceNumberActionType, *generated.PageBlobClientUpdateSequenceNumberOptions, + *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } - options := &pageBlobClientUpdateSequenceNumberOptions{ - BlobSequenceNumber: o.BlobSequenceNumber, + options := &generated.PageBlobClientUpdateSequenceNumberOptions{ + BlobSequenceNumber: o.SequenceNumber, } if *o.ActionType == SequenceNumberActionTypeIncrement { options.BlobSequenceNumber = nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions } -// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber -type PageBlobUpdateSequenceNumberResponse struct { - pageBlobClientUpdateSequenceNumberResponse -} - -func toPageBlobUpdateSequenceNumberResponse(resp pageBlobClientUpdateSequenceNumberResponse) PageBlobUpdateSequenceNumberResponse { - return PageBlobUpdateSequenceNumberResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobCopyIncrementalOptions provides set of configurations for PageBlobClient.StartCopyIncremental operation -type PageBlobCopyIncrementalOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions +// CopyIncrementalOptions contains the optional parameters for the 
Client.StartCopyIncremental method. +type CopyIncrementalOptions struct { + ModifiedAccessConditions *blob.ModifiedAccessConditions } -func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementalOptions, *ModifiedAccessConditions) { +func (o *CopyIncrementalOptions) format() (*generated.PageBlobClientCopyIncrementalOptions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil } @@ -390,13 +335,4 @@ func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementa return nil, o.ModifiedAccessConditions } -// PageBlobCopyIncrementalResponse contains the response from method PageBlobClient.StartCopyIncremental -type PageBlobCopyIncrementalResponse struct { - pageBlobClientCopyIncrementalResponse -} - -func toPageBlobCopyIncrementalResponse(resp pageBlobClientCopyIncrementalResponse) PageBlobCopyIncrementalResponse { - return PageBlobCopyIncrementalResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go new file mode 100644 index 00000000000..876efbab1d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.PageBlobClientCreateResponse + +// UploadPagesResponse contains the response from method Client.UploadPages. +type UploadPagesResponse = generated.PageBlobClientUploadPagesResponse + +// UploadPagesFromURLResponse contains the response from method Client.UploadPagesFromURL. +type UploadPagesFromURLResponse = generated.PageBlobClientUploadPagesFromURLResponse + +// ClearPagesResponse contains the response from method Client.ClearPages. +type ClearPagesResponse = generated.PageBlobClientClearPagesResponse + +// GetPageRangesResponse contains the response from method Client.NewGetPageRangesPager. +type GetPageRangesResponse = generated.PageBlobClientGetPageRangesResponse + +// GetPageRangesDiffResponse contains the response from method Client.NewGetPageRangesDiffPager. +type GetPageRangesDiffResponse = generated.PageBlobClientGetPageRangesDiffResponse + +// ResizeResponse contains the response from method Client.Resize. +type ResizeResponse = generated.PageBlobClientResizeResponse + +// UpdateSequenceNumberResponse contains the response from method Client.UpdateSequenceNumber. +type UpdateSequenceNumberResponse = generated.PageBlobClientUpdateSequenceNumberResponse + +// CopyIncrementalResponse contains the response from method Client.StartCopyIncremental. 
+type CopyIncrementalResponse = generated.PageBlobClientCopyIncrementalResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go new file mode 100644 index 00000000000..86b05d098f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// CreateContainerResponse contains the response from method container.Client.Create. +type CreateContainerResponse = service.CreateContainerResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = service.DeleteContainerResponse + +// DeleteBlobResponse contains the response from method blob.Client.Delete. +type DeleteBlobResponse = blob.DeleteResponse + +// UploadResponse contains the response from method blockblob.Client.CommitBlockList. +type UploadResponse = blockblob.CommitBlockListResponse + +// DownloadStreamResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. +type DownloadStreamResponse = blob.DownloadStreamResponse + +// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = container.ListBlobsFlatResponse + +// ListContainersResponse contains the response from method service.Client.ListContainersSegment. +type ListContainersResponse = service.ListContainersResponse + +// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadBufferResponse = blockblob.UploadBufferResponse + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadFileResponse = blockblob.UploadFileResponse + +// UploadStreamResponse contains the response from method Client.CommitBlockList. +type UploadStreamResponse = blockblob.CommitBlockListResponse + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse = generated.ListContainersSegmentResponse + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go new file mode 100644 index 00000000000..fa76ed95c41 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -0,0 +1,316 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+
+package sas
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+type SharedKeyCredential = exported.SharedKeyCredential
+
+// UserDelegationCredential contains an account's name and its user delegation key.
+type UserDelegationCredential = exported.UserDelegationCredential
+
+// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSignatureValues struct {
+	Version       string    `param:"sv"`  // If not specified, this defaults to Version
+	Protocol      Protocol  `param:"spr"` // See the Protocol* constants
+	StartTime     time.Time `param:"st"`  // Not specified if IsZero
+	ExpiryTime    time.Time `param:"se"`  // Not specified if IsZero
+	Permissions   string    `param:"sp"`  // Create by initializing an AccountPermissions and then call String()
+	IPRange       IPRange   `param:"sip"`
+	Services      string    `param:"ss"`  // Create by initializing AccountServices and then call String()
+	ResourceTypes string    `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
+}
+
+// SignWithSharedKey uses an account's shared key credential to sign these signature values to produce
+// the proper SAS query parameters.
+func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = Version
+	}
+	perms, err := parseAccountPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That is right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// SignWithUserDelegation uses an account's UserDelegationKey to sign these signature values to produce the proper SAS query parameters.
+func (v AccountSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + exported.GetAccountName(userDelegationCredential), + v.Permissions, + v.Services, + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: v.Services, + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + udk := exported.GetUDKParams(userDelegationCredential) + + //User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion + + return p, nil +} + +// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. +type AccountPermissions struct { + Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Permissions field. +func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + if p.Tag { + buffer.WriteRune('t') + } + if p.FilterByTags { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASPermissions' fields from a string. 
+func parseAccountPermissions(s string) (AccountPermissions, error) {
+	p := AccountPermissions{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		case 'x':
+			p.DeletePreviousVersion = true
+		case 't':
+			p.Tag = true
+		case 'f':
+			p.FilterByTags = true
+		default:
+			return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r)
+		}
+	}
+	return p, nil
+}
+
+// AccountServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues's Services field.
+type AccountServices struct {
+	Blob, Queue, File bool
+}
+
+// String produces the SAS services string for an Azure Storage account.
+// Call this method to set AccountSignatureValues's Services field.
+func (s *AccountServices) String() string {
+	var buffer bytes.Buffer
+	if s.Blob {
+		buffer.WriteRune('b')
+	}
+	if s.Queue {
+		buffer.WriteRune('q')
+	}
+	if s.File {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountServices' fields from a string.
+/*func parseAccountServices(str string) (AccountServices, error) {
+	s := AccountServices{} // Clear out the flags
+	for _, r := range str {
+		switch r {
+		case 'b':
+			s.Blob = true
+		case 'q':
+			s.Queue = true
+		case 'f':
+			s.File = true
+		default:
+			return AccountServices{}, fmt.Errorf("invalid service character: '%v'", r)
+		}
+	}
+	return s, nil
+}*/
+
+// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues's ResourceTypes field.
+type AccountResourceTypes struct {
+	Service, Container, Object bool
+}
+
+// String produces the SAS resource types string for an Azure Storage account.
+// Call this method to set AccountSignatureValues's ResourceTypes field.
+func (rt *AccountResourceTypes) String() string {
+	var buffer bytes.Buffer
+	if rt.Service {
+		buffer.WriteRune('s')
+	}
+	if rt.Container {
+		buffer.WriteRune('c')
+	}
+	if rt.Object {
+		buffer.WriteRune('o')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountResourceTypes's fields from a string.
+/*func parseAccountResourceTypes(s string) (AccountResourceTypes, error) {
+	rt := AccountResourceTypes{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 's':
+			rt.Service = true
+		case 'c':
+			rt.Container = true
+		case 'o':
+			rt.Object = true
+		default:
+			return AccountResourceTypes{}, fmt.Errorf("invalid resource type: '%v'", r)
+		}
+	}
+	return rt, nil
+}*/
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
new file mode 100644
index 00000000000..1a9c8c12d17
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
@@ -0,0 +1,440 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package sas
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const (
+	TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601
+)
+
+var (
+	// Version is the default version encoded in the SAS token.
+	Version = "2020-02-10"
+)
+
+// timeFormats lists the accepted ISO 8601 time formats.
+// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"}
+
+// Protocol indicates the http/https scheme permitted for the SAS.
+type Protocol string
+
+const (
+	// ProtocolHTTPS can be specified for a SAS protocol
+	ProtocolHTTPS Protocol = "https"
+
+	// ProtocolHTTPSandHTTP can be specified for a SAS protocol
+	ProtocolHTTPSandHTTP Protocol = "https,http"
+)
+
+// formatTimesForSigning converts a time.Time to a snapshotTimeFormat string suitable for a
+// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
+func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(exported.SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// formatTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatTimeWithDefaultFormat(t *time.Time) string {
+	return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range timeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components
+// to a query parameter map by calling AddToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + preauthorizedAgentObjectID string `param:"saoid"` + agentObjectID string `param:"suoid"` + correlationID string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// PreauthorizedAgentObjectID returns preauthorizedAgentObjectID +func (p *QueryParameters) PreauthorizedAgentObjectID() string { + return p.preauthorizedAgentObjectID +} + +// AgentObjectID returns agentObjectID +func (p *QueryParameters) AgentObjectID() string { + return p.agentObjectID +} + +// SignedCorrelationID returns signedCorrelationID +func (p *QueryParameters) SignedCorrelationID() string { + return p.correlationID +} + +// SignedOID returns signedOID +func (p *QueryParameters) SignedOID() string { + return p.signedOID +} + +// SignedTID returns signedTID +func (p *QueryParameters) SignedTID() string { + return p.signedTID +} + +// SignedStart returns signedStart +func (p *QueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry +func (p *QueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService +func (p *QueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion +func (p *QueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime +func (p *QueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services +func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange +func (p *QueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier +func (p *QueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource +func (p *QueryParameters) Resource() string { + return p.resource +} + 
+// Permissions returns permissions
+func (p *QueryParameters) Permissions() string {
+	return p.permissions
+}
+
+// Signature returns signature
+func (p *QueryParameters) Signature() string {
+	return p.signature
+}
+
+// CacheControl returns cacheControl
+func (p *QueryParameters) CacheControl() string {
+	return p.cacheControl
+}
+
+// ContentDisposition returns contentDisposition
+func (p *QueryParameters) ContentDisposition() string {
+	return p.contentDisposition
+}
+
+// ContentEncoding returns contentEncoding
+func (p *QueryParameters) ContentEncoding() string {
+	return p.contentEncoding
+}
+
+// ContentLanguage returns contentLanguage
+func (p *QueryParameters) ContentLanguage() string {
+	return p.contentLanguage
+}
+
+// ContentType returns contentType
+func (p *QueryParameters) ContentType() string {
+	return p.contentType
+}
+
+// SignedDirectoryDepth returns signedDirectoryDepth
+func (p *QueryParameters) SignedDirectoryDepth() string {
+	return p.signedDirectoryDepth
+}
+
+// Encode encodes the SAS query parameters into URL encoded form sorted by key.
+func (p *QueryParameters) Encode() string {
+	v := url.Values{}
+
+	if p.version != "" {
+		v.Add("sv", p.version)
+	}
+	if p.services != "" {
+		v.Add("ss", p.services)
+	}
+	if p.resourceTypes != "" {
+		v.Add("srt", p.resourceTypes)
+	}
+	if p.protocol != "" {
+		v.Add("spr", string(p.protocol))
+	}
+	if !p.startTime.IsZero() {
+		v.Add("st", formatTime(&(p.startTime), p.stTimeFormat))
+	}
+	if !p.expiryTime.IsZero() {
+		v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat))
+	}
+	if len(p.ipRange.Start) > 0 {
+		v.Add("sip", p.ipRange.String())
+	}
+	if p.identifier != "" {
+		v.Add("si", p.identifier)
+	}
+	if p.resource != "" {
+		v.Add("sr", p.resource)
+	}
+	if p.permissions != "" {
+		v.Add("sp", p.permissions)
+	}
+	if p.signedOID != "" {
+		v.Add("skoid", p.signedOID)
+		v.Add("sktid", p.signedTID)
+		v.Add("skt", p.signedStart.Format(TimeFormat))
+		v.Add("ske", p.signedExpiry.Format(TimeFormat))
+		v.Add("sks", p.signedService)
+		v.Add("skv", p.signedVersion)
+	}
+	if p.signature != "" {
+		v.Add("sig", p.signature)
+	}
+	if p.cacheControl != "" {
+		v.Add("rscc", p.cacheControl)
+	}
+	if p.contentDisposition != "" {
+		v.Add("rscd", p.contentDisposition)
+	}
+	if p.contentEncoding != "" {
+		v.Add("rsce", p.contentEncoding)
+	}
+	if p.contentLanguage != "" {
+		v.Add("rscl", p.contentLanguage)
+	}
+	if p.contentType != "" {
+		v.Add("rsct", p.contentType)
+	}
+	if p.signedDirectoryDepth != "" {
+		v.Add("sdd", p.signedDirectoryDepth)
+	}
+	if p.preauthorizedAgentObjectID != "" {
+		v.Add("saoid", p.preauthorizedAgentObjectID)
+	}
+	if p.agentObjectID != "" {
+		v.Add("suoid", p.agentObjectID)
+	}
+	if p.correlationID != "" {
+		v.Add("scid", p.correlationID)
+	}
+
+	return v.Encode()
+}
+
+// NewQueryParameters creates and initializes a QueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "snapshot": + p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.preauthorizedAgentObjectID = val + case "suoid": + p.agentObjectID = val + case "scid": + p.correlationID = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go similarity index 51% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 488baed8c0c..98d853d4e9f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -4,22 +4,24 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package azblob +package sas import ( "bytes" "fmt" "strings" "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" ) -// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. +// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas -type BlobSASSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to SASVersion - Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero +type BlobSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero SnapshotTime time.Time Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() IPRange IPRange `param:"sip"` @@ -45,81 +47,40 @@ func getDirectoryDepth(path string) string { return fmt.Sprint(strings.Count(path, "/") + 1) } -// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce -// the proper SAS query parameters. -// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential -func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { - resource := "c" +// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { if sharedKeyCredential == nil { - return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + } + + //Make sure the permission characters are in the correct order + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err } + v.Permissions = perms.String() + resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" - //Make sure the permission characters are in the correct order - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() } else if v.BlobVersion != "" { resource = "bv" - //Make sure the permission characters are in the correct order - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() } else if v.Directory != "" { resource = "d" v.BlobName = "" - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() } else if v.BlobName == "" { - // Make sure the permission characters are in the correct order - perms := &ContainerSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() + // do nothing } else { resource = "b" - // Make sure the permission characters are in the correct order - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() } + if v.Version == "" { - v.Version = SASVersion + v.Version = Version } - startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, 
v.ExpiryTime, v.SnapshotTime) + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) signedIdentifier := v.Identifier - //udk := sharedKeyCredential.getUDKParams() - // - //if udk != nil { - // udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{}) - // //I don't like this answer to combining the functions - // //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it. - // signedIdentifier = strings.Join([]string{ - // udk.SignedOid, - // udk.SignedTid, - // udkStart, - // udkExpiry, - // udk.SignedService, - // udk.SignedVersion, - // v.PreauthorizedAgentObjectId, - // v.AgentObjectId, - // v.CorrelationId, - // }, "\n") - //} - // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx stringToSign := strings.Join([]string{ v.Permissions, @@ -139,13 +100,114 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share v.ContentType}, // rsct "\n") - signature := "" - signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, + agentObjectID: v.AgentObjectId, + correlationID: v.CorrelationId, + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + if userDelegationCredential == nil { + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") + } + + //Make sure the permission characters are in the correct order + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + udk := exported.GetUDKParams(userDelegationCredential) + + udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{}) + //I don't like this answer to combining the functions + //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it. 
+ signedIdentifier := strings.Join([]string{ + *udk.SignedOID, + *udk.SignedTID, + udkStart, + udkExpiry, + *udk.SignedService, + *udk.SignedVersion, + v.PreauthorizedAgentObjectId, + v.AgentObjectId, + v.CorrelationId, + }, "\n") + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) if err != nil { - return SASQueryParameters{}, err + return QueryParameters{}, err } - p := SASQueryParameters{ + p := QueryParameters{ // Common SAS parameters version: v.Version, protocol: v.Protocol, @@ -164,22 +226,20 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share contentType: v.ContentType, snapshotTime: v.SnapshotTime, signedDirectoryDepth: getDirectoryDepth(v.Directory), - preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, - agentObjectId: v.AgentObjectId, - correlationId: v.CorrelationId, + preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, + agentObjectID: v.AgentObjectId, + correlationID: v.CorrelationId, // Calculated SAS signature signature: signature, } - ////User delegation SAS specific parameters - //if udk != nil { - // p.signedOid = udk.SignedOid - // p.signedTid = udk.SignedTid - // p.signedStart = udk.SignedStart - // p.signedExpiry = udk.SignedExpiry - // p.signedService = udk.SignedService - // p.signedVersion = udk.SignedVersion - //} + //User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion return p, nil } @@ -197,17 +257,17 @@ func getCanonicalName(account string, containerName string, blobName string, dir return strings.Join(elements, "") } -// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. // All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob -type ContainerSASPermissions struct { +type ContainerPermissions struct { Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only } // String produces the SAS permissions string for an Azure Storage container. // Call this method to set BlobSASSignatureValues's Permissions field. -func (p ContainerSASPermissions) String() string { +func (p *ContainerPermissions) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') @@ -245,9 +305,9 @@ func (p ContainerSASPermissions) String() string { return b.String() } -// Parse initializes the ContainerSASPermissions's fields from a string. 
-func (p *ContainerSASPermissions) Parse(s string) error { - *p = ContainerSASPermissions{} // Clear the flags +// Parse initializes the ContainerSASPermissions' fields from a string. +/*func parseContainerPermissions(s string) (ContainerPermissions, error) { + p := ContainerPermissions{} // Clear the flags for _, r := range s { switch r { case 'r': @@ -273,21 +333,21 @@ func (p *ContainerSASPermissions) Parse(s string) error { case 'p': p.ModifyPermissions = true default: - return fmt.Errorf("invalid permission: '%v'", r) + return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) } } - return nil -} + return p, nil +}*/ -// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. -type BlobSASPermissions struct { +type BlobPermissions struct { Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool } // String produces the SAS permissions string for an Azure Storage blob. -// Call this method to set BlobSASSignatureValues's Permissions field. -func (p BlobSASPermissions) String() string { +// Call this method to set BlobSignatureValues's Permissions field. +func (p *BlobPermissions) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') @@ -329,8 +389,8 @@ func (p BlobSASPermissions) String() string { } // Parse initializes the BlobSASPermissions's fields from a string. -func (p *BlobSASPermissions) Parse(s string) error { - *p = BlobSASPermissions{} // Clear the flags +func parseBlobPermissions(s string) (BlobPermissions, error) { + p := BlobPermissions{} // Clear the flags for _, r := range s { switch r { case 'r': @@ -358,8 +418,8 @@ func (p *BlobSASPermissions) Parse(s string) error { case 'p': p.Permissions = true default: - return fmt.Errorf("invalid permission: '%v'", r) + return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) } } - return nil + return p, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go similarity index 68% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go index 062587604e8..57fe053f07a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -2,71 +2,52 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package sas import ( - "net" "net/url" "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( - snapshot = "snapshot" - versionId = "versionid" - SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + snapshot = "snapshot" + versionId = "versionid" ) -// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an -// existing URL into its parts by calling NewBlobURLParts(). 
You construct a URL from parts by calling URL(). +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/containername" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. // NOTE: Changing any SAS-related field requires computing a new SAS signature. -type BlobURLParts struct { +type URLParts struct { Scheme string // Ex: "https://" Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" IPEndpointStyleInfo IPEndpointStyleInfo ContainerName string // "" if no container BlobName string // "" if no blob Snapshot string // "" if not a snapshot - SAS SASQueryParameters + SAS QueryParameters UnparsedParams string VersionID string // "" if not versioning enabled } -// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. -// Ex: "https://10.132.141.33/accountname/containername" -type IPEndpointStyleInfo struct { - AccountName string // "" if not using IP endpoint style -} - -// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: -// http(s)://IP(:port)/storageaccount/container/... -// As url's Host property, host could be both host or host:port -func isIPEndpointStyle(host string) bool { - if host == "" { - return false - } - if h, _, err := net.SplitHostPort(host); err == nil { - host = h - } - // For IPv6, there could be case where SplitHostPort fails for cannot finding port. - // In this case, eliminate the '[' and ']' in the URL. - // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 - if host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - return net.ParseIP(host) != nil -} - -// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other -// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. -func NewBlobURLParts(u string) (BlobURLParts, error) { +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. 
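+//
+// Illustrative usage (editor's sketch, not part of the upstream file; the account,
+// container and blob names are placeholders):
+//
+//	parts, err := ParseURL("https://myaccount.blob.core.windows.net/mycontainer/dir/myblob?sv=2020-10-02&sig=abc")
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	_ = parts.ContainerName   // "mycontainer"
+//	_ = parts.BlobName        // "dir/myblob"
+//	_ = parts.SAS.Signature() // SAS query parameters are collected into parts.SAS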
+func ParseURL(u string) (URLParts, error) { uri, err := url.Parse(u) if err != nil { - return BlobURLParts{}, err + return URLParts{}, err } - up := BlobURLParts{ + up := URLParts{ Scheme: uri.Scheme, Host: uri.Host, } @@ -77,7 +58,7 @@ func NewBlobURLParts(u string) (BlobURLParts, error) { if path[0] == '/' { path = path[1:] // If path starts with a slash, remove it } - if isIPEndpointStyle(up.Host) { + if shared.IsIPEndpointStyle(up.Host) { if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob up.IPEndpointStyleInfo.AccountName = path path = "" // No ContainerName present in the URL so path should be empty @@ -114,27 +95,16 @@ func NewBlobURLParts(u string) (BlobURLParts, error) { delete(paramsMap, "versionId") // delete "versionId" from paramsMap } - up.SAS = newSASQueryParameters(paramsMap, true) + up.SAS = NewQueryParameters(paramsMap, true) up.UnparsedParams = paramsMap.Encode() return up, nil } -type caseInsensitiveValues url.Values // map[string][]string -func (values caseInsensitiveValues) Get(key string) ([]string, bool) { - key = strings.ToLower(key) - for k, v := range values { - if strings.ToLower(k) == key { - return v, true - } - } - return []string{}, false -} - -// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery // field contains the SAS, snapshot, and unparsed query parameters. -func (up BlobURLParts) URL() string { +func (up URLParts) String() string { path := "" - if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { path += "/" + up.IPEndpointStyleInfo.AccountName } // Concatenate container & blob names (if they exist) @@ -148,8 +118,8 @@ func (up BlobURLParts) URL() string { rawQuery := up.UnparsedParams //If no snapshot is initially provided, fill it in from the SAS query properties to help the user - if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { - up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) + if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { + up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) } // Concatenate blob version id query parameter (if it exists) @@ -182,3 +152,15 @@ func (up BlobURLParts) URL() string { } return u.String() } + +type caseInsensitiveValues url.Values // map[string][]string + +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go deleted file mode 100644 index d2e89f5b2a6..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "errors" - "io" -) - -type sectionWriter struct { - count int64 - offset int64 - position int64 - writerAt io.WriterAt -} - -func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter { - return §ionWriter{ - count: count, - offset: off, - writerAt: c, - } -} - -func (c *sectionWriter) Write(p []byte) (int, error) { - remaining := c.count - c.position - - if remaining <= 0 { - return 0, errors.New("end of section reached") - } - - slice := p - - if int64(len(slice)) > remaining { - slice = slice[:remaining] - } - - n, err := c.writerAt.WriteAt(slice, c.offset+c.position) - c.position += int64(n) - if err != nil { - return n, err - } - - if len(p) > n { - return n, errors.New("not enough space for all bytes") - } - - return n, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go new file mode 100644 index 00000000000..724d92b91c9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -0,0 +1,287 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "context" + "errors" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. +type Client base.Client[generated.ServiceClient] + +// NewClient creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? 
+// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. +// OAuth is required for this call, as well as any role that can delegate access to the storage account. +func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) { + url, err := blob.ParseURL(s.URL()) + if err != nil { + return nil, err + } + + getUserDelegationKeyOptions := o.format() + udk, err := s.generated().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions) + if err != nil { + return nil, err + } + + return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil +} + +func (s *Client) generated() *generated.ServiceClient { + return base.InnerClient((*base.Client[generated.ServiceClient])(s)) +} + +func (s *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ServiceClient])(s)) +} + +// URL returns the URL endpoint used by the Client object. +func (s *Client) URL() string { + return s.generated().Endpoint() +} + +// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of +// Client's URL. The new ContainerClient uses the same request policy pipeline as the Client. +// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the +// desired pipeline object. 
Or, call this package's NewContainerClient instead of calling this object's
+// NewContainerClient method.
+func (s *Client) NewContainerClient(containerName string) *container.Client {
+	containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
+	return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey()))
+}
+
+// CreateContainer is a lifecycle method that creates a new container under the specified account.
+// If the container with the same name already exists, a ResourceExistsError will be raised.
+// This method returns a client with which to interact with the newly created container.
+func (s *Client) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (CreateContainerResponse, error) {
+	containerClient := s.NewContainerClient(containerName)
+	containerCreateResp, err := containerClient.Create(ctx, options)
+	return containerCreateResp, err
+}
+
+// DeleteContainer is a lifecycle method that marks the specified container for deletion.
+// The container and any blobs contained within it are later deleted during garbage collection.
+// If the container is not found, a ResourceNotFoundError will be raised.
+func (s *Client) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (DeleteContainerResponse, error) {
+	containerClient := s.NewContainerClient(containerName)
+	containerDeleteResp, err := containerClient.Delete(ctx, options)
+	return containerDeleteResp, err
+}
+
+// RestoreContainer restores a soft-deleted container.
+// Operation will only be successful if used within the specified number of days set in the delete retention policy
+func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName string, deletedContainerVersion string, options *RestoreContainerOptions) (RestoreContainerResponse, error) {
+	containerClient := s.NewContainerClient(deletedContainerName)
+	containerRestoreResp, err := containerClient.Restore(ctx, deletedContainerVersion, options)
+	return containerRestoreResp, err
+}
+
+// GetAccountInfo provides account level information
+func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
+	getAccountInfoOptions := o.format()
+	resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions)
+	return resp, err
+}
+
+// NewListContainersPager operation returns a pager of the containers under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
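+//
+// Illustrative usage (editor's sketch, not part of the upstream file; svcClient is an
+// already-constructed *Client, and the ContainerItems/Name fields are assumed from the
+// generated list response model):
+//
+//	pager := svcClient.NewListContainersPager(&ListContainersOptions{
+//		Include: ListContainersInclude{Metadata: true},
+//	})
+//	for pager.More() {
+//		page, err := pager.NextPage(context.TODO())
+//		if err != nil {
+//			break // handle the error as appropriate
+//		}
+//		for _, c := range page.ContainerItems {
+//			_ = *c.Name
+//		}
+//	}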
+func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { + listOptions := generated.ServiceClientListContainersSegmentOptions{} + if o != nil { + if o.Include.Deleted { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeDeleted) + } + if o.Include.Metadata { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) + } + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListContainersResponse]{ + More: func(page ListContainersResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListContainersResponse) (ListContainersResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.Marker + req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListContainersResponse{}, err + } + resp, err := s.generated().Pipeline().Do(req) + if err != nil { + return ListContainersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListContainersResponse{}, runtime.NewResponseError(resp) + } + return s.generated().ListContainersSegmentHandleResponse(resp) + }, + }) +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +func (s *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { + getPropertiesOptions := o.format() + resp, err := s.generated().GetProperties(ctx, getPropertiesOptions) + return resp, err +} + +// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics. +// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved. +func (s *Client) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) { + properties, setPropertiesOptions := o.format() + resp, err := s.generated().SetProperties(ctx, properties, setPropertiesOptions) + return resp, err +} + +// GetStatistics Retrieves statistics related to replication for the Blob service. +// It is only available when read-access geo-redundant replication is enabled for the storage account. +// With geo-redundant replication, Azure Storage maintains your data durable +// in two locations. In both locations, Azure Storage constantly maintains +// multiple healthy replicas of your data. The location where you read, +// create, update, or delete data is the primary storage account location. +// The primary location exists in the region you choose at the time you +// create an account via the Azure Management Azure classic portal, for +// example, North Central US. The location to which your data is replicated +// is the secondary location. The secondary location is automatically +// determined based on the location of the primary; it is in a second data +// center that resides in the same region as the primary location. Read-only +// access is available from the secondary location, if read-access geo-redundant +// replication is enabled for your storage account. 
+func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) { + getStatisticsOptions := o.format() + resp, err := s.generated().GetStatistics(ctx, getStatisticsOptions) + + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +// This validity can be checked with CanGetAccountSASToken(). +func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, services sas.AccountServices, start time.Time, expiry time.Time) (string, error) { + if s.sharedKey() == nil { + return "", errors.New("SAS can only be signed with a SharedKeyCredential") + } + + qps, err := sas.AccountSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + Permissions: permissions.String(), + Services: services.String(), + ResourceTypes: resources.String(), + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + // add a trailing slash to be consistent with the portal + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} + +// FilterBlobs operation finds all blobs in the storage account whose tags match a given search expression. +// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +// To specify a container, eg. "@container=’containerName’ and Name = ‘C’" +func (s *Client) FilterBlobs(ctx context.Context, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + serviceFilterBlobsOptions := o.format() + resp, err := s.generated().FilterBlobs(ctx, serviceFilterBlobsOptions) + return resp, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go new file mode 100644 index 00000000000..20665fc2b79 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go @@ -0,0 +1,92 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. 
+func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// ListContainersIncludeType defines values for ListContainersIncludeType +type ListContainersIncludeType = generated.ListContainersIncludeType + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = generated.ListContainersIncludeTypeMetadata + ListContainersIncludeTypeDeleted ListContainersIncludeType = generated.ListContainersIncludeTypeDeleted + ListContainersIncludeTypeSystem ListContainersIncludeType = generated.ListContainersIncludeTypeSystem +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return generated.PossibleListContainersIncludeTypeValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus = generated.BlobGeoReplicationStatus + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusLive + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusBootstrap + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusUnavailable +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return generated.PossibleBlobGeoReplicationStatusValues() +} + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go new file mode 100644 index 00000000000..b0354cd90e3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -0,0 +1,220 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// KeyInfo contains KeyInfo struct. +type KeyInfo = generated.KeyInfo + +// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method +type GetUserDelegationCredentialOptions struct { + // placeholder for future options +} + +func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGetUserDelegationKeyOptions { + return nil +} + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// CpkInfo contains a group of parameters for the BlobClient.Download method. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CpkScopeInfo = generated.CpkScopeInfo + +// CreateContainerOptions contains the optional parameters for the container.Client.Create method. +type CreateContainerOptions = container.CreateOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = container.DeleteOptions + +// RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. +type RestoreContainerOptions = container.RestoreOptions + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CorsRule = generated.CorsRule + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy = generated.RetentionPolicy + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics = generated.Metrics + +// Logging - Azure Analytics Logging settings. +type Logging = generated.Logging + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite = generated.StaticWebsite + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// StorageServiceStats - Stats for the storage service. 
+type StorageServiceStats = generated.StorageServiceStats + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.ServiceClientGetAccountInfoOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // placeholder for future options +} + +func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListContainersOptions provides set of configurations for ListContainers operation +type ListContainersOptions struct { + Include ListContainersInclude + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing operation did not return all containers + // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in + // a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, + // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible + // that the service will return fewer results than specified by max results, or than the default of 5000. + MaxResults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// ListContainersInclude indicates what additional information the service should return with each container. +type ListContainersInclude struct { + // Tells the service whether to return metadata for each container. + Metadata bool + + // Tells the service whether to return soft-deleted containers. + Deleted bool +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions provides set of options for Client.SetProperties +type SetPropertiesOptions struct { + // The set of CORS rules. + Cors []*CorsRule + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics + + // Azure Analytics Logging settings. 
+	Logging *Logging
+
+	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
+	MinuteMetrics *Metrics
+
+	// The properties that enable an account to host a static website
+	StaticWebsite *StaticWebsite
+}
+
+func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) {
+	if o == nil {
+		return generated.StorageServiceProperties{}, nil
+	}
+
+	return generated.StorageServiceProperties{
+		Cors:                  o.Cors,
+		DefaultServiceVersion: o.DefaultServiceVersion,
+		DeleteRetentionPolicy: o.DeleteRetentionPolicy,
+		HourMetrics:           o.HourMetrics,
+		Logging:               o.Logging,
+		MinuteMetrics:         o.MinuteMetrics,
+		StaticWebsite:         o.StaticWebsite,
+	}, nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetStatisticsOptions provides set of options for Client.GetStatistics
+type GetStatisticsOptions struct {
+	// placeholder for future options
+}
+
+func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions {
+	return nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// FilterBlobsOptions provides set of options for Client.FindBlobsByTags
+type FilterBlobsOptions struct {
+	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	MaxResults *int32
+	// Filters the results to return only blobs whose tags match the specified expression.
+	Where *string
+}
+
+func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions {
+	if o == nil {
+		return nil
+	}
+	return &generated.ServiceClientFilterBlobsOptions{
+		Marker:     o.Marker,
+		Maxresults: o.MaxResults,
+		Where:      o.Where,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
new file mode 100644
index 00000000000..788514cca37
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
@@ -0,0 +1,41 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package service
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// CreateContainerResponse contains the response from method container.Client.Create.
+type CreateContainerResponse = generated.ContainerClientCreateResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = generated.ContainerClientDeleteResponse + +// RestoreContainerResponse contains the response from method container.Client.Restore +type RestoreContainerResponse = generated.ContainerClientRestoreResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. +type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse + +// ListContainersResponse contains the response from method Client.ListContainersSegment. +type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// GetStatisticsResponse contains the response from method Client.GetStatistics. +type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse + +// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json new file mode 100644 index 00000000000..b23e56abbb9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json @@ -0,0 +1,513 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." 
+ } + } + }, + "variables": { + "mgmtApiVersion": "2019-06-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + 
"enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "deleteRetentionPolicy": { + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": 
"[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + "name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), 
variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": 
"string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), 
listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go deleted file mode 100644 index 25490ab5950..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go +++ /dev/null @@ -1,154 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// AppendBlobClient represents a client to an Azure Storage append blob; -type AppendBlobClient struct { - BlobClient - client *appendBlobClient -} - -// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options. -func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &AppendBlobClient{ - client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options. -func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - return &AppendBlobClient{ - client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options. 
-func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &AppendBlobClient{ - client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, - }, nil -} - -// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) { - p, err := NewBlobURLParts(ab.URL()) - if err != nil { - return nil, err - } - - p.Snapshot = snapshot - endpoint := p.URL() - pipeline := ab.client.pl - - return &AppendBlobClient{ - client: newAppendBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: ab.sharedKey, - }, - }, nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) { - p, err := NewBlobURLParts(ab.URL()) - if err != nil { - return nil, err - } - - p.VersionID = versionID - endpoint := p.URL() - pipeline := ab.client.pl - - return &AppendBlobClient{ - client: newAppendBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: ab.sharedKey, - }, - }, nil -} - -// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) { - appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() - - resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders, - leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - - return toAppendBlobCreateResponse(resp), handleError(err) -} - -// AppendBlock writes a stream to a new block of data to the end of the existing append blob. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. 
-func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return AppendBlobAppendBlockResponse{}, nil - } - - appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() - - resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) - - return toAppendBlobAppendBlockResponse(resp), handleError(err) -} - -// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. -func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) { - appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() - - // content length should be 0 on * from URL. always. It's a 400 if it isn't. - resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, - leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err) -} - -// SealAppendBlob - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only. -// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal -func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) { - leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.format() - resp, err := ab.client.Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) - return toAppendBlobSealResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go deleted file mode 100644 index 9543d14f877..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go +++ /dev/null @@ -1,278 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. -type BlobClient struct { - client *blobClient - sharedKey *SharedKeyCredential -} - -// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options. 
-func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options. -func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - return &BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options. -func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &BlobClient{ - client: newBlobClient(blobURL, conn.Pipeline()), - sharedKey: cred, - }, nil -} - -// NewBlobClientFromConnectionString creates BlobClient from a connection String -//nolint -func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) { - containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options) - if err != nil { - return nil, err - } - return containerClient.NewBlobClient(blobName) -} - -// URL returns the URL endpoint used by the BlobClient object. -func (b *BlobClient) URL() string { - return b.client.endpoint -} - -// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) { - p, err := NewBlobURLParts(b.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - pipeline := b.client.pl - return &BlobClient{ - client: newBlobClient(p.URL(), pipeline), - sharedKey: b.sharedKey, - }, nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) { - p, err := NewBlobURLParts(b.URL()) - if err != nil { - return nil, err - } - p.VersionID = versionID - - pipeline := b.client.pl - return &BlobClient{ - client: newBlobClient(p.URL(), pipeline), - sharedKey: b.sharedKey, - }, nil -} - -// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
-func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) { - o, lease, cpk, accessConditions := options.format() - dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions) - if err != nil { - return BlobDownloadResponse{}, handleError(err) - } - - offset := int64(0) - count := int64(CountToEnd) - - if options != nil && options.Offset != nil { - offset = *options.Offset - } - - if options != nil && options.Count != nil { - count = *options.Count - } - - eTag := "" - if dr.ETag != nil { - eTag = *dr.ETag - } - return BlobDownloadResponse{ - b: b, - blobClientDownloadResponse: dr, - ctx: ctx, - getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag}, - ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), - }, err -} - -// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) { - basics, leaseInfo, accessConditions := o.format() - resp, err := b.client.Delete(ctx, basics, leaseInfo, accessConditions) - - return toBlobDeleteResponse(resp), handleError(err) -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) { - undeleteOptions := o.format() - resp, err := b.client.Undelete(ctx, undeleteOptions) - - return toBlobUndeleteResponse(resp), handleError(err) -} - -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. -func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) { - basics, lease, accessConditions := options.format() - resp, err := b.client.SetTier(ctx, tier, basics, lease, accessConditions) - - return toBlobSetTierResponse(resp), handleError(err) -} - -// GetProperties returns the blob's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. -func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) { - basics, lease, cpk, access := options.format() - resp, err := b.client.GetProperties(ctx, basics, lease, cpk, access) - - return toGetBlobPropertiesResponse(resp), handleError(err) -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) { - basics, lease, access := options.format() - resp, err := b.client.SetHTTPHeaders(ctx, basics, &blobHttpHeaders, lease, access) - - return toBlobSetHTTPHeadersResponse(resp), handleError(err) -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) { - basics := blobClientSetMetadataOptions{ - Metadata: metadata, - } - lease, cpk, cpkScope, access := options.format() - resp, err := b.client.SetMetadata(ctx, &basics, lease, cpk, cpkScope, access) - - return toBlobSetMetadataResponse(resp), handleError(err) -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) { - // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter - // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this - // performance hit. - basics, cpk, cpkScope, access, lease := options.format() - resp, err := b.client.CreateSnapshot(ctx, basics, cpk, cpkScope, access, lease) - - return toBlobCreateSnapshotResponse(resp), handleError(err) -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. -func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) { - basics, srcAccess, destAccess, lease := options.format() - resp, err := b.client.StartCopyFromURL(ctx, copySource, basics, srcAccess, destAccess, lease) - - return toBlobStartCopyFromURLResponse(resp), handleError(err) -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) { - basics, lease := options.format() - resp, err := b.client.AbortCopyFromURL(ctx, copyID, basics, lease) - - return toBlobAbortCopyFromURLResponse(resp), handleError(err) -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. 
-// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) { - blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) - - return toBlobSetTagsResponse(resp), handleError(err) -} - -// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) { - blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) - - return toBlobGetTagsResponse(resp), handleError(err) - -} - -// GetSASToken is a convenience method for generating a SAS token for the currently pointed at blob. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) { - urlParts, _ := NewBlobURLParts(b.URL()) - - t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) - - if err != nil { - t = time.Time{} - } - - if b.sharedKey == nil { - return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential") - } - - return BlobSASSignatureValues{ - ContainerName: urlParts.ContainerName, - BlobName: urlParts.BlobName, - SnapshotTime: t, - Version: SASVersion, - - Permissions: permissions.String(), - - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), - }.NewSASQueryParameters(b.sharedKey) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go deleted file mode 100644 index a9273dfb62c..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// BlobLeaseClient represents lease client on blob -type BlobLeaseClient struct { - BlobClient - leaseID *string -} - -// NewBlobLeaseClient is constructor for BlobLeaseClient -func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) { - if leaseID == nil { - generatedUuid, err := uuid.New() - if err != nil { - return nil, err - } - leaseID = to.Ptr(generatedUuid.String()) - } - return &BlobLeaseClient{ - BlobClient: *b, - leaseID: leaseID, - }, nil -} - -// AcquireLease acquires a lease on the blob for write and delete operations. -//The lease Duration must be between 15 and 60 seconds, or infinite (-1). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
-func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) { - blobAcquireLeaseOptions, modifiedAccessConditions := options.format() - blobAcquireLeaseOptions.ProposedLeaseID = blc.leaseID - - resp, err := blc.client.AcquireLease(ctx, &blobAcquireLeaseOptions, modifiedAccessConditions) - return toBlobAcquireLeaseResponse(resp), handleError(err) -} - -// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) -// constant to break a fixed-Duration lease when it expires or an infinite lease immediately. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) { - blobBreakLeaseOptions, modifiedAccessConditions := options.format() - resp, err := blc.client.BreakLease(ctx, blobBreakLeaseOptions, modifiedAccessConditions) - return toBlobBreakLeaseResponse(resp), handleError(err) -} - -// ChangeLease changes the blob's lease ID. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) { - if blc.leaseID == nil { - return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil") - } - proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() - if err != nil { - return BlobChangeLeaseResponse{}, err - } - resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) - - // If lease has been changed successfully, set the leaseID in client - if err == nil { - blc.leaseID = proposedLeaseID - } - - return toBlobChangeLeaseResponse(resp), handleError(err) -} - -// RenewLease renews the blob's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) { - if blc.leaseID == nil { - return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil") - } - renewLeaseBlobOptions, modifiedAccessConditions := options.format() - resp, err := blc.client.RenewLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) - return toBlobRenewLeaseResponse(resp), handleError(err) -} - -// ReleaseLease releases the blob's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
-func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseBlobOptions) (BlobReleaseLeaseResponse, error) { - if blc.leaseID == nil { - return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") - } - renewLeaseBlobOptions, modifiedAccessConditions := options.format() - resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) - return toBlobReleaseLeaseResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go deleted file mode 100644 index b080128c815..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" -) - -// BlockBlobClient defines a set of operations applicable to block blobs. -type BlockBlobClient struct { - BlobClient - client *blockBlobClient -} - -// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options. -func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - }, - }, nil -} - -// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options. -func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - }, - }, nil -} - -// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options. -func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - sharedKey: cred, - }, - }, nil -} - -// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. 
-func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) { - p, err := NewBlobURLParts(bb.URL()) - if err != nil { - return nil, err - } - - p.Snapshot = snapshot - endpoint := p.URL() - bClient := newBlobClient(endpoint, bb.client.pl) - - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - sharedKey: bb.sharedKey, - }, - }, nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) { - p, err := NewBlobURLParts(bb.URL()) - if err != nil { - return nil, err - } - - p.VersionID = versionID - endpoint := p.URL() - bClient := newBlobClient(endpoint, bb.client.pl) - - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - sharedKey: bb.sharedKey, - }, - }, nil -} - -// Upload creates a new block blob or overwrites an existing block blob. -// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not -// supported with Upload; the content of the existing blob is overwritten with the new content. To -// perform a partial update of a block blob, use StageBlock and CommitBlockList. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return BlockBlobUploadResponse{}, err - } - - basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() - - resp, err := bb.client.Upload(ctx, count, body, basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) - - return toBlockBlobUploadResponse(resp), handleError(err) -} - -// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. -func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, - options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return BlockBlobStageBlockResponse{}, err - } - - stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() - resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo) - - return toBlockBlobStageBlockResponse(resp), handleError(err) -} - -// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// If count is CountToEnd (0), then data is read from specified offset to the end. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. 
-func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, - contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) { - - stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() - - resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions, - cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) - - return toBlockBlobStageBlockFromURLResponse(resp), handleError(err) -} - -// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. -// In order to be written as part of a blob, a block must have been successfully written -// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob -// by uploading only those blocks that have changed, then committing the new and existing -// blocks together. Any blocks not specified in the block list and permanently deleted. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. -func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) { - // this is a code smell in the generated code - blockIds := make([]*string, len(base64BlockIDs)) - for k, v := range base64BlockIDs { - blockIds[k] = to.Ptr(v) - } - - blockLookupList := BlockLookupList{Latest: blockIds} - commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess := options.format() - - resp, err := bb.client.CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) - - return toBlockBlobCommitBlockListResponse(resp), handleError(err) -} - -// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. -func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) { - o, lac, mac := options.format() - - resp, err := bb.client.GetBlockList(ctx, listType, o, lac, mac) - - return toBlockBlobGetBlockListResponse(resp), handleError(err) -} - -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. -func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) { - copyOptions, smac, mac, lac := options.format() - resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, copyOptions, smac, mac, lac) - - return toBlockBlobCopyFromURLResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go deleted file mode 100644 index 2c23b8f4ed8..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "errors" - "fmt" - "strings" -) - -var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + - "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + - "AccountKey=;EndpointSuffix=core.windows.net'") - -// convertConnStrToMap converts a connection string (in format key1=value1;key2=value2;key3=value3;) into a map of key-value pairs -func convertConnStrToMap(connStr string) (map[string]string, error) { - ret := make(map[string]string) - connStr = strings.TrimRight(connStr, ";") - - splitString := strings.Split(connStr, ";") - if len(splitString) == 0 { - return ret, errConnectionString - } - for _, stringPart := range splitString { - parts := strings.SplitN(stringPart, "=", 2) - if len(parts) != 2 { - return ret, errConnectionString - } - ret[parts[0]] = parts[1] - } - return ret, nil -} - -// parseConnectionString parses a connection string into a service URL and a SharedKeyCredential or a service url with the -// SharedAccessSignature combined. -func parseConnectionString(connectionString string) (string, *SharedKeyCredential, error) { - var serviceURL string - var cred *SharedKeyCredential - - defaultScheme := "https" - defaultSuffix := "core.windows.net" - - connStrMap, err := convertConnStrToMap(connectionString) - if err != nil { - return "", nil, err - } - - accountName, ok := connStrMap["AccountName"] - if !ok { - return "", nil, errConnectionString - } - accountKey, ok := connStrMap["AccountKey"] - if !ok { - sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] - if !ok { - return "", nil, errConnectionString - } - return fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), nil, nil - } - - protocol, ok := connStrMap["DefaultEndpointsProtocol"] - if !ok { - protocol = defaultScheme - } - - suffix, ok := connStrMap["EndpointSuffix"] - if !ok { - suffix = defaultSuffix - } - - blobEndpoint, ok := connStrMap["BlobEndpoint"] - if ok { - cred, err = NewSharedKeyCredential(accountName, accountKey) - return blobEndpoint, cred, err - } - serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) - - cred, err = NewSharedKeyCredential(accountName, accountKey) - if err != nil { - return "", nil, err - } - - return serviceURL, cred, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go deleted file mode 100644 index 12c4a18dfd1..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go +++ /dev/null @@ -1,253 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs. -type ContainerClient struct { - client *containerClient - sharedKey *SharedKeyCredential -} - -// URL returns the URL endpoint used by the ContainerClient object. 
-func (c *ContainerClient) URL() string { - return c.client.endpoint -} - -// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options. -func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(containerURL, conOptions) - - return &ContainerClient{ - client: newContainerClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options. -func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(containerURL, conOptions) - - return &ContainerClient{ - client: newContainerClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options. -func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(containerURL, conOptions) - - return &ContainerClient{ - client: newContainerClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, nil -} - -// NewContainerClientFromConnectionString creates a ContainerClient object using connection string of an account -func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) { - svcClient, err := NewServiceClientFromConnectionString(connectionString, options) - if err != nil { - return nil, err - } - return svcClient.NewContainerClient(containerName) -} - -// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of -// ContainerClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's -// NewBlobClient method. -func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, nil -} - -// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of -// ContainerClient's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's -// NewAppendBlobClient method. 
-func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &AppendBlobClient{ - BlobClient: BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, - client: newAppendBlobClient(blobURL, c.client.pl), - }, nil -} - -// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of -// ContainerClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's -// NewBlockBlobClient method. -func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &BlockBlobClient{ - BlobClient: BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, - client: newBlockBlobClient(blobURL, c.client.pl), - }, nil -} - -// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of ContainerClient's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's -// NewPageBlobClient method. -func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &PageBlobClient{ - BlobClient: BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, - client: newPageBlobClient(blobURL, c.client.pl), - }, nil -} - -// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. -func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) { - basics, cpkInfo := options.format() - resp, err := c.client.Create(ctx, basics, cpkInfo) - - return toContainerCreateResponse(resp), handleError(err) -} - -// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. -func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) { - basics, leaseInfo, accessConditions := o.format() - resp, err := c.client.Delete(ctx, basics, leaseInfo, accessConditions) - - return toContainerDeleteResponse(resp), handleError(err) -} - -// GetProperties returns the container's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. -func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) { - // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetProperties method at all simplifying the API. 
- // The optionals are nil, like they were in track 1.5 - options, leaseAccess := o.format() - resp, err := c.client.GetProperties(ctx, options, leaseAccess) - - return toContainerGetPropertiesResponse(resp), handleError(err) -} - -// SetMetadata sets the container's metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. -func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) { - metadataOptions, lac, mac := o.format() - resp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac) - - return toContainerSetMetadataResponse(resp), handleError(err) -} - -// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. -func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) { - options, ac := o.format() - resp, err := c.client.GetAccessPolicy(ctx, options, ac) - - return toContainerGetAccessPolicyResponse(resp), handleError(err) -} - -// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. -func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) { - accessPolicy, mac, lac := o.format() - resp, err := c.client.SetAccessPolicy(ctx, accessPolicy, mac, lac) - - return toContainerSetAccessPolicyResponse(resp), handleError(err) -} - -// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager { - listOptions := o.format() - pager := c.client.ListBlobFlatSegment(listOptions) - - // override the advancer - pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) { - listOptions.Marker = response.NextMarker - return c.client.listBlobFlatSegmentCreateRequest(ctx, listOptions) - } - - return toContainerListBlobFlatSegmentPager(pager) -} - -// ListBlobsHierarchy returns a channel of blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the -// previously-returned Marker) to get the next segment. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored. -// AutoPagerBufferSize specifies the channel's buffer size. -// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.) 
-func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager { - listOptions := o.format() - pager := c.client.ListBlobHierarchySegment(delimiter, listOptions) - - // override the advancer - pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) { - listOptions.Marker = response.NextMarker - return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, listOptions) - } - - return toContainerListBlobHierarchySegmentPager(pager) -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) { - if c.sharedKey == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") - } - - urlParts, err := NewBlobURLParts(c.URL()) - if err != nil { - return "", err - } - - // Containers do not have snapshots, nor versions. - urlParts.SAS, err = BlobSASSignatureValues{ - ContainerName: urlParts.ContainerName, - Permissions: permissions.String(), - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), - }.NewSASQueryParameters(c.sharedKey) - - return urlParts.URL(), err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go deleted file mode 100644 index 395a72a89aa..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -//ContainerLeaseClient represents lease client of container -type ContainerLeaseClient struct { - ContainerClient - leaseID *string -} - -// NewContainerLeaseClient is constructor of ContainerLeaseClient -func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) { - if leaseID == nil { - generatedUuid, err := uuid.New() - if err != nil { - return nil, err - } - leaseID = to.Ptr(generatedUuid.String()) - } - return &ContainerLeaseClient{ - ContainerClient: *c, - leaseID: leaseID, - }, nil -} - -// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 to 60 seconds, or infinite (-1). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) { - containerAcquireLeaseOptions, modifiedAccessConditions := options.format() - containerAcquireLeaseOptions.ProposedLeaseID = clc.leaseID - - resp, err := clc.client.AcquireLease(ctx, &containerAcquireLeaseOptions, modifiedAccessConditions) - if err == nil && resp.LeaseID != nil { - clc.leaseID = resp.LeaseID - } - return toContainerAcquireLeaseResponse(resp), handleError(err) -} - -// BreakLease breaks the container's previously-acquired lease (if it exists). 
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) { - containerBreakLeaseOptions, modifiedAccessConditions := options.format() - resp, err := clc.client.BreakLease(ctx, containerBreakLeaseOptions, modifiedAccessConditions) - return toContainerBreakLeaseResponse(resp), handleError(err) -} - -// ChangeLease changes the container's lease ID. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) { - if clc.leaseID == nil { - return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil") - } - - proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() - if err != nil { - return ContainerChangeLeaseResponse{}, err - } - - resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) - if err == nil && resp.LeaseID != nil { - clc.leaseID = resp.LeaseID - } - return toContainerChangeLeaseResponse(resp), handleError(err) -} - -// ReleaseLease releases the container's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) { - if clc.leaseID == nil { - return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") - } - containerReleaseLeaseOptions, modifiedAccessConditions := options.format() - resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, containerReleaseLeaseOptions, modifiedAccessConditions) - - return toContainerReleaseLeaseResponse(resp), handleError(err) -} - -// RenewLease renews the container's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) { - if clc.leaseID == nil { - return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil") - } - renewLeaseBlobOptions, modifiedAccessConditions := options.format() - resp, err := clc.client.RenewLease(ctx, *clc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) - if err == nil && resp.LeaseID != nil { - clc.leaseID = resp.LeaseID - } - return toContainerRenewLeaseResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go deleted file mode 100644 index 507993b9e5d..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go +++ /dev/null @@ -1,261 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "io" - "net/url" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// PageBlobClient represents a client to an Azure Storage page blob; -type PageBlobClient struct { - BlobClient - client *pageBlobClient -} - -// NewPageBlobClient creates a ServiceClient object using the specified URL, Azure AD credential, and options. -// Example of serviceURL: https://.blob.core.windows.net -func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &PageBlobClient{ - client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewPageBlobClientWithNoCredential creates a ServiceClient object using the specified URL and options. -// Example of serviceURL: https://.blob.core.windows.net? -func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - return &PageBlobClient{ - client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewPageBlobClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options. -// Example of serviceURL: https://.blob.core.windows.net -func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &PageBlobClient{ - client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, - }, nil -} - -// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) { - p, err := NewBlobURLParts(pb.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - endpoint := p.URL() - pipeline := pb.client.pl - return &PageBlobClient{ - client: newPageBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: pb.sharedKey, - }, - }, nil -} - -// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the version returning a URL to the base blob. 
-func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) { - p, err := NewBlobURLParts(pb.URL()) - if err != nil { - return nil, err - } - - p.VersionID = versionID - endpoint := p.URL() - - pipeline := pb.client.pl - return &PageBlobClient{ - client: newPageBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: pb.sharedKey, - }, - }, nil -} - -// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) { - createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() - - resp, err := pb.client.Create(ctx, 0, size, createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - - return toPageBlobCreateResponse(resp), handleError(err) -} - -// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. -func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - - if err != nil { - return PageBlobUploadPagesResponse{}, err - } - - uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() - - resp, err := pb.client.UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, - cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - - return toPageBlobUploadPagesResponse(resp), handleError(err) -} - -// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. -// The sourceOffset specifies the start offset of source data to copy from. -// The destOffset specifies the start offset of data in page blob will be written to. -// The count must be a multiple of 512 bytes. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. -func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, - options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) { - - uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := options.format() - - resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0, - rangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, - sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - - return toPageBlobUploadPagesFromURLResponse(resp), handleError(err) -} - -// ClearPages frees the specified pages from the page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. 
-func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) { - clearOptions := &pageBlobClientClearPagesOptions{ - Range: pageRange.format(), - } - - leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() - - resp, err := pb.client.ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, - cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - - return toPageBlobClearPagesResponse(resp), handleError(err) -} - -// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager { - getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format() - - pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) - - // Fixing Advancer - pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) { - getPageRangesOptions.Marker = response.NextMarker - req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return nil, handleError(err) - } - queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) - if err != nil { - return nil, handleError(err) - } - req.Raw().URL.RawQuery = queryValues.Encode() - return req, nil - } - - return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager) -} - -// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager { - getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format() - - getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) - - // Fixing Advancer - getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { - getPageRangesDiffOptions.Marker = response.NextMarker - req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return nil, handleError(err) - } - queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) - if err != nil { - return nil, handleError(err) - } - req.Raw().URL.RawQuery = queryValues.Encode() - return req, nil - } - - return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager) -} - -// Resize resizes the page blob to the specified size (which must be a multiple of 512). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) { - resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() - - resp, err := pb.client.Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - - return toPageBlobResizeResponse(resp), handleError(err) -} - -// UpdateSequenceNumber sets the page blob's sequence number. -func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) { - actionType, updateOptions, lac, mac := options.format() - resp, err := pb.client.UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) - - return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err) -} - -// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob. -// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. -// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and -// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. -func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) { - copySourceURL, err := url.Parse(copySource) - if err != nil { - return PageBlobCopyIncrementalResponse{}, err - } - - queryParams := copySourceURL.Query() - queryParams.Set("snapshot", prevSnapshot) - copySourceURL.RawQuery = queryParams.Encode() - - pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format() - resp, err := pb.client.CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions) - - return toPageBlobCopyIncrementalResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go deleted file mode 100644 index 3f987843904..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import "net/http" - -// ResponseError is a wrapper of error passed from service -type ResponseError interface { - Error() string - Unwrap() error - RawResponse() *http.Response - NonRetriable() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go deleted file mode 100644 index dda993d1c96..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -// GetHTTPHeaders returns the user-modifiable properties for this blob. 
-func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders { - return BlobHTTPHeaders{ - BlobContentType: bgpr.ContentType, - BlobContentEncoding: bgpr.ContentEncoding, - BlobContentLanguage: bgpr.ContentLanguage, - BlobContentDisposition: bgpr.ContentDisposition, - BlobCacheControl: bgpr.CacheControl, - BlobContentMD5: bgpr.ContentMD5, - } -} - -/////////////////////////////////////////////////////////////////////////////// - -// GetHTTPHeaders returns the user-modifiable properties for this blob. -func (r BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders { - return BlobHTTPHeaders{ - BlobContentType: r.ContentType, - BlobContentEncoding: r.ContentEncoding, - BlobContentLanguage: r.ContentLanguage, - BlobContentDisposition: r.ContentDisposition, - BlobCacheControl: r.CacheControl, - BlobContentMD5: r.ContentMD5, - } -} - -/////////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go deleted file mode 100644 index b4104def583..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go +++ /dev/null @@ -1,243 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" -) - -// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas -type AccountSASSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to SASVersion - Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() - IPRange IPRange `param:"sip"` - Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() - ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() -} - -// Sign uses an account's shared key credential to sign this signature values to produce -// the proper SAS query parameters. 
-func (v AccountSASSignatureValues) Sign(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS - if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { - return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") - } - if v.Version == "" { - v.Version = SASVersion - } - perms := &AccountSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - - startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{}) - - stringToSign := strings.Join([]string{ - sharedKeyCredential.AccountName(), - v.Permissions, - v.Services, - v.ResourceTypes, - startTime, - expiryTime, - v.IPRange.String(), - string(v.Protocol), - v.Version, - ""}, // That right, the account SAS requires a terminating extra newline - "\n") - - signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign) - if err != nil { - return SASQueryParameters{}, err - } - p := SASQueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - - // Account-specific SAS parameters - services: v.Services, - resourceTypes: v.ResourceTypes, - - // Calculated SAS signature - signature: signature, - } - - return p, nil -} - -// AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. -type AccountSASPermissions struct { - Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool -} - -// String produces the SAS permissions string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Permissions field. -func (p AccountSASPermissions) String() string { - var buffer bytes.Buffer - if p.Read { - buffer.WriteRune('r') - } - if p.Write { - buffer.WriteRune('w') - } - if p.Delete { - buffer.WriteRune('d') - } - if p.DeletePreviousVersion { - buffer.WriteRune('x') - } - if p.List { - buffer.WriteRune('l') - } - if p.Add { - buffer.WriteRune('a') - } - if p.Create { - buffer.WriteRune('c') - } - if p.Update { - buffer.WriteRune('u') - } - if p.Process { - buffer.WriteRune('p') - } - if p.Tag { - buffer.WriteRune('t') - } - if p.FilterByTags { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASPermissions's fields from a string. -func (p *AccountSASPermissions) Parse(s string) error { - *p = AccountSASPermissions{} // Clear out the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'l': - p.List = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'u': - p.Update = true - case 'p': - p.Process = true - case 'x': - p.Process = true - case 't': - p.Tag = true - case 'f': - p.FilterByTags = true - default: - return fmt.Errorf("invalid permission character: '%v'", r) - } - } - return nil -} - -// AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. 
-// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. -type AccountSASServices struct { - Blob, Queue, File bool -} - -// String produces the SAS services string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Services field. -func (s AccountSASServices) String() string { - var buffer bytes.Buffer - if s.Blob { - buffer.WriteRune('b') - } - if s.Queue { - buffer.WriteRune('q') - } - if s.File { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASServices' fields from a string. -func (s *AccountSASServices) Parse(str string) error { - *s = AccountSASServices{} // Clear out the flags - for _, r := range str { - switch r { - case 'b': - s.Blob = true - case 'q': - s.Queue = true - case 'f': - s.File = true - default: - return fmt.Errorf("invalid service character: '%v'", r) - } - } - return nil -} - -// AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. -type AccountSASResourceTypes struct { - Service, Container, Object bool -} - -// String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's ResourceTypes field. -func (rt AccountSASResourceTypes) String() string { - var buffer bytes.Buffer - if rt.Service { - buffer.WriteRune('s') - } - if rt.Container { - buffer.WriteRune('c') - } - if rt.Object { - buffer.WriteRune('o') - } - return buffer.String() -} - -// Parse initializes the AccountSASResourceType's fields from a string. -func (rt *AccountSASResourceTypes) Parse(s string) error { - *rt = AccountSASResourceTypes{} // Clear out the flags - for _, r := range s { - switch r { - case 's': - rt.Service = true - case 'c': - rt.Container = true - case 'o': - rt.Object = true - default: - return fmt.Errorf("invalid resource type: '%v'", r) - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go deleted file mode 100644 index 7efbec9b8cf..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go +++ /dev/null @@ -1,427 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "errors" - "net" - "net/url" - "strings" - "time" -) - -// SASProtocol indicates the http/https. -type SASProtocol string - -const ( - // SASProtocolHTTPS can be specified for a SAS protocol - SASProtocolHTTPS SASProtocol = "https" - - // SASProtocolHTTPSandHTTP can be specified for a SAS protocol - //SASProtocolHTTPSandHTTP SASProtocol = "https,http" -) - -// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a -// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
-func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { - ss := "" - if !startTime.IsZero() { - ss = formatSASTimeWithDefaultFormat(&startTime) - } - se := "" - if !expiryTime.IsZero() { - se = formatSASTimeWithDefaultFormat(&expiryTime) - } - sh := "" - if !snapshotTime.IsZero() { - sh = snapshotTime.Format(SnapshotTimeFormat) - } - return ss, se, sh -} - -// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. -const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601 -var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. - -// formatSASTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". -func formatSASTimeWithDefaultFormat(t *time.Time) string { - return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used -} - -// formatSASTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. -func formatSASTime(t *time.Time, format string) string { - if format != "" { - return t.Format(format) - } - return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used -} - -// parseSASTimeString try to parse sas time string. -func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) { - for _, sasTimeFormat := range SASTimeFormats { - t, err = time.Parse(sasTimeFormat, val) - if err == nil { - timeFormat = sasTimeFormat - break - } - } - - if err != nil { - err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") - } - - return -} - -// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas - -// SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters. -// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components -// to a query parameter map by calling AddToValues(). -// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. -// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). -type SASQueryParameters struct { - // All members are immutable or values so copies of this struct are goroutine-safe. 
- version string `param:"sv"` - services string `param:"ss"` - resourceTypes string `param:"srt"` - protocol SASProtocol `param:"spr"` - startTime time.Time `param:"st"` - expiryTime time.Time `param:"se"` - snapshotTime time.Time `param:"snapshot"` - ipRange IPRange `param:"sip"` - identifier string `param:"si"` - resource string `param:"sr"` - permissions string `param:"sp"` - signature string `param:"sig"` - cacheControl string `param:"rscc"` - contentDisposition string `param:"rscd"` - contentEncoding string `param:"rsce"` - contentLanguage string `param:"rscl"` - contentType string `param:"rsct"` - signedOid string `param:"skoid"` - signedTid string `param:"sktid"` - signedStart time.Time `param:"skt"` - signedService string `param:"sks"` - signedExpiry time.Time `param:"ske"` - signedVersion string `param:"skv"` - signedDirectoryDepth string `param:"sdd"` - preauthorizedAgentObjectId string `param:"saoid"` - agentObjectId string `param:"suoid"` - correlationId string `param:"scid"` - // private member used for startTime and expiryTime formatting. - stTimeFormat string - seTimeFormat string -} - -// PreauthorizedAgentObjectId returns preauthorizedAgentObjectId -func (p *SASQueryParameters) PreauthorizedAgentObjectId() string { - return p.preauthorizedAgentObjectId -} - -// AgentObjectId returns agentObjectId -func (p *SASQueryParameters) AgentObjectId() string { - return p.agentObjectId -} - -// SignedCorrelationId returns signedCorrelationId -func (p *SASQueryParameters) SignedCorrelationId() string { - return p.correlationId -} - -// SignedTid returns aignedTid -func (p *SASQueryParameters) SignedTid() string { - return p.signedTid -} - -// SignedStart returns signedStart -func (p *SASQueryParameters) SignedStart() time.Time { - return p.signedStart -} - -// SignedExpiry returns signedExpiry -func (p *SASQueryParameters) SignedExpiry() time.Time { - return p.signedExpiry -} - -// SignedService returns signedService -func (p *SASQueryParameters) SignedService() string { - return p.signedService -} - -// SignedVersion returns signedVersion -func (p *SASQueryParameters) SignedVersion() string { - return p.signedVersion -} - -// SnapshotTime returns snapshotTime -func (p *SASQueryParameters) SnapshotTime() time.Time { - return p.snapshotTime -} - -// Version returns version -func (p *SASQueryParameters) Version() string { - return p.version -} - -// Services returns services -func (p *SASQueryParameters) Services() string { - return p.services -} - -// ResourceTypes returns resourceTypes -func (p *SASQueryParameters) ResourceTypes() string { - return p.resourceTypes -} - -// Protocol returns protocol -func (p *SASQueryParameters) Protocol() SASProtocol { - return p.protocol -} - -// StartTime returns startTime -func (p *SASQueryParameters) StartTime() time.Time { - return p.startTime -} - -// ExpiryTime returns expiryTime -func (p *SASQueryParameters) ExpiryTime() time.Time { - return p.expiryTime -} - -// IPRange returns ipRange -func (p *SASQueryParameters) IPRange() IPRange { - return p.ipRange -} - -// Identifier returns identifier -func (p *SASQueryParameters) Identifier() string { - return p.identifier -} - -// Resource returns resource -func (p *SASQueryParameters) Resource() string { - return p.resource -} - -// Permissions returns permissions -func (p *SASQueryParameters) Permissions() string { - return p.permissions -} - -// Signature returns signature -func (p *SASQueryParameters) Signature() string { - return p.signature -} - -// CacheControl returns cacheControl -func (p 
*SASQueryParameters) CacheControl() string { - return p.cacheControl -} - -// ContentDisposition returns contentDisposition -func (p *SASQueryParameters) ContentDisposition() string { - return p.contentDisposition -} - -// ContentEncoding returns contentEncoding -func (p *SASQueryParameters) ContentEncoding() string { - return p.contentEncoding -} - -// ContentLanguage returns contentLanguage -func (p *SASQueryParameters) ContentLanguage() string { - return p.contentLanguage -} - -// ContentType returns sontentType -func (p *SASQueryParameters) ContentType() string { - return p.contentType -} - -// SignedDirectoryDepth returns signedDirectoryDepth -func (p *SASQueryParameters) SignedDirectoryDepth() string { - return p.signedDirectoryDepth -} - -// IPRange represents a SAS IP range's start IP and (optionally) end IP. -type IPRange struct { - Start net.IP // Not specified if length = 0 - End net.IP // Not specified if length = 0 -} - -// String returns a string representation of an IPRange. -func (ipr *IPRange) String() string { - if len(ipr.Start) == 0 { - return "" - } - start := ipr.Start.String() - if len(ipr.End) == 0 { - return start - } - return start + "-" + ipr.End.String() -} - -// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the -// query parameter map's passed-in values. If deleteSASParametersFromValues is true, -// all SAS-related query parameters are removed from the passed-in map. If -// deleteSASParametersFromValues is false, the map passed-in map is unaltered. -func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { - p := SASQueryParameters{} - for k, v := range values { - val := v[0] - isSASKey := true - switch strings.ToLower(k) { - case "sv": - p.version = val - case "ss": - p.services = val - case "srt": - p.resourceTypes = val - case "spr": - p.protocol = SASProtocol(val) - case "snapshot": - p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) - case "st": - p.startTime, p.stTimeFormat, _ = parseSASTimeString(val) - case "se": - p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val) - case "sip": - dashIndex := strings.Index(val, "-") - if dashIndex == -1 { - p.ipRange.Start = net.ParseIP(val) - } else { - p.ipRange.Start = net.ParseIP(val[:dashIndex]) - p.ipRange.End = net.ParseIP(val[dashIndex+1:]) - } - case "si": - p.identifier = val - case "sr": - p.resource = val - case "sp": - p.permissions = val - case "sig": - p.signature = val - case "rscc": - p.cacheControl = val - case "rscd": - p.contentDisposition = val - case "rsce": - p.contentEncoding = val - case "rscl": - p.contentLanguage = val - case "rsct": - p.contentType = val - case "skoid": - p.signedOid = val - case "sktid": - p.signedTid = val - case "skt": - p.signedStart, _ = time.Parse(SASTimeFormat, val) - case "ske": - p.signedExpiry, _ = time.Parse(SASTimeFormat, val) - case "sks": - p.signedService = val - case "skv": - p.signedVersion = val - case "sdd": - p.signedDirectoryDepth = val - case "saoid": - p.preauthorizedAgentObjectId = val - case "suoid": - p.agentObjectId = val - case "scid": - p.correlationId = val - default: - isSASKey = false // We didn't recognize the query parameter - } - if isSASKey && deleteSASParametersFromValues { - delete(values, k) - } - } - return p -} - -// AddToValues adds the SAS components to the specified query parameters map. 
-func (p *SASQueryParameters) addToValues(v url.Values) url.Values { - if p.version != "" { - v.Add("sv", p.version) - } - if p.services != "" { - v.Add("ss", p.services) - } - if p.resourceTypes != "" { - v.Add("srt", p.resourceTypes) - } - if p.protocol != "" { - v.Add("spr", string(p.protocol)) - } - if !p.startTime.IsZero() { - v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) - } - if !p.expiryTime.IsZero() { - v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) - } - if len(p.ipRange.Start) > 0 { - v.Add("sip", p.ipRange.String()) - } - if p.identifier != "" { - v.Add("si", p.identifier) - } - if p.resource != "" { - v.Add("sr", p.resource) - } - if p.permissions != "" { - v.Add("sp", p.permissions) - } - if p.signedOid != "" { - v.Add("skoid", p.signedOid) - v.Add("sktid", p.signedTid) - v.Add("skt", p.signedStart.Format(SASTimeFormat)) - v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) - v.Add("sks", p.signedService) - v.Add("skv", p.signedVersion) - } - if p.signature != "" { - v.Add("sig", p.signature) - } - if p.cacheControl != "" { - v.Add("rscc", p.cacheControl) - } - if p.contentDisposition != "" { - v.Add("rscd", p.contentDisposition) - } - if p.contentEncoding != "" { - v.Add("rsce", p.contentEncoding) - } - if p.contentLanguage != "" { - v.Add("rscl", p.contentLanguage) - } - if p.contentType != "" { - v.Add("rsct", p.contentType) - } - if p.signedDirectoryDepth != "" { - v.Add("sdd", p.signedDirectoryDepth) - } - if p.preauthorizedAgentObjectId != "" { - v.Add("saoid", p.preauthorizedAgentObjectId) - } - if p.agentObjectId != "" { - v.Add("suoid", p.agentObjectId) - } - if p.correlationId != "" { - v.Add("scid", p.correlationId) - } - return v -} - -// Encode encodes the SAS query parameters into URL encoded form sorted by key. -func (p *SASQueryParameters) Encode() string { - v := url.Values{} - p.addToValues(v) - return v.Encode() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go deleted file mode 100644 index e75dd10b31e..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go +++ /dev/null @@ -1,266 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - "net/url" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -//nolint -const ( - // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. - ContainerNameRoot = "$root" - - // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. - ContainerNameLogs = "$logs" -) - -// ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. -type ServiceClient struct { - client *serviceClient - sharedKey *SharedKeyCredential -} - -// URL returns the URL endpoint used by the ServiceClient object. -func (s ServiceClient) URL() string { - return s.client.endpoint -} - -// NewServiceClient creates a ServiceClient object using the specified URL, Azure AD credential, and options. 
-// Example of serviceURL: https://.blob.core.windows.net -func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(serviceURL, conOptions) - - return &ServiceClient{ - client: newServiceClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewServiceClientWithNoCredential creates a ServiceClient object using the specified URL and options. -// Example of serviceURL: https://.blob.core.windows.net? -func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(serviceURL, conOptions) - - return &ServiceClient{ - client: newServiceClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewServiceClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options. -// Example of serviceURL: https://.blob.core.windows.net -func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(serviceURL, conOptions) - - return &ServiceClient{ - client: newServiceClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, nil -} - -// NewServiceClientFromConnectionString creates a service client from the given connection string. -//nolint -func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) { - endpoint, credential, err := parseConnectionString(connectionString) - if err != nil { - return nil, err - } - return NewServiceClientWithSharedKey(endpoint, credential, options) -} - -// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of -// ServiceClient's URL. The new ContainerClient uses the same request policy pipeline as the ServiceClient. -// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's -// NewContainerClient method. -func (s *ServiceClient) NewContainerClient(containerName string) (*ContainerClient, error) { - containerURL := appendToURLPath(s.client.endpoint, containerName) - return &ContainerClient{ - client: newContainerClient(containerURL, s.client.pl), - sharedKey: s.sharedKey, - }, nil -} - -// CreateContainer is a lifecycle method to creates a new container under the specified account. -// If the container with the same name already exists, a ResourceExistsError will be raised. -// This method returns a client with which to interact with the newly created container. 
-func (s *ServiceClient) CreateContainer(ctx context.Context, containerName string, options *ContainerCreateOptions) (ContainerCreateResponse, error) { - containerClient, err := s.NewContainerClient(containerName) - if err != nil { - return ContainerCreateResponse{}, err - } - containerCreateResp, err := containerClient.Create(ctx, options) - return containerCreateResp, err -} - -// DeleteContainer is a lifecycle method that marks the specified container for deletion. -// The container and any blobs contained within it are later deleted during garbage collection. -// If the container is not found, a ResourceNotFoundError will be raised. -func (s *ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *ContainerDeleteOptions) (ContainerDeleteResponse, error) { - containerClient, _ := s.NewContainerClient(containerName) - containerDeleteResp, err := containerClient.Delete(ctx, options) - return containerDeleteResp, err -} - -// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required) -func appendToURLPath(u string, name string) string { - // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f" - // When you call url.Parse() this is what you'll get: - // Scheme: "https" - // Opaque: "" - // User: nil - // Host: "ms.com" - // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash - // RawPath: "" - // ForceQuery: false - // RawQuery: "k1=v1&k2=v2" - // Fragment: "f" - uri, _ := url.Parse(u) - - if len(uri.Path) == 0 || uri.Path[len(uri.Path)-1] != '/' { - uri.Path += "/" // Append "/" to end before appending name - } - uri.Path += name - return uri.String() -} - -// GetAccountInfo provides account level information -func (s *ServiceClient) GetAccountInfo(ctx context.Context, o *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) { - getAccountInfoOptions := o.format() - resp, err := s.client.GetAccountInfo(ctx, getAccountInfoOptions) - return toServiceGetAccountInfoResponse(resp), handleError(err) -} - -// ListContainers operation returns a pager of the containers under the specified account. -// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. -func (s *ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager { - listOptions := o.format() - pager := s.client.ListContainersSegment(listOptions) - //TODO: .Err()? - //// override the generated advancer, which is incorrect - //if pager.Err() != nil { - // return pager - //} - - pager.advancer = func(ctx context.Context, response serviceClientListContainersSegmentResponse) (*policy.Request, error) { - if response.ListContainersSegmentResponse.NextMarker == nil { - return nil, handleError(errors.New("unexpected missing NextMarker")) - } - req, err := s.client.listContainersSegmentCreateRequest(ctx, listOptions) - if err != nil { - return nil, handleError(err) - } - queryValues, _ := url.ParseQuery(req.Raw().URL.RawQuery) - queryValues.Set("marker", *response.ListContainersSegmentResponse.NextMarker) - - req.Raw().URL.RawQuery = queryValues.Encode() - return req, nil - } - - return toServiceListContainersSegmentPager(*pager) -} - -// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. 
-func (s *ServiceClient) GetProperties(ctx context.Context, o *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) { - getPropertiesOptions := o.format() - resp, err := s.client.GetProperties(ctx, getPropertiesOptions) - - return toServiceGetPropertiesResponse(resp), handleError(err) -} - -// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics. -// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved. -func (s *ServiceClient) SetProperties(ctx context.Context, o *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) { - properties, setPropertiesOptions := o.format() - resp, err := s.client.SetProperties(ctx, properties, setPropertiesOptions) - - return toServiceSetPropertiesResponse(resp), handleError(err) -} - -// GetStatistics Retrieves statistics related to replication for the Blob service. -// It is only available when read-access geo-redundant replication is enabled for the storage account. -// With geo-redundant replication, Azure Storage maintains your data durable -// in two locations. In both locations, Azure Storage constantly maintains -// multiple healthy replicas of your data. The location where you read, -// create, update, or delete data is the primary storage account location. -// The primary location exists in the region you choose at the time you -// create an account via the Azure Management Azure classic portal, for -// example, North Central US. The location to which your data is replicated -// is the secondary location. The secondary location is automatically -// determined based on the location of the primary; it is in a second data -// center that resides in the same region as the primary location. Read-only -// access is available from the secondary location, if read-access geo-redundant -// replication is enabled for your storage account. -func (s *ServiceClient) GetStatistics(ctx context.Context, o *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) { - getStatisticsOptions := o.format() - resp, err := s.client.GetStatistics(ctx, getStatisticsOptions) - - return toServiceGetStatisticsResponse(resp), handleError(err) -} - -// CanGetAccountSASToken checks if shared key in ServiceClient is nil -func (s *ServiceClient) CanGetAccountSASToken() bool { - return s.sharedKey != nil -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -// This validity can be checked with CanGetAccountSASToken(). -func (s *ServiceClient) GetSASURL(resources AccountSASResourceTypes, permissions AccountSASPermissions, start time.Time, expiry time.Time) (string, error) { - if s.sharedKey == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") - } - - qps, err := AccountSASSignatureValues{ - Version: SASVersion, - Protocol: SASProtocolHTTPS, - Permissions: permissions.String(), - Services: "b", - ResourceTypes: resources.String(), - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), - }.Sign(s.sharedKey) - if err != nil { - return "", err - } - - endpoint := s.URL() - if !strings.HasSuffix(endpoint, "/") { - endpoint += "/" - } - endpoint += "?" + qps.Encode() - - return endpoint, nil -} - -// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression. 
-// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. -// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags -// eg. "dog='germanshepherd' and penguin='emperorpenguin'" -// To specify a container, eg. "@container=’containerName’ and Name = ‘C’" -func (s *ServiceClient) FindBlobsByTags(ctx context.Context, o *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) { - // TODO: Use pager here? Missing support from zz_generated_pagers.go - serviceFilterBlobsOptions := o.pointer() - resp, err := s.client.FilterBlobs(ctx, serviceFilterBlobsOptions) - return toServiceFilterBlobsResponse(resp), err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go deleted file mode 100644 index 08c9c873090..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go +++ /dev/null @@ -1,236 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "net/http" - "sort" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// InternalError is an internal error type that all errors get wrapped in. -type InternalError struct { - cause error -} - -// Error checks if InternalError can be cast as StorageError -func (e *InternalError) Error() string { - if (errors.Is(e.cause, StorageError{})) { - return e.cause.Error() - } - - return fmt.Sprintf("===== INTERNAL ERROR =====\n%s", e.cause.Error()) -} - -// Is casts err into InternalError -func (e *InternalError) Is(err error) bool { - _, ok := err.(*InternalError) - - return ok -} - -// As casts target interface into InternalError -func (e *InternalError) As(target interface{}) bool { - nt, ok := target.(**InternalError) - - if ok { - *nt = e - return ok - } - - //goland:noinspection GoErrorsAs - return errors.As(e.cause, target) -} - -// StorageError is the internal struct that replaces the generated StorageError. -// TL;DR: This implements xml.Unmarshaler, and when the original StorageError is substituted, this unmarshaler kicks in. -// This handles the description and details. defunkifyStorageError handles the response, cause, and service code. 
-type StorageError struct { - response *http.Response - description string - - ErrorCode StorageErrorCode - details map[string]string -} - -func handleError(err error) error { - if err == nil { - return nil - } - var respErr *azcore.ResponseError - if errors.As(err, &respErr) { - return &InternalError{responseErrorToStorageError(respErr)} - } - - if err != nil { - return &InternalError{err} - } - - return nil -} - -// converts an *azcore.ResponseError to a *StorageError, or if that fails, a *InternalError -func responseErrorToStorageError(responseError *azcore.ResponseError) error { - var storageError StorageError - body, err := runtime.Payload(responseError.RawResponse) - if err != nil { - goto Default - } - if len(body) > 0 { - if err := xml.Unmarshal(body, &storageError); err != nil { - goto Default - } - } - - storageError.response = responseError.RawResponse - - storageError.ErrorCode = StorageErrorCode(responseError.RawResponse.Header.Get("x-ms-error-code")) - - if code, ok := storageError.details["Code"]; ok { - storageError.ErrorCode = StorageErrorCode(code) - delete(storageError.details, "Code") - } - - return &storageError - -Default: - return &InternalError{ - cause: responseError, - } -} - -// StatusCode returns service-error information. The caller may examine these values but should not modify any of them. -func (e *StorageError) StatusCode() int { - return e.response.StatusCode -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e StorageError) Error() string { - b := &bytes.Buffer{} - - if e.response != nil { - _, _ = fmt.Fprintf(b, "===== RESPONSE ERROR (ErrorCode=%s) =====\n", e.ErrorCode) - _, _ = fmt.Fprintf(b, "Description=%s, Details: ", e.description) - if len(e.details) == 0 { - b.WriteString("(none)\n") - } else { - b.WriteRune('\n') - keys := make([]string, 0, len(e.details)) - // Alphabetize the details - for k := range e.details { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - _, _ = fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) - } - } - // req := azcore.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request - // TODO: Come Here Mohit Adele - //writeRequestWithResponse(b, &azcore.Request{Request: e.response.Request}, e.response) - } - - return b.String() - ///azcore.writeRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) - // return e.ErrorNode.Error(b.String()) -} - -// Is checks if err can be cast as StorageError -func (e StorageError) Is(err error) bool { - _, ok := err.(StorageError) - _, ok2 := err.(*StorageError) - - return ok || ok2 -} - -// Response returns StorageError.response -func (e StorageError) Response() *http.Response { - return e.response -} - -//nolint -func writeRequestWithResponse(b *bytes.Buffer, request *policy.Request, response *http.Response) { - // Write the request into the buffer. - _, _ = fmt.Fprint(b, " "+request.Raw().Method+" "+request.Raw().URL.String()+"\n") - writeHeader(b, request.Raw().Header) - if response != nil { - _, _ = fmt.Fprintln(b, " --------------------------------------------------------------------------------") - _, _ = fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n") - writeHeader(b, response.Header) - } -} - -// formatHeaders appends an HTTP request's or response's header into a Buffer. 
-//nolint -func writeHeader(b *bytes.Buffer, header map[string][]string) { - if len(header) == 0 { - b.WriteString(" (no headers)\n") - return - } - keys := make([]string, 0, len(header)) - // Alphabetize the headers - for k := range header { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - // Redact the value of any Authorization header to prevent security information from persisting in logs - value := interface{}("REDACTED") - if !strings.EqualFold(k, "Authorization") { - value = header[k] - } - _, _ = fmt.Fprintf(b, " %s: %+v\n", k, value) - } -} - -// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). -func (e *StorageError) Temporary() bool { - if e.response != nil { - if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) { - return true - } - } - - return false -} - -// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors. -//nolint -func (e *StorageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { - tokName := "" - var t xml.Token - for t, err = d.Token(); err == nil; t, err = d.Token() { - switch tt := t.(type) { - case xml.StartElement: - tokName = tt.Name.Local - case xml.EndElement: - tokName = "" - case xml.CharData: - switch tokName { - case "": - continue - case "Message": - e.description = string(tt) - default: - if e.details == nil { - e.details = map[string]string{} - } - e.details[tokName] = string(tt) - } - } - } - - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go deleted file mode 100644 index 341858f1ad8..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go +++ /dev/null @@ -1,107 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "errors" - "fmt" - "io" - "strconv" -) - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// Raw converts PageRange into primitive start, end integers of type int64 -func (pr *PageRange) Raw() (start, end int64) { - if pr.Start != nil { - start = *pr.Start - } - if pr.End != nil { - end = *pr.End - } - - return -} - -// HttpRange defines a range of bytes within an HTTP resource, starting at offset and -// ending at offset+count. A zero-value HttpRange indicates the entire resource. An HttpRange -// which has an offset but na zero value count indicates from the offset to the resource's end. 
-type HttpRange struct { - Offset int64 - Count int64 -} - -func NewHttpRange(offset, count int64) *HttpRange { - return &HttpRange{Offset: offset, Count: count} -} - -func (r *HttpRange) format() *string { - if r == nil || (r.Offset == 0 && r.Count == 0) { // Do common case first for performance - return nil // No specified range - } - endOffset := "" // if count == CountToEnd (0) - if r.Count > 0 { - endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) - } - dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) - return &dataRange -} - -func getSourceRange(offset, count *int64) *string { - if offset == nil && count == nil { - return nil - } - newOffset := int64(0) - newCount := int64(CountToEnd) - - if offset != nil { - newOffset = *offset - } - - if count != nil { - newCount = *count - } - - return (&HttpRange{Offset: newOffset, Count: newCount}).format() -} - -func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { - if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long - return 0, nil - } - - err := validateSeekableStreamAt0(body) - if err != nil { - return 0, err - } - - count, err := body.Seek(0, io.SeekEnd) - if err != nil { - return 0, errors.New("body stream must be seekable") - } - - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return 0, err - } - return count, nil -} - -// return an error if body is not a valid seekable stream at 0 -func validateSeekableStreamAt0(body io.ReadSeeker) error { - if body == nil { // nil body's are "logically" seekable to 0 - return nil - } - if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { - // Help detect programmer error - if err != nil { - return errors.New("body stream must be seekable") - } - return errors.New("body stream must be set to position 0") - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go deleted file mode 100644 index 93a2b1a7007..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -const ( - // ETagNone represents an empty entity tag. - ETagNone = "" - - // ETagAny matches any entity tag. - ETagAny = "*" -) - -// ContainerAccessConditions identifies container-specific access conditions which you optionally set. -type ContainerAccessConditions struct { - ModifiedAccessConditions *ModifiedAccessConditions - LeaseAccessConditions *LeaseAccessConditions -} - -func (ac *ContainerAccessConditions) format() (*ModifiedAccessConditions, *LeaseAccessConditions) { - if ac == nil { - return nil, nil - } - - return ac.ModifiedAccessConditions, ac.LeaseAccessConditions -} - -// BlobAccessConditions identifies blob-specific access conditions which you optionally set. 
-type BlobAccessConditions struct { - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (ac *BlobAccessConditions) format() (*LeaseAccessConditions, *ModifiedAccessConditions) { - if ac == nil { - return nil, nil - } - - return ac.LeaseAccessConditions, ac.ModifiedAccessConditions -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go deleted file mode 100644 index 19c3fef66a9..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go +++ /dev/null @@ -1,184 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import "time" - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobCreateOptions provides set of configurations for Create Append Blob operation -type AppendBlobCreateOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - BlobAccessConditions *BlobAccessConditions - - HTTPHeaders *BlobHTTPHeaders - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - // Optional. Used to set blob tags in various blob operations. - TagsMap map[string]string - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs - // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source - // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. - // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string -} - -func (o *AppendBlobCreateOptions) format() (*appendBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, - *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { - - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := appendBlobClientCreateOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), - Metadata: o.Metadata, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions -} - -// AppendBlobCreateResponse contains the response from method AppendBlobClient.Create. 
-type AppendBlobCreateResponse struct { - appendBlobClientCreateResponse -} - -func toAppendBlobCreateResponse(resp appendBlobClientCreateResponse) AppendBlobCreateResponse { - return AppendBlobCreateResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobAppendBlockOptions provides set of configurations for AppendBlock operation -type AppendBlobAppendBlockOptions struct { - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - - AppendPositionAccessConditions *AppendPositionAccessConditions - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - - BlobAccessConditions *BlobAccessConditions -} - -func (o *AppendBlobAppendBlockOptions) format() (*appendBlobClientAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &appendBlobClientAppendBlockOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions -} - -// AppendBlobAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. -type AppendBlobAppendBlockResponse struct { - appendBlobClientAppendBlockResponse -} - -func toAppendBlobAppendBlockResponse(resp appendBlobClientAppendBlockResponse) AppendBlobAppendBlockResponse { - return AppendBlobAppendBlockResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobAppendBlockFromURLOptions provides set of configurations for AppendBlockFromURL operation -type AppendBlobAppendBlockFromURLOptions struct { - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 []byte - - AppendPositionAccessConditions *AppendPositionAccessConditions - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - BlobAccessConditions *BlobAccessConditions - // Optional, you can specify whether a particular range of the blob is read - Offset *int64 - - Count *int64 -} - -func (o *AppendBlobAppendBlockFromURLOptions) format() (*appendBlobClientAppendBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *AppendPositionAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil, nil - } - - options := &appendBlobClientAppendBlockFromURLOptions{ - SourceRange: getSourceRange(o.Offset, o.Count), - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions -} - -// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. -type AppendBlobAppendBlockFromURLResponse struct { - appendBlobClientAppendBlockFromURLResponse -} - -func toAppendBlobAppendBlockFromURLResponse(resp appendBlobClientAppendBlockFromURLResponse) AppendBlobAppendBlockFromURLResponse { - return AppendBlobAppendBlockFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobSealOptions provides set of configurations for SealAppendBlob operation -type AppendBlobSealOptions struct { - BlobAccessConditions *BlobAccessConditions - AppendPositionAccessConditions *AppendPositionAccessConditions -} - -func (o *AppendBlobSealOptions) format() (leaseAccessConditions *LeaseAccessConditions, - modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return -} - -// AppendBlobSealResponse contains the response from method AppendBlobClient.Seal. -type AppendBlobSealResponse struct { - appendBlobClientSealResponse -} - -func toAppendBlobSealResponse(resp appendBlobClientSealResponse) AppendBlobSealResponse { - return AppendBlobSealResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go deleted file mode 100644 index f4425b18c82..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go +++ /dev/null @@ -1,478 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "context" - "io" - "net/http" - "time" -) - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobDownloadOptions provides set of configurations for Download blob operation -type BlobDownloadOptions struct { - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - - // Optional, you can specify whether a particular range of the blob is read - Offset *int64 - Count *int64 - - BlobAccessConditions *BlobAccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo -} - -func (o *BlobDownloadOptions) format() (*blobClientDownloadOptions, *LeaseAccessConditions, *CpkInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - offset := int64(0) - count := int64(CountToEnd) - - if o.Offset != nil { - offset = *o.Offset - } - - if o.Count != nil { - count = *o.Count - } - - basics := blobClientDownloadOptions{ - RangeGetContentMD5: o.RangeGetContentMD5, - Range: (&HttpRange{Offset: offset, Count: count}).format(), - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions -} - -// BlobDownloadResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. -type BlobDownloadResponse struct { - blobClientDownloadResponse - ctx context.Context - b *BlobClient - getInfo HTTPGetterInfo - ObjectReplicationRules []ObjectReplicationPolicy -} - -// Body constructs new RetryReader stream for reading data. If a connection fails -// while reading, it will make additional requests to reestablish a connection and -// continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0 -// (the default), returns the original response body and no retries will be performed. -// Pass in nil for options to accept the default options. -func (r *BlobDownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser { - if options == nil { - options = &RetryReaderOptions{} - } - - if options.MaxRetryRequests == 0 { // No additional retries - return r.RawResponse.Body - } - return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options, - func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { - accessConditions := &BlobAccessConditions{ - ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag}, - } - options := BlobDownloadOptions{ - Offset: &getInfo.Offset, - Count: &getInfo.Count, - BlobAccessConditions: accessConditions, - CpkInfo: options.CpkInfo, - //CpkScopeInfo: o.CpkScopeInfo, - } - resp, err := r.b.Download(ctx, &options) - if err != nil { - return nil, err - } - return resp.RawResponse, err - }, - ) -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobDeleteOptions provides set of configurations for Delete blob operation -type BlobDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. 
only: Delete only the blob's snapshots and not the blob itself - DeleteSnapshots *DeleteSnapshotsOptionType - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlobDeleteOptions) format() (*blobClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := blobClientDeleteOptions{ - DeleteSnapshots: o.DeleteSnapshots, - } - - if o.BlobAccessConditions == nil { - return &basics, nil, nil - } - - return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions -} - -// BlobDeleteResponse contains the response from method BlobClient.Delete. -type BlobDeleteResponse struct { - blobClientDeleteResponse -} - -func toBlobDeleteResponse(resp blobClientDeleteResponse) BlobDeleteResponse { - return BlobDeleteResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobUndeleteOptions provides set of configurations for Blob Undelete operation -type BlobUndeleteOptions struct { -} - -func (o *BlobUndeleteOptions) format() *blobClientUndeleteOptions { - return nil -} - -// BlobUndeleteResponse contains the response from method BlobClient.Undelete. -type BlobUndeleteResponse struct { - blobClientUndeleteResponse -} - -func toBlobUndeleteResponse(resp blobClientUndeleteResponse) BlobUndeleteResponse { - return BlobUndeleteResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetTierOptions provides set of configurations for SetTier on blob operation -type BlobSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobSetTierOptions) format() (*blobClientSetTierOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := blobClientSetTierOptions{RehydratePriority: o.RehydratePriority} - return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// BlobSetTierResponse contains the response from method BlobClient.SetTier. 
-type BlobSetTierResponse struct { - blobClientSetTierResponse -} - -func toBlobSetTierResponse(resp blobClientSetTierResponse) BlobSetTierResponse { - return BlobSetTierResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobGetPropertiesOptions provides set of configurations for GetProperties blob operation -type BlobGetPropertiesOptions struct { - BlobAccessConditions *BlobAccessConditions - CpkInfo *CpkInfo -} - -func (o *BlobGetPropertiesOptions) format() (blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, - leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format() - return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions -} - -// ObjectReplicationRules struct -type ObjectReplicationRules struct { - RuleId string - Status string -} - -// ObjectReplicationPolicy are deserialized attributes -type ObjectReplicationPolicy struct { - PolicyId *string - Rules *[]ObjectReplicationRules -} - -// BlobGetPropertiesResponse reformat the GetPropertiesResponse object for easy consumption -type BlobGetPropertiesResponse struct { - blobClientGetPropertiesResponse - - // deserialized attributes - ObjectReplicationRules []ObjectReplicationPolicy -} - -func toGetBlobPropertiesResponse(resp blobClientGetPropertiesResponse) BlobGetPropertiesResponse { - getResp := BlobGetPropertiesResponse{ - blobClientGetPropertiesResponse: resp, - ObjectReplicationRules: deserializeORSPolicies(resp.ObjectReplicationRules), - } - return getResp -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetHTTPHeadersOptions provides set of configurations for SetHTTPHeaders on blob operation -type BlobSetHTTPHeadersOptions struct { - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobSetHTTPHeadersOptions) format() (*blobClientSetHTTPHeadersOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// BlobSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. -type BlobSetHTTPHeadersResponse struct { - blobClientSetHTTPHeadersResponse -} - -func toBlobSetHTTPHeadersResponse(resp blobClientSetHTTPHeadersResponse) BlobSetHTTPHeadersResponse { - return BlobSetHTTPHeadersResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetMetadataOptions provides set of configurations for Set Metadata on blob operation -type BlobSetMetadataOptions struct { - LeaseAccessConditions *LeaseAccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobSetMetadataOptions) format() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, - cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions -} - -// BlobSetMetadataResponse contains the response from method BlobClient.SetMetadata. 
-type BlobSetMetadataResponse struct { - blobClientSetMetadataResponse -} - -func toBlobSetMetadataResponse(resp blobClientSetMetadataResponse) BlobSetMetadataResponse { - return BlobSetMetadataResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobCreateSnapshotOptions provides set of configurations for CreateSnapshot of blob operation -type BlobCreateSnapshotOptions struct { - Metadata map[string]string - LeaseAccessConditions *LeaseAccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobCreateSnapshotOptions) format() (blobSetMetadataOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, - cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - basics := blobClientCreateSnapshotOptions{ - Metadata: o.Metadata, - } - - return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions -} - -// BlobCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot -type BlobCreateSnapshotResponse struct { - blobClientCreateSnapshotResponse -} - -func toBlobCreateSnapshotResponse(resp blobClientCreateSnapshotResponse) BlobCreateSnapshotResponse { - return BlobCreateSnapshotResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobStartCopyOptions provides set of configurations for StartCopyFromURL blob operation -type BlobStartCopyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Used to set blob tags in various blob operations. - TagsMap map[string]string - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs - // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source - // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. - // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. 
- Tier *AccessTier - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - ModifiedAccessConditions *ModifiedAccessConditions - - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *BlobStartCopyOptions) format() (blobStartCopyFromUrlOptions *blobClientStartCopyFromURLOptions, - sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - basics := blobClientStartCopyFromURLOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), - Metadata: o.Metadata, - RehydratePriority: o.RehydratePriority, - SealBlob: o.SealBlob, - Tier: o.Tier, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - } - - return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions -} - -// BlobStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. -type BlobStartCopyFromURLResponse struct { - blobClientStartCopyFromURLResponse -} - -func toBlobStartCopyFromURLResponse(resp blobClientStartCopyFromURLResponse) BlobStartCopyFromURLResponse { - return BlobStartCopyFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobAbortCopyOptions provides set of configurations for AbortCopyFromURL operation -type BlobAbortCopyOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *BlobAbortCopyOptions) format() (blobAbortCopyFromUrlOptions *blobClientAbortCopyFromURLOptions, - leaseAccessConditions *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - return nil, o.LeaseAccessConditions -} - -// BlobAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL -type BlobAbortCopyFromURLResponse struct { - blobClientAbortCopyFromURLResponse -} - -func toBlobAbortCopyFromURLResponse(resp blobClientAbortCopyFromURLResponse) BlobAbortCopyFromURLResponse { - return BlobAbortCopyFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetTagsOptions provides set of configurations for SetTags operation -type BlobSetTagsOptions struct { - // The version id parameter is an opaque DateTime value that, when present, - // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - VersionID *string - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Optional header, Specifies the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 []byte - - TagsMap map[string]string - - ModifiedAccessConditions *ModifiedAccessConditions - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *BlobSetTagsOptions) format() (*blobClientSetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil - } - - options := &blobClientSetTagsOptions{ - Tags: serializeBlobTags(o.TagsMap), - TransactionalContentMD5: o.TransactionalContentMD5, - TransactionalContentCRC64: o.TransactionalContentCRC64, - VersionID: o.VersionID, - } - - return options, o.ModifiedAccessConditions, o.LeaseAccessConditions -} - -// BlobSetTagsResponse contains the response from method BlobClient.SetTags -type BlobSetTagsResponse struct { - blobClientSetTagsResponse -} - -func toBlobSetTagsResponse(resp blobClientSetTagsResponse) BlobSetTagsResponse { - return BlobSetTagsResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobGetTagsOptions provides set of configurations for GetTags operation -type BlobGetTagsOptions struct { - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. - Snapshot *string - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string - - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlobGetTagsOptions) format() (*blobClientGetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil - } - - options := &blobClientGetTagsOptions{ - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - - return options, modifiedAccessConditions, leaseAccessConditions -} - -// BlobGetTagsResponse contains the response from method BlobClient.GetTags -type BlobGetTagsResponse struct { - blobClientGetTagsResponse -} - -func toBlobGetTagsResponse(resp blobClientGetTagsResponse) BlobGetTagsResponse { - return BlobGetTagsResponse{resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go deleted file mode 100644 index 4e574622cca..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go +++ /dev/null @@ -1,160 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobAcquireLeaseOptions provides set of configurations for AcquireLeaseBlob operation -type BlobAcquireLeaseOptions struct { - // Specifies the Duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease Duration cannot be changed using renew or change. 
- Duration *int32 - - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobAcquireLeaseOptions) format() (blobClientAcquireLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return blobClientAcquireLeaseOptions{}, nil - } - return blobClientAcquireLeaseOptions{ - Duration: o.Duration, - }, o.ModifiedAccessConditions -} - -// BlobAcquireLeaseResponse contains the response from method BlobLeaseClient.AcquireLease. -type BlobAcquireLeaseResponse struct { - blobClientAcquireLeaseResponse -} - -func toBlobAcquireLeaseResponse(resp blobClientAcquireLeaseResponse) BlobAcquireLeaseResponse { - return BlobAcquireLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobBreakLeaseOptions provides set of configurations for BreakLeaseBlob operation -type BlobBreakLeaseOptions struct { - // For a break operation, proposed Duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease - // is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than - // the break period. If this header does not appear with a break operation, a fixed-Duration lease breaks after the remaining - // lease period elapses, and an infinite lease breaks immediately. - BreakPeriod *int32 - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobBreakLeaseOptions) format() (*blobClientBreakLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - if o.BreakPeriod != nil { - period := leasePeriodPointer(*o.BreakPeriod) - return &blobClientBreakLeaseOptions{ - BreakPeriod: period, - }, o.ModifiedAccessConditions - } - - return nil, o.ModifiedAccessConditions -} - -// BlobBreakLeaseResponse contains the response from method BlobLeaseClient.BreakLease. 
-type BlobBreakLeaseResponse struct { - blobClientBreakLeaseResponse -} - -func toBlobBreakLeaseResponse(resp blobClientBreakLeaseResponse) BlobBreakLeaseResponse { - return BlobBreakLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobChangeLeaseOptions provides set of configurations for ChangeLeaseBlob operation -type BlobChangeLeaseOptions struct { - ProposedLeaseID *string - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobChangeLeaseOptions) format() (*string, *blobClientChangeLeaseOptions, *ModifiedAccessConditions, error) { - generatedUuid, err := uuid.New() - if err != nil { - return nil, nil, nil, err - } - leaseID := to.Ptr(generatedUuid.String()) - if o == nil { - return leaseID, nil, nil, nil - } - - if o.ProposedLeaseID == nil { - o.ProposedLeaseID = leaseID - } - - return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, nil -} - -// BlobChangeLeaseResponse contains the response from method BlobLeaseClient.ChangeLease -type BlobChangeLeaseResponse struct { - blobClientChangeLeaseResponse -} - -func toBlobChangeLeaseResponse(resp blobClientChangeLeaseResponse) BlobChangeLeaseResponse { - return BlobChangeLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobRenewLeaseOptions provides set of configurations for RenewLeaseBlob operation -type BlobRenewLeaseOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobRenewLeaseOptions) format() (*blobClientRenewLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// BlobRenewLeaseResponse contains the response from method BlobClient.RenewLease. -type BlobRenewLeaseResponse struct { - blobClientRenewLeaseResponse -} - -func toBlobRenewLeaseResponse(resp blobClientRenewLeaseResponse) BlobRenewLeaseResponse { - return BlobRenewLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ReleaseLeaseBlobOptions provides set of configurations for ReleaseLeaseBlob operation -type ReleaseLeaseBlobOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ReleaseLeaseBlobOptions) format() (*blobClientReleaseLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// BlobReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. -type BlobReleaseLeaseResponse struct { - blobClientReleaseLeaseResponse -} - -func toBlobReleaseLeaseResponse(resp blobClientReleaseLeaseResponse) BlobReleaseLeaseResponse { - return BlobReleaseLeaseResponse{resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go deleted file mode 100644 index 06d4368557a..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go +++ /dev/null @@ -1,272 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import "time" - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobUploadOptions provides set of configurations for UploadBlockBlob operation -type BlockBlobUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - TagsMap map[string]string - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - - HTTPHeaders *BlobHTTPHeaders - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobUploadOptions) format() (*blockBlobClientUploadOptions, *BlobHTTPHeaders, *LeaseAccessConditions, - *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - basics := blockBlobClientUploadOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), - Metadata: o.Metadata, - Tier: o.Tier, - TransactionalContentMD5: o.TransactionalContentMD5, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions -} - -// BlockBlobUploadResponse contains the response from method BlockBlobClient.Upload. -type BlockBlobUploadResponse struct { - blockBlobClientUploadResponse -} - -func toBlockBlobUploadResponse(resp blockBlobClientUploadResponse) BlockBlobUploadResponse { - return BlockBlobUploadResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobStageBlockOptions provides set of configurations for StageBlock operation -type BlockBlobStageBlockOptions struct { - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - - LeaseAccessConditions *LeaseAccessConditions - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -func (o *BlockBlobStageBlockOptions) format() (*blockBlobClientStageBlockOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo) { - if o == nil { - return nil, nil, nil, nil - } - - return &blockBlobClientStageBlockOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo -} - -// BlockBlobStageBlockResponse contains the response from method BlockBlobClient.StageBlock. -type BlockBlobStageBlockResponse struct { - blockBlobClientStageBlockResponse -} - -func toBlockBlobStageBlockResponse(resp blockBlobClientStageBlockResponse) BlockBlobStageBlockResponse { - return BlockBlobStageBlockResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobStageBlockFromURLOptions provides set of configurations for StageBlockFromURL operation -type BlockBlobStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
- CopySourceAuthorization *string - - LeaseAccessConditions *LeaseAccessConditions - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentCRC64 []byte - - Offset *int64 - - Count *int64 - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo -} - -func (o *BlockBlobStageBlockFromURLOptions) format() (*blockBlobClientStageBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - options := &blockBlobClientStageBlockFromURLOptions{ - CopySourceAuthorization: o.CopySourceAuthorization, - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, - SourceRange: getSourceRange(o.Offset, o.Count), - } - - return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions -} - -// BlockBlobStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. -type BlockBlobStageBlockFromURLResponse struct { - blockBlobClientStageBlockFromURLResponse -} - -func toBlockBlobStageBlockFromURLResponse(resp blockBlobClientStageBlockFromURLResponse) BlockBlobStageBlockFromURLResponse { - return BlockBlobStageBlockFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobCommitBlockListOptions provides set of configurations for CommitBlockList operation -type BlockBlobCommitBlockListOptions struct { - BlobTagsMap map[string]string - Metadata map[string]string - RequestID *string - Tier *AccessTier - Timeout *int32 - TransactionalContentCRC64 []byte - TransactionalContentMD5 []byte - BlobHTTPHeaders *BlobHTTPHeaders - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobCommitBlockListOptions) format() (*blockBlobClientCommitBlockListOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &blockBlobClientCommitBlockListOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), - Metadata: o.Metadata, - RequestID: o.RequestID, - Tier: o.Tier, - Timeout: o.Timeout, - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.BlobHTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions -} - -// BlockBlobCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. 
-type BlockBlobCommitBlockListResponse struct { - blockBlobClientCommitBlockListResponse -} - -func toBlockBlobCommitBlockListResponse(resp blockBlobClientCommitBlockListResponse) BlockBlobCommitBlockListResponse { - return BlockBlobCommitBlockListResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobGetBlockListOptions provides set of configurations for GetBlockList operation -type BlockBlobGetBlockListOptions struct { - Snapshot *string - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobGetBlockListOptions) format() (*blockBlobClientGetBlockListOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &blockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions -} - -// BlockBlobGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. -type BlockBlobGetBlockListResponse struct { - blockBlobClientGetBlockListResponse -} - -func toBlockBlobGetBlockListResponse(resp blockBlobClientGetBlockListResponse) BlockBlobGetBlockListResponse { - return BlockBlobGetBlockListResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobCopyFromURLOptions provides set of configurations for CopyBlockBlobFromURL operation -type BlockBlobCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsMap map[string]string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. 
- Tier *AccessTier - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobCopyFromURLOptions) format() (*blobClientCopyFromURLOptions, *SourceModifiedAccessConditions, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - options := &blobClientCopyFromURLOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), - CopySourceAuthorization: o.CopySourceAuthorization, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - Metadata: o.Metadata, - SourceContentMD5: o.SourceContentMD5, - Tier: o.Tier, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions -} - -// BlockBlobCopyFromURLResponse contains the response from method BlockBlobClient.CopyFromURL. -type BlockBlobCopyFromURLResponse struct { - blobClientCopyFromURLResponse -} - -func toBlockBlobCopyFromURLResponse(resp blobClientCopyFromURLResponse) BlockBlobCopyFromURLResponse { - return BlockBlobCopyFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go deleted file mode 100644 index 657a767dd54..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// ClientOptions adds additional client options while constructing connection -type ClientOptions struct { - // Logging configures the built-in logging policy. - Logging policy.LogOptions - - // Retry configures the built-in retry policy. - Retry policy.RetryOptions - - // Telemetry configures the built-in telemetry policy. - Telemetry policy.TelemetryOptions - - // Transport sets the transport for HTTP requests. - Transport policy.Transporter - - // PerCallPolicies contains custom policies to inject into the pipeline. - // Each policy is executed once per request. - PerCallPolicies []policy.Policy - - // PerRetryPolicies contains custom policies to inject into the pipeline. - // Each policy is executed once per request, and for each retry of that request. 
- PerRetryPolicies []policy.Policy -} - -func (c *ClientOptions) toPolicyOptions() *azcore.ClientOptions { - return &azcore.ClientOptions{ - Logging: c.Logging, - Retry: c.Retry, - Telemetry: c.Telemetry, - Transport: c.Transport, - PerCallPolicies: c.PerCallPolicies, - PerRetryPolicies: c.PerRetryPolicies, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -func getConnectionOptions(options *ClientOptions) *policy.ClientOptions { - if options == nil { - options = &ClientOptions{} - } - return options.toPolicyOptions() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go deleted file mode 100644 index a33103e4b77..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go +++ /dev/null @@ -1,271 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerCreateOptions provides set of configurations for CreateContainer operation -type ContainerCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string - - // Optional. Specifies the encryption scope settings to set on the container. - CpkScope *ContainerCpkScopeInfo -} - -func (o *ContainerCreateOptions) format() (*containerClientCreateOptions, *ContainerCpkScopeInfo) { - if o == nil { - return nil, nil - } - - basicOptions := containerClientCreateOptions{ - Access: o.Access, - Metadata: o.Metadata, - } - - return &basicOptions, o.CpkScope -} - -// ContainerCreateResponse is wrapper around containerClientCreateResponse -type ContainerCreateResponse struct { - containerClientCreateResponse -} - -func toContainerCreateResponse(resp containerClientCreateResponse) ContainerCreateResponse { - return ContainerCreateResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerDeleteOptions provides set of configurations for DeleteContainer operation -type ContainerDeleteOptions struct { - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerDeleteOptions) format() (*containerClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// ContainerDeleteResponse contains the response from method ContainerClient.Delete. 
-type ContainerDeleteResponse struct { - containerClientDeleteResponse -} - -func toContainerDeleteResponse(resp containerClientDeleteResponse) ContainerDeleteResponse { - return ContainerDeleteResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerGetPropertiesOptions provides set of configurations for GetPropertiesContainer operation -type ContainerGetPropertiesOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *ContainerGetPropertiesOptions) format() (*containerClientGetPropertiesOptions, *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.LeaseAccessConditions -} - -// ContainerGetPropertiesResponse contains the response from method ContainerClient.GetProperties -type ContainerGetPropertiesResponse struct { - containerClientGetPropertiesResponse -} - -func toContainerGetPropertiesResponse(resp containerClientGetPropertiesResponse) ContainerGetPropertiesResponse { - return ContainerGetPropertiesResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerSetMetadataOptions provides set of configurations for SetMetadataContainer operation -type ContainerSetMetadataOptions struct { - Metadata map[string]string - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerSetMetadataOptions) format() (*containerClientSetMetadataOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return &containerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// ContainerSetMetadataResponse contains the response from method containerClient.SetMetadata -type ContainerSetMetadataResponse struct { - containerClientSetMetadataResponse -} - -func toContainerSetMetadataResponse(resp containerClientSetMetadataResponse) ContainerSetMetadataResponse { - return ContainerSetMetadataResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerGetAccessPolicyOptions provides set of configurations for GetAccessPolicy operation -type ContainerGetAccessPolicyOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *ContainerGetAccessPolicyOptions) format() (*containerClientGetAccessPolicyOptions, *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.LeaseAccessConditions -} - -// ContainerGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. 
-type ContainerGetAccessPolicyResponse struct { - containerClientGetAccessPolicyResponse -} - -func toContainerGetAccessPolicyResponse(resp containerClientGetAccessPolicyResponse) ContainerGetAccessPolicyResponse { - return ContainerGetAccessPolicyResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerSetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation -type ContainerSetAccessPolicyOptions struct { - AccessConditions *ContainerAccessConditions - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // the acls for the container - ContainerACL []*SignedIdentifier - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -func (o *ContainerSetAccessPolicyOptions) format() (*containerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - mac, lac := o.AccessConditions.format() - return &containerClientSetAccessPolicyOptions{ - Access: o.Access, - ContainerACL: o.ContainerACL, - RequestID: o.RequestID, - }, lac, mac -} - -// ContainerSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy -type ContainerSetAccessPolicyResponse struct { - containerClientSetAccessPolicyResponse -} - -func toContainerSetAccessPolicyResponse(resp containerClientSetAccessPolicyResponse) ContainerSetAccessPolicyResponse { - return ContainerSetAccessPolicyResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerListBlobsFlatOptions provides set of configurations for SetAccessPolicy operation -type ContainerListBlobsFlatOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - MaxResults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. 
- Prefix *string -} - -func (o *ContainerListBlobsFlatOptions) format() *containerClientListBlobFlatSegmentOptions { - if o == nil { - return nil - } - - return &containerClientListBlobFlatSegmentOptions{ - Include: o.Include, - Marker: o.Marker, - Maxresults: o.MaxResults, - Prefix: o.Prefix, - } -} - -// ContainerListBlobFlatPager provides operations for iterating over paged responses -type ContainerListBlobFlatPager struct { - *containerClientListBlobFlatSegmentPager -} - -func toContainerListBlobFlatSegmentPager(resp *containerClientListBlobFlatSegmentPager) *ContainerListBlobFlatPager { - return &ContainerListBlobFlatPager{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -//ContainerListBlobsHierarchyOptions provides set of configurations for ContainerClient.ListBlobsHierarchy -type ContainerListBlobsHierarchyOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - MaxResults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string -} - -func (o *ContainerListBlobsHierarchyOptions) format() *containerClientListBlobHierarchySegmentOptions { - if o == nil { - return nil - } - - return &containerClientListBlobHierarchySegmentOptions{ - Include: o.Include, - Marker: o.Marker, - Maxresults: o.MaxResults, - Prefix: o.Prefix, - } -} - -// ContainerListBlobHierarchyPager provides operations for iterating over paged responses. -type ContainerListBlobHierarchyPager struct { - containerClientListBlobHierarchySegmentPager -} - -func toContainerListBlobHierarchySegmentPager(resp *containerClientListBlobHierarchySegmentPager) *ContainerListBlobHierarchyPager { - if resp == nil { - return nil - } - return &ContainerListBlobHierarchyPager{*resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go deleted file mode 100644 index 87572e9178f..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go +++ /dev/null @@ -1,166 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// --------------------------------------------------------------------------------------------------------------------- - -// LeaseBreakNaturally tells ContainerClient's or BlobClient's BreakLease method to break the lease using service semantics. -const LeaseBreakNaturally = -1 - -func leasePeriodPointer(period int32) *int32 { - if period != LeaseBreakNaturally { - return &period - } else { - return nil - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerAcquireLeaseOptions provides set of configurations for AcquireLeaseContainer operation -type ContainerAcquireLeaseOptions struct { - Duration *int32 - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerAcquireLeaseOptions) format() (containerClientAcquireLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return containerClientAcquireLeaseOptions{}, nil - } - containerAcquireLeaseOptions := containerClientAcquireLeaseOptions{ - Duration: o.Duration, - } - - return containerAcquireLeaseOptions, o.ModifiedAccessConditions -} - -// ContainerAcquireLeaseResponse contains the response from method ContainerLeaseClient.AcquireLease. -type ContainerAcquireLeaseResponse struct { - containerClientAcquireLeaseResponse -} - -func toContainerAcquireLeaseResponse(resp containerClientAcquireLeaseResponse) ContainerAcquireLeaseResponse { - return ContainerAcquireLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerBreakLeaseOptions provides set of configurations for BreakLeaseContainer operation -type ContainerBreakLeaseOptions struct { - BreakPeriod *int32 - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerBreakLeaseOptions) format() (*containerClientBreakLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - containerBreakLeaseOptions := &containerClientBreakLeaseOptions{ - BreakPeriod: o.BreakPeriod, - } - - return containerBreakLeaseOptions, o.ModifiedAccessConditions -} - -// ContainerBreakLeaseResponse contains the response from method ContainerLeaseClient.BreakLease. -type ContainerBreakLeaseResponse struct { - containerClientBreakLeaseResponse -} - -func toContainerBreakLeaseResponse(resp containerClientBreakLeaseResponse) ContainerBreakLeaseResponse { - return ContainerBreakLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerChangeLeaseOptions provides set of configurations for ChangeLeaseContainer operation -type ContainerChangeLeaseOptions struct { - ProposedLeaseID *string - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerChangeLeaseOptions) format() (*string, *containerClientChangeLeaseOptions, *ModifiedAccessConditions, error) { - generatedUuid, err := uuid.New() - if err != nil { - return nil, nil, nil, err - } - leaseID := to.Ptr(generatedUuid.String()) - if o == nil { - return leaseID, nil, nil, err - } - - if o.ProposedLeaseID == nil { - o.ProposedLeaseID = leaseID - } - - return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, err -} - -// ContainerChangeLeaseResponse contains the response from method ContainerLeaseClient.ChangeLease. 
-type ContainerChangeLeaseResponse struct { - containerClientChangeLeaseResponse -} - -func toContainerChangeLeaseResponse(resp containerClientChangeLeaseResponse) ContainerChangeLeaseResponse { - return ContainerChangeLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerReleaseLeaseOptions provides set of configurations for ReleaseLeaseContainer operation -type ContainerReleaseLeaseOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerReleaseLeaseOptions) format() (*containerClientReleaseLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// ContainerReleaseLeaseResponse contains the response from method ContainerLeaseClient.ReleaseLease. -type ContainerReleaseLeaseResponse struct { - containerClientReleaseLeaseResponse -} - -func toContainerReleaseLeaseResponse(resp containerClientReleaseLeaseResponse) ContainerReleaseLeaseResponse { - return ContainerReleaseLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerRenewLeaseOptions provides set of configurations for RenewLeaseContainer operation -type ContainerRenewLeaseOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerRenewLeaseOptions) format() (*containerClientRenewLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// ContainerRenewLeaseResponse contains the response from method ContainerLeaseClient.RenewLease. -type ContainerRenewLeaseResponse struct { - containerClientRenewLeaseResponse -} - -func toContainerRenewLeaseResponse(resp containerClientRenewLeaseResponse) ContainerRenewLeaseResponse { - return ContainerRenewLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go deleted file mode 100644 index c7a67abe774..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "fmt" -) - -const _1MiB = 1024 * 1024 - -// UploadOption identifies options used by the UploadBuffer and UploadFile functions. -type UploadOption struct { - // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. - // Note that the progress reporting is not always increasing; it can go down when retrying a request. - Progress func(bytesTransferred int64) - - // HTTPHeaders indicates the HTTP headers to be associated with the blob. - HTTPHeaders *BlobHTTPHeaders - - // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. - Metadata map[string]string - - // BlobAccessConditions indicates the access conditions for the block blob. 
- BlobAccessConditions *BlobAccessConditions - - // AccessTier indicates the tier of blob - AccessTier *AccessTier - - // TagsMap - TagsMap map[string]string - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - - // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) - Parallelism uint16 - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 *[]byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 *[]byte -} - -func (o *UploadOption) getStageBlockOptions() *BlockBlobStageBlockOptions { - leaseAccessConditions, _ := o.BlobAccessConditions.format() - return &BlockBlobStageBlockOptions{ - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, - LeaseAccessConditions: leaseAccessConditions, - } -} - -func (o *UploadOption) getUploadBlockBlobOptions() *BlockBlobUploadOptions { - return &BlockBlobUploadOptions{ - TagsMap: o.TagsMap, - Metadata: o.Metadata, - Tier: o.AccessTier, - HTTPHeaders: o.HTTPHeaders, - BlobAccessConditions: o.BlobAccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, - } -} - -func (o *UploadOption) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions { - return &BlockBlobCommitBlockListOptions{ - BlobTagsMap: o.TagsMap, - Metadata: o.Metadata, - Tier: o.AccessTier, - BlobHTTPHeaders: o.HTTPHeaders, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadStreamOptions provides set of configurations for UploadStream operation -type UploadStreamOptions struct { - // TransferManager provides a TransferManager that controls buffer allocation/reuse and - // concurrency. This overrides BufferSize and MaxBuffers if set. - TransferManager TransferManager - transferMangerNotSet bool - // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB. - BufferSize int - // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file. 
- MaxBuffers int - HTTPHeaders *BlobHTTPHeaders - Metadata map[string]string - BlobAccessConditions *BlobAccessConditions - AccessTier *AccessTier - BlobTagsMap map[string]string - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo -} - -func (u *UploadStreamOptions) defaults() error { - if u.TransferManager != nil { - return nil - } - - if u.MaxBuffers == 0 { - u.MaxBuffers = 1 - } - - if u.BufferSize < _1MiB { - u.BufferSize = _1MiB - } - - var err error - u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers) - if err != nil { - return fmt.Errorf("bug: default transfer manager could not be created: %s", err) - } - u.transferMangerNotSet = true - return nil -} - -func (u *UploadStreamOptions) getStageBlockOptions() *BlockBlobStageBlockOptions { - leaseAccessConditions, _ := u.BlobAccessConditions.format() - return &BlockBlobStageBlockOptions{ - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, - LeaseAccessConditions: leaseAccessConditions, - } -} - -func (u *UploadStreamOptions) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions { - options := &BlockBlobCommitBlockListOptions{ - BlobTagsMap: u.BlobTagsMap, - Metadata: u.Metadata, - Tier: u.AccessTier, - BlobHTTPHeaders: u.HTTPHeaders, - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, - BlobAccessConditions: u.BlobAccessConditions, - } - - return options -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DownloadOptions identifies options used by the DownloadToBuffer and DownloadToFile functions. -type DownloadOptions struct { - // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are received. - Progress func(bytesTransferred int64) - - // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. - BlobAccessConditions *BlobAccessConditions - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - - // Parallelism indicates the maximum number of blocks to download in parallel (0=default) - Parallelism uint16 - - // RetryReaderOptionsPerBlock is used when downloading each block. - RetryReaderOptionsPerBlock RetryReaderOptions -} - -func (o *DownloadOptions) getBlobPropertiesOptions() *BlobGetPropertiesOptions { - return &BlobGetPropertiesOptions{ - BlobAccessConditions: o.BlobAccessConditions, - CpkInfo: o.CpkInfo, - } -} - -func (o *DownloadOptions) getDownloadBlobOptions(offSet, count int64, rangeGetContentMD5 *bool) *BlobDownloadOptions { - return &BlobDownloadOptions{ - BlobAccessConditions: o.BlobAccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, - Offset: &offSet, - Count: &count, - RangeGetContentMD5: rangeGetContentMD5, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BatchTransferOptions identifies options used by DoBatchTransfer. 
-type BatchTransferOptions struct { - TransferSize int64 - ChunkSize int64 - Parallelism uint16 - Operation func(offset int64, chunkSize int64, ctx context.Context) error - OperationName string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go deleted file mode 100644 index 3cf85ca43b1..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "net/url" - "strings" -) - -func serializeBlobTagsToStrPtr(tagsMap map[string]string) *string { - if tagsMap == nil { - return nil - } - tags := make([]string, 0) - for key, val := range tagsMap { - tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) - } - //tags = tags[:len(tags)-1] - blobTagsString := strings.Join(tags, "&") - return &blobTagsString -} - -func serializeBlobTags(tagsMap map[string]string) *BlobTags { - if tagsMap == nil { - return nil - } - blobTagSet := make([]*BlobTag, 0) - for key, val := range tagsMap { - newKey, newVal := key, val - blobTagSet = append(blobTagSet, &BlobTag{Key: &newKey, Value: &newVal}) - } - return &BlobTags{BlobTagSet: blobTagSet} -} - -func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) { - if policies == nil { - return nil - } - // For source blobs (blobs that have policy ids and rule ids applied to them), - // the header will be formatted as "x-ms-or-_: {Complete, Failed}". - // The value of this header is the status of the replication. - orPolicyStatusHeader := make(map[string]string) - for key, value := range policies { - if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { - orPolicyStatusHeader[key] = value - } - } - - parsedResult := make(map[string][]ObjectReplicationRules) - for key, value := range orPolicyStatusHeader { - policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") - policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] - - parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) - } - - for policyId, rules := range parsedResult { - objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ - PolicyId: &policyId, - Rules: &rules, - }) - } - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go deleted file mode 100644 index 747a94ee245..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceGetAccountInfoOptions provides set of options for ServiceClient.GetAccountInfo -type ServiceGetAccountInfoOptions struct { - // placeholder for future options -} - -func (o *ServiceGetAccountInfoOptions) format() *serviceClientGetAccountInfoOptions { - return nil -} - -// ServiceGetAccountInfoResponse contains the response from ServiceClient.GetAccountInfo -type ServiceGetAccountInfoResponse struct { - serviceClientGetAccountInfoResponse -} - -func toServiceGetAccountInfoResponse(resp serviceClientGetAccountInfoResponse) ServiceGetAccountInfoResponse { - return ServiceGetAccountInfoResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ListContainersDetail indicates what additional information the service should return with each container. -type ListContainersDetail struct { - // Tells the service whether to return metadata for each container. - Metadata bool - - // Tells the service whether to return soft-deleted containers. - Deleted bool -} - -// string produces the `Include` query parameter's value. -func (o *ListContainersDetail) format() []ListContainersIncludeType { - if !o.Metadata && !o.Deleted { - return nil - } - - items := make([]ListContainersIncludeType, 0, 2) - // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! - if o.Deleted { - items = append(items, ListContainersIncludeTypeDeleted) - } - if o.Metadata { - items = append(items, ListContainersIncludeTypeMetadata) - } - return items -} - -// ListContainersOptions provides set of configurations for ListContainers operation -type ListContainersOptions struct { - Include ListContainersDetail - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing operation did not return all containers - // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in - // a subsequent call to request the next page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, - // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible - // that the service will return fewer results than specified by max results, or than the default of 5000. - MaxResults *int32 - - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string -} - -func (o *ListContainersOptions) format() *serviceClientListContainersSegmentOptions { - if o == nil { - return nil - } - - return &serviceClientListContainersSegmentOptions{ - Include: o.Include.format(), - Marker: o.Marker, - Maxresults: o.MaxResults, - Prefix: o.Prefix, - } -} - -// ServiceListContainersSegmentPager provides operations for iterating over paged responses. 
-type ServiceListContainersSegmentPager struct { - serviceClientListContainersSegmentPager -} - -func toServiceListContainersSegmentPager(resp serviceClientListContainersSegmentPager) *ServiceListContainersSegmentPager { - return &ServiceListContainersSegmentPager{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceGetPropertiesOptions provides set of options for ServiceClient.GetProperties -type ServiceGetPropertiesOptions struct { - // placeholder for future options -} - -func (o *ServiceGetPropertiesOptions) format() *serviceClientGetPropertiesOptions { - return nil -} - -// ServiceGetPropertiesResponse contains the response from ServiceClient.GetProperties -type ServiceGetPropertiesResponse struct { - serviceClientGetPropertiesResponse -} - -func toServiceGetPropertiesResponse(resp serviceClientGetPropertiesResponse) ServiceGetPropertiesResponse { - return ServiceGetPropertiesResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceSetPropertiesOptions provides set of options for ServiceClient.SetProperties -type ServiceSetPropertiesOptions struct { - // The set of CORS rules. - Cors []*CorsRule - - // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible - // values include version 2008-10-27 and all more recent versions - DefaultServiceVersion *string - - // the retention policy which determines how long the associated data should persist - DeleteRetentionPolicy *RetentionPolicy - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - HourMetrics *Metrics - - // Azure Analytics Logging settings. - Logging *Logging - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - MinuteMetrics *Metrics - - // The properties that enable an account to host a static website - StaticWebsite *StaticWebsite -} - -func (o *ServiceSetPropertiesOptions) format() (StorageServiceProperties, *serviceClientSetPropertiesOptions) { - if o == nil { - return StorageServiceProperties{}, nil - } - - return StorageServiceProperties{ - Cors: o.Cors, - DefaultServiceVersion: o.DefaultServiceVersion, - DeleteRetentionPolicy: o.DeleteRetentionPolicy, - HourMetrics: o.HourMetrics, - Logging: o.Logging, - MinuteMetrics: o.MinuteMetrics, - StaticWebsite: o.StaticWebsite, - }, nil -} - -// ServiceSetPropertiesResponse contains the response from ServiceClient.SetProperties -type ServiceSetPropertiesResponse struct { - serviceClientSetPropertiesResponse -} - -func toServiceSetPropertiesResponse(resp serviceClientSetPropertiesResponse) ServiceSetPropertiesResponse { - return ServiceSetPropertiesResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceGetStatisticsOptions provides set of options for ServiceClient.GetStatistics -type ServiceGetStatisticsOptions struct { - // placeholder for future options -} - -func (o *ServiceGetStatisticsOptions) format() *serviceClientGetStatisticsOptions { - return nil -} - -// ServiceGetStatisticsResponse contains the response from ServiceClient.GetStatistics. 
-type ServiceGetStatisticsResponse struct { - serviceClientGetStatisticsResponse -} - -func toServiceGetStatisticsResponse(resp serviceClientGetStatisticsResponse) ServiceGetStatisticsResponse { - return ServiceGetStatisticsResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceFilterBlobsOptions provides set of configurations for ServiceClient.FindBlobsByTags -type ServiceFilterBlobsOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker - // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value - // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server - // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for - // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or - // than the default of 5000. - MaxResults *int32 - // Filters the results to return only to return only blobs whose tags match the specified expression. - Where *string -} - -func (o *ServiceFilterBlobsOptions) pointer() *serviceClientFilterBlobsOptions { - if o == nil { - return nil - } - return &serviceClientFilterBlobsOptions{ - Marker: o.Marker, - Maxresults: o.MaxResults, - Where: o.Where, - } -} - -// ServiceFilterBlobsResponse contains the response from ServiceClient.FindBlobsByTags -type ServiceFilterBlobsResponse struct { - serviceClientFilterBlobsResponse -} - -func toServiceFilterBlobsResponse(resp serviceClientFilterBlobsResponse) ServiceFilterBlobsResponse { - return ServiceFilterBlobsResponse{resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go deleted file mode 100644 index ca5aac8cd74..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go +++ /dev/null @@ -1,648 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -type appendBlobClient struct { - endpoint string - pl runtime.Pipeline -} - -// newAppendBlobClient creates a new instance of appendBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. 
-func newAppendBlobClient(endpoint string, pl runtime.Pipeline) *appendBlobClient { - client := &appendBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client -} - -// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append -// Block operation is permitted only if the blob was created with x-ms-blob-type set to -// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// body - Initial data -// appendBlobClientAppendBlockOptions - appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock -// method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *appendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientAppendBlockResponse, error) { - req, err := client.appendBlockCreateRequest(ctx, contentLength, body, appendBlobClientAppendBlockOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return appendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) - } - return client.appendBlockHandleResponse(resp) -} - -// appendBlockCreateRequest creates the AppendBlock request. 
-func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "appendblock") - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentMD5)) - } - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentCRC64)) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - 
req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, req.SetBody(body, "application/octet-stream") -} - -// appendBlockHandleResponse handles the AppendBlock response. -func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (appendBlobClientAppendBlockResponse, error) { - result := appendBlobClientAppendBlockResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where -// the contents are read from a source url. The Append Block operation is permitted only if the blob was -// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// If the operation fails it returns an *azcore.ResponseError type. -// sourceURL - Specify a URL to the copy source. -// contentLength - The length of the request. -// appendBlobClientAppendBlockFromURLOptions - appendBlobClientAppendBlockFromURLOptions contains the optional parameters -// for the appendBlobClient.AppendBlockFromURL method. 
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -func (client *appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (appendBlobClientAppendBlockFromURLResponse, error) { - req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, appendBlobClientAppendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return appendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) - } - return client.appendBlockFromURLHandleResponse(resp) -} - -// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
-func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "appendblock") - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockFromURLOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-copy-source", sourceURL) - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceRange != nil { - req.Raw().Header.Set("x-ms-source-range", *appendBlobClientAppendBlockFromURLOptions.SourceRange) - } - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentMD5)) - } - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64 != nil { - req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64)) - } - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5)) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != 
nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockFromURLOptions.RequestID) - } - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
-func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (appendBlobClientAppendBlockFromURLResponse, error) { - result := appendBlobClientAppendBlockFromURLResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - return result, nil -} - -// Create - The Create Append Blob operation creates a new append blob. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// appendBlobClientCreateOptions - appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *appendBlobClient) Create(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, contentLength, appendBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return appendBlobClientCreateResponse{}, runtime.NewResponseError(resp) - } - return client.createHandleResponse(resp) -} - -// createCreateRequest creates the Create request. -func (client *appendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientCreateOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-blob-type", "AppendBlob") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Metadata != nil { - for k, v := range appendBlobClientCreateOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - 
req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientCreateOptions.RequestID) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *appendBlobClientCreateOptions.BlobTagsString) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", appendBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*appendBlobClientCreateOptions.ImmutabilityPolicyMode)) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*appendBlobClientCreateOptions.LegalHold)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// createHandleResponse handles the Create response. 
-func (client *appendBlobClient) createHandleResponse(resp *http.Response) (appendBlobClientCreateResponse, error) { - result := appendBlobClientCreateResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version -// or later. -// If the operation fails it returns an *azcore.ResponseError type. -// appendBlobClientSealOptions - appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock -// method. -func (client *appendBlobClient) Seal(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (appendBlobClientSealResponse, error) { - req, err := client.sealCreateRequest(ctx, appendBlobClientSealOptions, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) - if err != nil { - return appendBlobClientSealResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientSealResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return appendBlobClientSealResponse{}, runtime.NewResponseError(resp) - } - return client.sealHandleResponse(resp) -} - -// sealCreateRequest creates the Seal request. 
-func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "seal") - if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientSealOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientSealOptions.RequestID) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// sealHandleResponse handles the Seal response. 
-func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (appendBlobClientSealResponse, error) { - result := appendBlobClientSealResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientSealResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { - isSealed, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientSealResponse{}, err - } - result.IsSealed = &isSealed - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go deleted file mode 100644 index 3f78a28aa40..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go +++ /dev/null @@ -1,953 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -type blockBlobClient struct { - endpoint string - pl runtime.Pipeline -} - -// newBlockBlobClient creates a new instance of blockBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func newBlockBlobClient(endpoint string, pl runtime.Pipeline) *blockBlobClient { - client := &blockBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client -} - -// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. -// In order to be written as part of a blob, a block must have been successfully written to the -// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that -// have changed, then committing the new and existing blocks together. You can do -// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit -// the most recently uploaded version of the block, whichever list it may -// belong to. -// If the operation fails it returns an *azcore.ResponseError type. -// blocks - Blob Blocks. -// blockBlobClientCommitBlockListOptions - blockBlobClientCommitBlockListOptions contains the optional parameters for the -// blockBlobClient.CommitBlockList method. 
-// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientCommitBlockListResponse, error) { - req, err := client.commitBlockListCreateRequest(ctx, blocks, blockBlobClientCommitBlockListOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) - } - return client.commitBlockListHandleResponse(resp) -} - -// commitBlockListCreateRequest creates the CommitBlockList request. -func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "blocklist") - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientCommitBlockListOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentMD5)) - } - if 
blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentCRC64)) - } - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Metadata != nil { - for k, v := range blockBlobClientCommitBlockListOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientCommitBlockListOptions.Tier)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientCommitBlockListOptions.RequestID) - } - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *blockBlobClientCommitBlockListOptions.BlobTagsString) - } - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) - } - if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode)) - } - if blockBlobClientCommitBlockListOptions 
!= nil && blockBlobClientCommitBlockListOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientCommitBlockListOptions.LegalHold)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, runtime.MarshalAsXML(req, blocks) -} - -// commitBlockListHandleResponse handles the CommitBlockList response. -func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response) (blockBlobClientCommitBlockListResponse, error) { - result := blockBlobClientCommitBlockListResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return blockBlobClientCommitBlockListResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob -// If the operation fails it returns an *azcore.ResponseError type. -// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. -// blockBlobClientGetBlockListOptions - blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientGetBlockListResponse, error) { - req, err := client.getBlockListCreateRequest(ctx, listType, blockBlobClientGetBlockListOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return blockBlobClientGetBlockListResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return blockBlobClientGetBlockListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return blockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) - } - return client.getBlockListHandleResponse(resp) -} - -// getBlockListCreateRequest creates the GetBlockList request. -func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "blocklist") - if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Snapshot != nil { - reqQP.Set("snapshot", *blockBlobClientGetBlockListOptions.Snapshot) - } - reqQP.Set("blocklisttype", string(listType)) - if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientGetBlockListOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientGetBlockListOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// getBlockListHandleResponse handles the GetBlockList response. 
-func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (blockBlobClientGetBlockListResponse, error) { - result := blockBlobClientGetBlockListResponse{RawResponse: resp} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientGetBlockListResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return blockBlobClientGetBlockListResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientGetBlockListResponse{}, err - } - result.Date = &date - } - if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { - return blockBlobClientGetBlockListResponse{}, err - } - return result, nil -} - -// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from -// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not -// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform -// partial updates to a block blob’s contents using a source URL, use the Put -// Block from URL API in conjunction with Put Block List. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// blockBlobClientPutBlobFromURLOptions - blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. 
-func (client *blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientPutBlobFromURLResponse, error) { - req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return blockBlobClientPutBlobFromURLResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return blockBlobClientPutBlobFromURLResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) - } - return client.putBlobFromURLHandleResponse(resp) -} - -// putBlobFromURLCreateRequest creates the PutBlobFromURL request. -func (client *blockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientPutBlobFromURLOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-blob-type", "BlockBlob") - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5)) - } - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Metadata != nil { - for k, v := range blockBlobClientPutBlobFromURLOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientPutBlobFromURLOptions.Tier)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { - req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientPutBlobFromURLOptions.RequestID) - } - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", 
base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.SourceContentMD5)) - } - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *blockBlobClientPutBlobFromURLOptions.BlobTagsString) - } - req.Raw().Header.Set("x-ms-copy-source", copySource) - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties != nil { - req.Raw().Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties)) - } - if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// putBlobFromURLHandleResponse handles the PutBlobFromURL response. -func (client *blockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (blockBlobClientPutBlobFromURLResponse, error) { - result := blockBlobClientPutBlobFromURLResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientPutBlobFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientPutBlobFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientPutBlobFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return blockBlobClientPutBlobFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob -// If the operation fails it returns an *azcore.ResponseError type. -// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// contentLength - The length of the request. -// body - Initial data -// blockBlobClientStageBlockOptions - blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -func (client *blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (blockBlobClientStageBlockResponse, error) { - req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, blockBlobClientStageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo) - if err != nil { - return blockBlobClientStageBlockResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return blockBlobClientStageBlockResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) - } - return client.stageBlockHandleResponse(resp) -} - -// stageBlockCreateRequest creates the StageBlock request. -func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") - reqQP.Set("blockid", blockID) - if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentMD5)) - } - if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentCRC64)) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", 
*blockBlobClientStageBlockOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, req.SetBody(body, "application/octet-stream") -} - -// stageBlockHandleResponse handles the StageBlock response. -func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (blockBlobClientStageBlockResponse, error) { - result := blockBlobClientStageBlockResponse{RawResponse: resp} - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientStageBlockResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientStageBlockResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientStageBlockResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return blockBlobClientStageBlockResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents -// are read from a URL. -// If the operation fails it returns an *azcore.ResponseError type. -// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// contentLength - The length of the request. -// sourceURL - Specify a URL to the copy source. -// blockBlobClientStageBlockFromURLOptions - blockBlobClientStageBlockFromURLOptions contains the optional parameters for -// the blockBlobClient.StageBlockFromURL method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. 
-func (client *blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientStageBlockFromURLResponse, error) { - req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, blockBlobClientStageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return blockBlobClientStageBlockFromURLResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return blockBlobClientStageBlockFromURLResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) - } - return client.stageBlockFromURLHandleResponse(resp) -} - -// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. -func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") - reqQP.Set("blockid", blockID) - if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockFromURLOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("x-ms-copy-source", sourceURL) - if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceRange != nil { - req.Raw().Header.Set("x-ms-source-range", *blockBlobClientStageBlockFromURLOptions.SourceRange) - } - if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentMD5)) - } - if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentcrc64 != nil { - req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentcrc64)) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", 
*leaseAccessConditions.LeaseID) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockFromURLOptions.RequestID) - } - if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. -func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (blockBlobClientStageBlockFromURLResponse, error) { - result := blockBlobClientStageBlockFromURLResponse{RawResponse: resp} - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientStageBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientStageBlockFromURLResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientStageBlockFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return blockBlobClientStageBlockFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob -// overwrites any existing metadata on the blob. 
Partial updates are not supported with Put -// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of -// the content of a block blob, use the Put Block List operation. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// body - Initial data -// blockBlobClientUploadOptions - blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientUploadResponse, error) { - req, err := client.uploadCreateRequest(ctx, contentLength, body, blockBlobClientUploadOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return blockBlobClientUploadResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return blockBlobClientUploadResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blockBlobClientUploadResponse{}, runtime.NewResponseError(resp) - } - return client.uploadHandleResponse(resp) -} - -// uploadCreateRequest creates the Upload request. 
-func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientUploadOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-blob-type", "BlockBlob") - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientUploadOptions.TransactionalContentMD5)) - } - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Metadata != nil { - for k, v := range blockBlobClientUploadOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientUploadOptions.Tier)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != 
nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientUploadOptions.RequestID) - } - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *blockBlobClientUploadOptions.BlobTagsString) - } - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientUploadOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) - } - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientUploadOptions.ImmutabilityPolicyMode)) - } - if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientUploadOptions.LegalHold)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, req.SetBody(body, "application/octet-stream") -} - -// uploadHandleResponse handles the Upload response. 
-func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (blockBlobClientUploadResponse, error) { - result := blockBlobClientUploadResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientUploadResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return blockBlobClientUploadResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return blockBlobClientUploadResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return blockBlobClientUploadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go deleted file mode 100644 index bad81201ba2..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go +++ /dev/null @@ -1,1247 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -type pageBlobClient struct { - endpoint string - pl runtime.Pipeline -} - -// newPageBlobClient creates a new instance of pageBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func newPageBlobClient(endpoint string, pl runtime.Pipeline) *pageBlobClient { - client := &pageBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client -} - -// ClearPages - The Clear Pages operation clears a set of pages from a page blob -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// pageBlobClientClearPagesOptions - pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) ClearPages(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientClearPagesResponse, error) { - req, err := client.clearPagesCreateRequest(ctx, contentLength, pageBlobClientClearPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) - } - return client.clearPagesHandleResponse(resp) -} - -// clearPagesCreateRequest creates the ClearPages request. -func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientClearPagesOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-page-write", "clear") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientClearPagesOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if sequenceNumberAccessConditions != nil && 
sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientClearPagesOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// clearPagesHandleResponse handles the ClearPages response. 
-func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (pageBlobClientClearPagesResponse, error) { - result := pageBlobClientClearPagesResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } - return result, nil -} - -// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. -// The snapshot is copied such that only the differential changes between the previously copied -// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can -// be read or copied from as usual. This API is supported since REST version -// 2016-05-31. -// If the operation fails it returns an *azcore.ResponseError type. -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// pageBlobClientCopyIncrementalOptions - pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *pageBlobClient) CopyIncremental(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCopyIncrementalResponse, error) { - req, err := client.copyIncrementalCreateRequest(ctx, copySource, pageBlobClientCopyIncrementalOptions, modifiedAccessConditions) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return pageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) - } - return client.copyIncrementalHandleResponse(resp) -} - -// copyIncrementalCreateRequest creates the CopyIncremental request. -func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "incrementalcopy") - if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCopyIncrementalOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-copy-source", copySource) - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCopyIncrementalOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// copyIncrementalHandleResponse handles the CopyIncremental response. 
-func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (pageBlobClientCopyIncrementalResponse, error) { - result := pageBlobClientCopyIncrementalResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - return result, nil -} - -// Create - The Create operation creates a new page blob. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// pageBlobClientCreateOptions - pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, pageBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientCreateResponse{}, runtime.NewResponseError(resp) - } - return client.createHandleResponse(resp) -} - -// createCreateRequest creates the Create request. 
-func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCreateOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-blob-type", "PageBlob") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*pageBlobClientCreateOptions.Tier)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Metadata != nil { - for k, v := range pageBlobClientCreateOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", 
*modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobSequenceNumber != nil { - req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientCreateOptions.BlobSequenceNumber, 10)) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCreateOptions.RequestID) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *pageBlobClientCreateOptions.BlobTagsString) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", pageBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*pageBlobClientCreateOptions.ImmutabilityPolicyMode)) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*pageBlobClientCreateOptions.LegalHold)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// createHandleResponse handles the Create response. 
-func (client *pageBlobClient) createHandleResponse(resp *http.Response) (pageBlobClientCreateResponse, error) { - result := pageBlobClientCreateResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// GetPageRanges - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page -// blob -// If the operation fails it returns an *azcore.ResponseError type. -// pageBlobClientGetPageRangesOptions - pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) GetPageRanges(pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesPager { - return &pageBlobClientGetPageRangesPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.getPageRangesCreateRequest(ctx, pageBlobClientGetPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) - }, - advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) - }, - } -} - -// getPageRangesCreateRequest creates the GetPageRanges request. 
-func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "pagelist") - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Snapshot != nil { - reqQP.Set("snapshot", *pageBlobClientGetPageRangesOptions.Snapshot) - } - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Timeout), 10)) - } - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Marker != nil { - reqQP.Set("marker", *pageBlobClientGetPageRangesOptions.Marker) - } - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Maxresults), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// getPageRangesHandleResponse handles the GetPageRanges response. 
-func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesResponse, error) { - result := pageBlobClientGetPageRangesResponse{RawResponse: resp} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - result.Date = &date - } - if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - return result, nil -} - -// GetPageRangesDiff - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were -// changed between target blob and previous snapshot. -// If the operation fails it returns an *azcore.ResponseError type. -// pageBlobClientGetPageRangesDiffOptions - pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the -// pageBlobClient.GetPageRangesDiff method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) GetPageRangesDiff(pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesDiffPager { - return &pageBlobClientGetPageRangesDiffPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.getPageRangesDiffCreateRequest(ctx, pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) - }, - advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) - }, - } -} - -// getPageRangesDiffCreateRequest creates the GetPageRangesDiff request. 
-func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context, pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "pagelist") - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Snapshot != nil { - reqQP.Set("snapshot", *pageBlobClientGetPageRangesDiffOptions.Snapshot) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Timeout), 10)) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Prevsnapshot != nil { - reqQP.Set("prevsnapshot", *pageBlobClientGetPageRangesDiffOptions.Prevsnapshot) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Marker != nil { - reqQP.Set("marker", *pageBlobClientGetPageRangesDiffOptions.Marker) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Maxresults), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL != nil { - req.Raw().Header.Set("x-ms-previous-snapshot-url", *pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesDiffOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesDiffOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// getPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
-func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesDiffResponse, error) { - result := pageBlobClientGetPageRangesDiffResponse{RawResponse: resp} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - result.Date = &date - } - if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - return result, nil -} - -// Resize - Resize the Blob -// If the operation fails it returns an *azcore.ResponseError type. -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// pageBlobClientResizeOptions - pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) Resize(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientResizeResponse, error) { - req, err := client.resizeCreateRequest(ctx, blobContentLength, pageBlobClientResizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return pageBlobClientResizeResponse{}, runtime.NewResponseError(resp) - } - return client.resizeHandleResponse(resp) -} - -// resizeCreateRequest creates the Resize request. 
-func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientResizeOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientResizeOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// resizeHandleResponse handles the Resize response. 
-func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (pageBlobClientResizeResponse, error) { - result := pageBlobClientResizeResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - result.Date = &date - } - return result, nil -} - -// UpdateSequenceNumber - Update the sequence number of the blob -// If the operation fails it returns an *azcore.ResponseError type. -// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to -// page blobs only. This property indicates how the service should modify the blob's sequence number -// pageBlobClientUpdateSequenceNumberOptions - pageBlobClientUpdateSequenceNumberOptions contains the optional parameters -// for the pageBlobClient.UpdateSequenceNumber method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUpdateSequenceNumberResponse, error) { - req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return pageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) - } - return client.updateSequenceNumberHandleResponse(resp) -} - -// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
-func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUpdateSequenceNumberOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) - if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber != nil { - req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber, 10)) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUpdateSequenceNumberOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
-func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (pageBlobClientUpdateSequenceNumberResponse, error) { - result := pageBlobClientUpdateSequenceNumberResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - result.Date = &date - } - return result, nil -} - -// UploadPages - The Upload Pages operation writes a range of pages to a page blob -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// body - Initial data -// pageBlobClientUploadPagesOptions - pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUploadPagesResponse, error) { - req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, pageBlobClientUploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) - } - return client.uploadPagesHandleResponse(resp) -} - -// uploadPagesCreateRequest creates the UploadPages request. 
-func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-page-write", "update") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentMD5)) - } - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentCRC64)) - } - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientUploadPagesOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - 
} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, req.SetBody(body, "application/octet-stream") -} - -// uploadPagesHandleResponse handles the UploadPages response. -func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (pageBlobClientUploadPagesResponse, error) { - result := pageBlobClientUploadPagesResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from -// a URL -// If the operation fails it returns an *azcore.ResponseError type. -// sourceURL - Specify a URL to the copy source. -// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header -// and x-ms-range/Range destination range header. 
-// contentLength - The length of the request. -// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end -// is required. -// pageBlobClientUploadPagesFromURLOptions - pageBlobClientUploadPagesFromURLOptions contains the optional parameters for -// the pageBlobClient.UploadPagesFromURL method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -func (client *pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (pageBlobClientUploadPagesFromURLResponse, error) { - req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, pageBlobClientUploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) - } - return client.uploadPagesFromURLHandleResponse(resp) -} - -// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
-func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesFromURLOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-page-write", "update") - req.Raw().Header.Set("x-ms-copy-source", sourceURL) - req.Raw().Header.Set("x-ms-source-range", sourceRange) - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentMD5)) - } - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64 != nil { - req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64)) - } - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("x-ms-range", rangeParam) - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesFromURLOptions.RequestID) - } - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
-func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (pageBlobClientUploadPagesFromURLResponse, error) { - result := pageBlobClientUploadPagesFromURLResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go deleted file mode 100644 index 9f0cc4629fd..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go +++ /dev/null @@ -1,287 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "net/http" - "reflect" -) - -// containerClientListBlobFlatSegmentPager provides operations for iterating over paged responses. -type containerClientListBlobFlatSegmentPager struct { - client *containerClient - current containerClientListBlobFlatSegmentResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, containerClientListBlobFlatSegmentResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. 
-func (p *containerClientListBlobFlatSegmentPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *containerClientListBlobFlatSegmentPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.ListBlobsFlatSegmentResponse.NextMarker == nil || len(*p.current.ListBlobsFlatSegmentResponse.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.listBlobFlatSegmentHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current containerClientListBlobFlatSegmentResponse page. -func (p *containerClientListBlobFlatSegmentPager) PageResponse() containerClientListBlobFlatSegmentResponse { - return p.current -} - -// containerClientListBlobHierarchySegmentPager provides operations for iterating over paged responses. -type containerClientListBlobHierarchySegmentPager struct { - client *containerClient - current containerClientListBlobHierarchySegmentResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. -func (p *containerClientListBlobHierarchySegmentPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *containerClientListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.ListBlobsHierarchySegmentResponse.NextMarker == nil || len(*p.current.ListBlobsHierarchySegmentResponse.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.listBlobHierarchySegmentHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current containerClientListBlobHierarchySegmentResponse page. -func (p *containerClientListBlobHierarchySegmentPager) PageResponse() containerClientListBlobHierarchySegmentResponse { - return p.current -} - -// pageBlobClientGetPageRangesDiffPager provides operations for iterating over paged responses. -type pageBlobClientGetPageRangesDiffPager struct { - client *pageBlobClient - current pageBlobClientGetPageRangesDiffResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. 
-func (p *pageBlobClientGetPageRangesDiffPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *pageBlobClientGetPageRangesDiffPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.getPageRangesDiffHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current pageBlobClientGetPageRangesDiffResponse page. -func (p *pageBlobClientGetPageRangesDiffPager) PageResponse() pageBlobClientGetPageRangesDiffResponse { - return p.current -} - -// pageBlobClientGetPageRangesPager provides operations for iterating over paged responses. -type pageBlobClientGetPageRangesPager struct { - client *pageBlobClient - current pageBlobClientGetPageRangesResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, pageBlobClientGetPageRangesResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. -func (p *pageBlobClientGetPageRangesPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *pageBlobClientGetPageRangesPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.getPageRangesHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current pageBlobClientGetPageRangesResponse page. -func (p *pageBlobClientGetPageRangesPager) PageResponse() pageBlobClientGetPageRangesResponse { - return p.current -} - -// serviceClientListContainersSegmentPager provides operations for iterating over paged responses. -type serviceClientListContainersSegmentPager struct { - client *serviceClient - current serviceClientListContainersSegmentResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, serviceClientListContainersSegmentResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. -func (p *serviceClientListContainersSegmentPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. 
-func (p *serviceClientListContainersSegmentPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.ListContainersSegmentResponse.NextMarker == nil || len(*p.current.ListContainersSegmentResponse.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.listContainersSegmentHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current serviceClientListContainersSegmentResponse page. -func (p *serviceClientListContainersSegmentPager) PageResponse() serviceClientListContainersSegmentResponse { - return p.current -} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 1c2c6ae24d1..11a33de73a1 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -12,7 +12,9 @@ package confidential import ( "context" "crypto" + "crypto/rsa" "crypto/x509" + "encoding/base64" "encoding/pem" "errors" "fmt" @@ -20,6 +22,7 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" @@ -60,12 +63,9 @@ type AuthResult = base.AuthResult type Account = shared.Account // CertFromPEM converts a PEM file (.pem or .key) for use with NewCredFromCert(). The file -// must have the public certificate and the private key encoded. The private key must be encoded -// in PKCS8 (not PKCS1). This is usually denoted by the section "PRIVATE KEY" (instead of PKCS1's -// "RSA PRIVATE KEY"). If a PEM block is encoded and password is not an empty string, it attempts -// to decrypt the PEM blocks using the password. This will return multiple x509 certificates, -// though this use case should have a single cert. Multiple certs are due to certificate -// chaining for use cases like TLS that sign from root to leaf. +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. 
func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { var certs []*x509.Certificate var priv crypto.PrivateKey @@ -79,7 +79,7 @@ func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.P if x509.IsEncryptedPEMBlock(block) { b, err := x509.DecryptPEMBlock(block, []byte(password)) if err != nil { - return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %w", err) + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) } block, _ = pem.Decode(b) if block == nil { @@ -91,18 +91,27 @@ func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.P case "CERTIFICATE": cert, err := x509.ParseCertificate(block.Bytes) if err != nil { - return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be pared by x509: %w", err) + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) } certs = append(certs, cert) case "PRIVATE KEY": if priv != nil { - return nil, nil, fmt.Errorf("found multiple blocks labelled 'PRIVATE KEY'") + return nil, nil, errors.New("found multiple private key blocks") } var err error - priv, err = parsePrivateKey(block.Bytes) + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) if err != nil { - return nil, nil, fmt.Errorf("could not decode private key: %w", err) + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) } } pemData = rest @@ -119,15 +128,8 @@ func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.P return certs, priv, nil } -// parsePrivateKey is based on https://gist.github.com/ukautz/cd118e298bbd8f0a88fc . I don't *think* -// we need to do the extra decoding in the example, but *maybe*? -func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { - key, err := x509.ParsePKCS8PrivateKey(der) - if err != nil { - return nil, fmt.Errorf("problems decoding private key using PKCS8: %w", err) - } - return key, nil -} +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions // Credential represents the credential used in confidential client flows. type Credential struct { @@ -135,16 +137,37 @@ type Credential struct { cert *x509.Certificate key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) - assertion string + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) } // toInternal returns the accesstokens.Credential that is used internally. The current structure of the // code requires that client.go, requests.go and confidential.go share a credential type without // having import recursion. That requires the type used between is in a shared package. Therefore // we have this. 
-func (c Credential) toInternal() *accesstokens.Credential { - return &accesstokens.Credential{Secret: c.secret, Cert: c.cert, Key: c.key, Assertion: c.assertion} +func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") } // NewCredFromSecret creates a Credential from a secret. @@ -156,17 +179,69 @@ func NewCredFromSecret(secret string) (Credential, error) { } // NewCredFromAssertion creates a Credential from a signed assertion. +// +// Deprecated: a Credential created by this function can't refresh the +// assertion when it expires. Use NewCredFromAssertionCallback instead. func NewCredFromAssertion(assertion string) (Credential, error) { if assertion == "" { return Credential{}, errors.New("assertion can't be empty string") } - return Credential{assertion: assertion}, nil + return NewCredFromAssertionCallback(func(context.Context, AssertionRequestOptions) (string, error) { return assertion, nil }), nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} } -// NewCredFromCert creates a Credential from an x509.Certificate and a PKCS8 DER encoded private key. -// CertFromPEM() can be used to get these values from a PEM file storing a PKCS8 private key. +// NewCredFromCert creates a Credential from an x509.Certificate and an RSA private key. +// CertFromPEM() can be used to get these values from a PEM file. func NewCredFromCert(cert *x509.Certificate, key crypto.PrivateKey) Credential { - return Credential{cert: cert, key: key} + cred, _ := NewCredFromCertChain([]*x509.Certificate{cert}, key) + return cred +} + +// NewCredFromCertChain creates a Credential from a chain of x509.Certificates and an RSA private key +// as returned by CertFromPEM(). +func NewCredFromCertChain(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) 
+ } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} } // AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. @@ -274,6 +349,11 @@ func WithAzureRegion(val string) Option { // will store credentials for (a Client is per user). clientID is the Azure clientID and cred is // the type of credential to use. func New(clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + opts := Options{ Authority: base.AuthorityPublicCloud, HTTPClient: shared.DefaultClient, @@ -286,15 +366,28 @@ func New(clientID string, cred Credential, options ...Option) (Client, error) { return Client{}, err } - base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithX5C(opts.SendX5C), base.WithCacheAccessor(opts.Accessor), base.WithRegionDetection(opts.AzureRegion)) + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.Accessor), + base.WithRegionDetection(opts.AzureRegion), + base.WithX5C(opts.SendX5C), + } + if cred.tokenProvider != nil { + // The caller will handle all details of authentication, using Client only as a token cache. + // Declaring the authority host known prevents unnecessary metadata discovery requests. (The + // authority is irrelevant to Client and friends because the token provider is responsible + // for authentication.) + parsed, err := url.Parse(opts.Authority) + if err != nil { + return Client{}, errors.New("invalid authority") + } + baseOpts = append(baseOpts, base.WithKnownAuthorityHosts([]string{parsed.Hostname()})) + } + base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), baseOpts...) if err != nil { return Client{}, err } - return Client{ - base: base, - cred: cred.toInternal(), - }, nil + return Client{base: base, cred: internalCred}, nil } // UserID is the unique user identifier this client if for. 
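Editor's note (not part of the patch): the vendored MSAL bump above replaces the single-cert/assertion credential model with chain-aware and callback-based constructors (NewCredFromCertChain, NewCredFromAssertionCallback, NewCredFromTokenProvider) and makes Credential.toInternal validation explicit. The following is a minimal, untested sketch of how a caller might build a confidential client against the new API, using only the signatures visible in this diff (CertFromPEM, NewCredFromCertChain, New). The PEM path and client ID are placeholders, not values from this patch.

    package main

    import (
        "log"
        "os"

        "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
    )

    func main() {
        // Placeholder path: a PEM file containing the cert chain and its private key.
        pemData, err := os.ReadFile("service-principal.pem")
        if err != nil {
            log.Fatal(err)
        }

        // CertFromPEM returns every certificate in the file plus the private key.
        certs, key, err := confidential.CertFromPEM(pemData, "")
        if err != nil {
            log.Fatal(err)
        }

        // NewCredFromCertChain picks the signing cert whose public key matches the
        // private key and records the full chain for the x5c header (SN/I auth).
        cred, err := confidential.NewCredFromCertChain(certs, key)
        if err != nil {
            log.Fatal(err)
        }

        // "client-id" is a placeholder application (client) ID.
        client, err := confidential.New("client-id", cred)
        if err != nil {
            log.Fatal(err)
        }
        _ = client // acquire tokens via client.AcquireTokenByCredential, etc.
    }

Under this API, a Credential built from a secret, cert chain, assertion callback, or token provider is validated once in New via toInternal, so misconfigured credentials (e.g. a key without a certificate) fail at construction rather than at first token request.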
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go index c8adf3da239..c9b8dbed088 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "reflect" "strings" @@ -21,7 +20,7 @@ var prettyConf = &pretty.Config{ TrackCycles: true, Formatter: map[reflect.Type]interface{}{ reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return "could not read io.Reader content" } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index ac291643361..a86f06400ba 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -37,9 +37,9 @@ type manager interface { } // partitionedManager provides an internal cache. It is defined to allow faking the cache in tests. -// In all production use it is a *storage.Manager. +// In all production use it is a *storage.PartitionedManager. type partitionedManager interface { - Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (storage.TokenResponse, error) + Read(ctx context.Context, authParameters authority.AuthParams) (storage.TokenResponse, error) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) } @@ -147,6 +147,15 @@ func WithCacheAccessor(ca cache.ExportReplace) Option { } } +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + } +} + // WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. func WithX5C(sendX5C bool) Option { return func(c *Client) { @@ -230,7 +239,7 @@ func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, s func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and authParams is not a pointer. 
authParams.Scopes = silent.Scopes - authParams.HomeaccountID = silent.Account.HomeAccountID + authParams.HomeAccountID = silent.Account.HomeAccountID authParams.AuthorizationType = silent.AuthorizationType authParams.UserAssertion = silent.UserAssertion @@ -242,7 +251,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen b.cacheAccessor.Replace(s, suggestedCacheKey) defer b.cacheAccessor.Export(s, suggestedCacheKey) } - storageTokenResponse, err = b.pmanager.Read(ctx, authParams, silent.Account) + storageTokenResponse, err = b.pmanager.Read(ctx, authParams) if err != nil { return AuthResult{}, err } @@ -262,7 +271,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen result, err := AuthResultFromStorage(storageTokenResponse) if err != nil { if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { - return AuthResult{}, errors.New("no refresh token found") + return AuthResult{}, errors.New("no token found") } var cc *accesstokens.Credential @@ -270,7 +279,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen cc = silent.Credential } - token, err := b.Token.Refresh(ctx, silent.RequestType, b.AuthParams, cc, storageTokenResponse.RefreshToken) + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) if err != nil { return AuthResult{}, err } @@ -376,7 +385,7 @@ func (b Client) AllAccounts() []shared.Account { func (b Client) Account(homeAccountID string) shared.Account { authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. authParams.AuthorizationType = authority.AccountByID - authParams.HomeaccountID = homeAccountID + authParams.HomeAccountID = homeAccountID if s, ok := b.manager.(cache.Serializer); ok { suggestedCacheKey := b.AuthParams.CacheKey(false) b.cacheAccessor.Replace(s, suggestedCacheKey) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go index 1e8d9a8414f..548c2faebf9 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -103,8 +103,14 @@ func (a AccessToken) Key() string { ) } +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + // Validate validates that this AccessToken can be used. 
func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } if a.CachedAt.T.After(time.Now()) { return errors.New("access token isn't valid, it was cached at a future time") } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go index 4c7c1f1b553..d17e7c034a4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -36,7 +36,7 @@ func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { } // Read reads a storage token from the cache if it exists. -func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { +func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes @@ -69,7 +69,7 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. return TokenResponse{}, err } - account, err = m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) + account, err := m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) if err != nil { return TokenResponse{}, err } @@ -83,8 +83,8 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. // Write writes a token response to the cache and returns the account information the token is stored with. func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { - authParameters.HomeaccountID = tokenResponse.ClientInfo.HomeAccountID() - homeAccountID := authParameters.HomeaccountID + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index 881bd7c684e..b759408b5b4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -84,20 +84,22 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool { // Read reads a storage token from the cache if it exists. 
func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { - homeAccountID := authParameters.HomeaccountID + homeAccountID := authParameters.HomeAccountID realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes - metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) - if err != nil { - return TokenResponse{}, err + // fetch metadata if and only if the authority isn't explicitly trusted + aliases := authParameters.KnownAuthorityHosts + if len(aliases) == 0 { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases } - accessToken, err := m.readAccessToken(homeAccountID, metadata.Aliases, realm, clientID, scopes) - if err != nil { - return TokenResponse{}, err - } + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) if account.IsZero() { return TokenResponse{ @@ -107,22 +109,22 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, Account: shared.Account{}, }, nil } - idToken, err := m.readIDToken(homeAccountID, metadata.Aliases, realm, clientID) + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) if err != nil { return TokenResponse{}, err } - AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID) + AppMetaData, err := m.readAppMetaData(aliases, clientID) if err != nil { return TokenResponse{}, err } familyID := AppMetaData.FamilyID - refreshToken, err := m.readRefreshToken(homeAccountID, metadata.Aliases, familyID, clientID) + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) if err != nil { return TokenResponse{}, err } - account, err = m.readAccount(homeAccountID, metadata.Aliases, realm) + account, err = m.readAccount(homeAccountID, aliases, realm) if err != nil { return TokenResponse{}, err } @@ -138,8 +140,8 @@ const scopeSeparator = " " // Write writes a token response to the cache and returns the account information the token is stored with. func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { - authParameters.HomeaccountID = tokenResponse.ClientInfo.HomeAccountID() - homeAccountID := authParameters.HomeaccountID + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID @@ -249,7 +251,7 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) return m.aadCache[authorityInfo.Host], nil } -func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) (AccessToken, error) { +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { m.contractMu.RLock() defer m.contractMu.RUnlock() // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. 
@@ -259,12 +261,12 @@ func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, cli if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { if checkAlias(at.Environment, envAliases) { if isMatchingScopes(scopes, at.Scopes) { - return at, nil + return at } } } } - return AccessToken{}, fmt.Errorf("access token not found") + return AccessToken{} } func (m *Manager) writeAccessToken(accessToken AccessToken) error { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 00000000000..7b673e3fe12 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. + TokenEndpoint string +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters struct { + // Claims contains any additional claims requested for the token + Claims string + // CorrelationID of the authentication request + CorrelationID string + // Scopes requested for the token + Scopes []string + // TenantID identifies the tenant in which to authenticate + TenantID string +} + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult struct { + // AccessToken is the requested token + AccessToken string + // ExpiresInSeconds is the lifetime of the token in seconds + ExpiresInSeconds int +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 83bd60d419b..2238521f5f9 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -52,7 +52,7 @@ func Marshal(i interface{}) ([]byte, error) { if v.Kind() != reflect.Ptr && v.CanAddr() { v = v.Addr() } - err := marshalStruct(reflect.ValueOf(i), &buff, enc) + err := marshalStruct(v, &buff, enc) if err != nil { return nil, err } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index 41f4373fa58..04236ff3127 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -11,6 +11,7 @@ import ( "net/http" "strconv" "strings" + "time" ) var okPage = []byte(` @@ -84,7 +85,7 @@ func New(reqState string, port int) (*Server, error) { serv := &Server{ Addr: fmt.Sprintf("http://localhost:%s", portStr), - s: &http.Server{Addr: "localhost:0"}, + s: 
&http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second}, reqState: reqState, resultCh: make(chan Result, 1), } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index 825f55fb732..6b4016c1166 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -7,15 +7,18 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" ) // ResolveEndpointer contains the methods for resolving authority endpoints. @@ -92,6 +95,27 @@ func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) // Credential acquires a token from the authority using a client credentials grant. func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + CorrelationID: uuid.New().String(), + Scopes: scopes, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { return accesstokens.TokenResponse{}, err } @@ -99,7 +123,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams if cred.Secret != "" { return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) } - jwt, err := cred.JWT(authParams) + jwt, err := cred.JWT(ctx, authParams) if err != nil { return accesstokens.TokenResponse{}, err } @@ -116,7 +140,7 @@ func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) } - jwt, err := cred.JWT(authParams) + jwt, err := cred.JWT(ctx, authParams) if err != nil { return accesstokens.TokenResponse{}, err } @@ -233,7 +257,7 @@ func isWaitDeviceCodeErr(err error) bool { } var dCErr deviceCodeError defer c.Resp.Body.Close() - body, err := ioutil.ReadAll(c.Resp.Body) + body, err := io.ReadAll(c.Resp.Body) if err != nil { return false } diff --git 
a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index 65831b0b7a2..eaeb2ef5f08 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -19,17 +19,18 @@ import ( "crypto/sha1" "crypto/x509" "encoding/base64" + "encoding/json" "fmt" "net/url" "strconv" "strings" - "sync" "time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" - "github.com/golang-jwt/jwt" + "github.com/golang-jwt/jwt/v4" "github.com/google/uuid" ) @@ -43,9 +44,6 @@ const ( password = "password" ) -// assertionLifetime allows tests to control the expiration time of JWT assertions created by Credential. -var assertionLifetime = 10 * time.Minute - //go:generate stringer -type=AppType // AppType is whether the authorization code flow is for a public or confidential client. @@ -90,44 +88,37 @@ type Credential struct { // Secret contains the credential secret if we are doing auth by secret. Secret string - // Cert is the public x509 certificate if we are doing any auth other than secret. + // Cert is the public certificate, if we're authenticating by certificate. Cert *x509.Certificate - // Key is the private key for signing if we are doing any auth other than secret. + // Key is the private key for signing, if we're authenticating by certificate. Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) - // mu protects everything below. - mu sync.Mutex - // Assertion is the signed JWT assertion if we have retrieved it or if it was passed. - Assertion string - // Expires is when the Assertion expires. Public to allow faking in tests. - // Any use outside msal is not supported by a compatibility promise. - Expires time.Time + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) } // JWT gets the jwt assertion when the credential is not using a secret. -func (c *Credential) JWT(authParams authority.AuthParams) (string, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.Expires.After(time.Now()) { - return c.Assertion, nil - } else if c.Cert == nil || c.Key == nil { - // The assertion has expired and this Credential can't generate a new one. The assertion - // was presumably provided by the application via confidential.NewCredFromAssertion(). 
We - // return it despite its expiration to maintain the behavior of previous versions, and - // because there's no API enabling the application to replace the assertion - // (see https://github.com/AzureAD/microsoft-authentication-library-for-go/issues/292). - return c.Assertion, nil +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) } - expires := time.Now().Add(assertionLifetime) - token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ "aud": authParams.Endpoints.TokenEndpoint, - "exp": strconv.FormatInt(expires.Unix(), 10), + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), "iss": authParams.ClientID, "jti": uuid.New().String(), - "nbf": strconv.FormatInt(time.Now().Unix(), 10), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), "sub": authParams.ClientID, }) token.Header = map[string]interface{}{ @@ -137,16 +128,14 @@ func (c *Credential) JWT(authParams authority.AuthParams) (string, error) { } if authParams.SendX5C { - token.Header["x5c"] = []string{base64.StdEncoding.EncodeToString(c.Cert.Raw)} + token.Header["x5c"] = c.X5c } - var err error - c.Assertion, err = token.SignedString(c.Key) + + assertion, err := token.SignedString(c.Key) if err != nil { return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) } - - c.Expires = expires - return c.Assertion, nil + return assertion, nil } // thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. @@ -213,7 +202,7 @@ func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenRes if req.Credential == nil { return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") } - qv, err = prepURLVals(req.Credential, req.AuthParams) + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) if err != nil { return TokenResponse{}, err } @@ -239,7 +228,7 @@ func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParam qv := url.Values{} if appType == ATConfidential { var err error - qv, err = prepURLVals(cc, authParams) + qv, err = prepURLVals(ctx, cc, authParams) if err != nil { return TokenResponse{}, err } @@ -374,14 +363,14 @@ func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams // prepURLVals returns an url.Values that sets various key/values if we are doing secrets // or JWT assertions. 
-func prepURLVals(cc *Credential, authParams authority.AuthParams) (url.Values, error) { +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { params := url.Values{} if cc.Secret != "" { params.Set("client_secret", cc.Secret) return params, nil } - jwt, err := cc.JWT(authParams) + jwt, err := cc.JWT(ctx, authParams) if err != nil { return nil, err } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index dcc6c53c206..4724d944ff8 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -9,7 +9,7 @@ import ( "encoding/base64" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -22,7 +22,7 @@ import ( const ( authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" - TenantDiscoveryEndpointWithRegion = "https://%v.r.%v/%v/v2.0/.well-known/openid-configuration" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" regionName = "REGION_NAME" defaultAPIVersion = "2021-10-01" imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion @@ -133,7 +133,7 @@ type AuthParams struct { ClientID string // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). Redirecturi string - HomeaccountID string + HomeAccountID string // Username is the user-name portion for username/password auth flow. Username string // Password is the password portion for username/password auth flow. @@ -156,6 +156,9 @@ type AuthParams struct { SendX5C bool // UserAssertion is the access token used to acquire token on behalf of user UserAssertion string + + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string } // NewAuthParams creates an authorization parameters object. @@ -243,7 +246,7 @@ const ( Managed UserRealmAccountType = "Managed" ) -//UserRealm is used for the username password request to determine user type +// UserRealm is used for the username password request to determine user type type UserRealm struct { AccountType UserRealmAccountType `json:"account_type"` DomainName string `json:"domain_name"` @@ -332,7 +335,12 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I region = detectRegion(ctx) } if region != "" { - resp.TenantDiscoveryEndpoint = fmt.Sprintf(TenantDiscoveryEndpointWithRegion, region, authorityInfo.Host, authorityInfo.Tenant) + environment := authorityInfo.Host + switch environment { + case "login.microsoft.com", "login.windows.net", "sts.windows.net", defaultHost: + environment = "r." 
+ defaultHost + } + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) metadata := InstanceDiscoveryMetadata{ PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), PreferredCache: authorityInfo.Host, @@ -378,7 +386,7 @@ func detectRegion(ctx context.Context) string { } } defer resp.Body.Close() - response, err := ioutil.ReadAll(resp.Body) + response, err := io.ReadAll(resp.Body) if err != nil { return "" } @@ -393,7 +401,7 @@ func (a *AuthParams) CacheKey(isAppCache bool) string { return a.AppKey() } if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { - return a.HomeaccountID + return a.HomeAccountID } return "" } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go index 0620b3e4134..7d9ec7cd374 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -11,7 +11,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "net/url" "reflect" @@ -89,7 +88,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea if err != nil { return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) } - req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + req.Body = io.NopCloser(bytes.NewBuffer(data)) req.Method = http.MethodPost } @@ -163,7 +162,7 @@ func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, b if len(body) > 0 { req.Method = http.MethodPost - req.Body = ioutil.NopCloser(strings.NewReader(body)) + req.Body = io.NopCloser(strings.NewReader(body)) } data, err := c.do(ctx, req) @@ -201,9 +200,9 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values URL: u, Header: headers, ContentLength: int64(len(enc)), - Body: ioutil.NopCloser(strings.NewReader(enc)), + Body: io.NopCloser(strings.NewReader(enc)), GetBody: func() (io.ReadCloser, error) { - return ioutil.NopCloser(strings.NewReader(enc)), nil + return io.NopCloser(strings.NewReader(enc)), nil }, } @@ -248,7 +247,7 @@ func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { if err != nil { return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) } - reply.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + reply.Body = io.NopCloser(bytes.NewBuffer(data)) // NOTE: This doesn't happen immediately after the call so that we can get an error message // from the server and include it in our error. 
@@ -297,7 +296,7 @@ func (c *Client) readBody(resp *http.Response) ([]byte, error) { default: return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) } - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } var testID string diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go index 01060ac6210..1f9c543fa3b 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -6,6 +6,7 @@ Package ops provides operations to various backend services using REST clients. The REST type provides several clients that can be used to communicate to backends. Usage is simple: + rest := ops.New() // Creates an authority client and calls the UserRealm() method. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index e50d5e97f15..5e1ea912912 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. -const Version = "0.5.1" +const Version = "0.7.0" diff --git a/vendor/github.com/efficientgo/tools/extkingpin/LICENSE b/vendor/github.com/efficientgo/tools/extkingpin/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/efficientgo/tools/extkingpin/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/efficientgo/tools/extkingpin/doc.go b/vendor/github.com/efficientgo/tools/extkingpin/doc.go new file mode 100644 index 00000000000..f07082b7a7a --- /dev/null +++ b/vendor/github.com/efficientgo/tools/extkingpin/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +package extkingpin + +// PathOrContent is a flag type that defines two flags to fetch bytes. Either from file (*-file flag) or content (* flag). +// Also returns content of YAML file with substituted environment variables. +// Follows K8s convention, i.e $(...), as mentioned here https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/. + +// RegisterPathOrContent registers PathOrContent flag in kingpinCmdClause. + +// Content returns the content of the file when given or directly the content that has been passed to the flag. 
+// It returns an error when: +// * The file and content flags are both not empty. +// * The file flag is not empty but the file can't be read. +// * The content is empty and the flag has been defined as required. + +// Option is a functional option type for PathOrContent objects. +// WithRequired allows you to override default required option. +// WithEnvSubstitution allows you to override default envSubstitution option. diff --git a/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go b/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go new file mode 100644 index 00000000000..0a4b5ff17c7 --- /dev/null +++ b/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go @@ -0,0 +1,143 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Taken from Thanos project. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. +package extkingpin + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + + "github.com/pkg/errors" + "gopkg.in/alecthomas/kingpin.v2" +) + +// PathOrContent is a flag type that defines two flags to fetch bytes. Either from file (*-file flag) or content (* flag). +type PathOrContent struct { + flagName string + + envSubstitution bool + required bool + hidden bool + + path *string + content *string +} + +// Option is a functional option type for PathOrContent objects. +type Option func(*PathOrContent) + +type FlagClause interface { + Flag(name, help string) *kingpin.FlagClause +} + +// RegisterPathOrContent registers PathOrContent flag in kingpinCmdClause. +func RegisterPathOrContent(cmd FlagClause, flagName string, help string, opts ...Option) *PathOrContent { + fileFlagName := fmt.Sprintf("%s-file", flagName) + fileFlag := cmd.Flag(fileFlagName, fmt.Sprintf("Path to %s", help)).PlaceHolder("") + contentFlag := cmd.Flag(flagName, fmt.Sprintf("Alternative to '%s' flag (mutually exclusive). Content of %s", fileFlagName, help)).PlaceHolder("") + + p := &PathOrContent{ + flagName: flagName, + } + + for _, opt := range opts { + opt(p) + } + + if p.hidden { + fileFlag = fileFlag.Hidden() + contentFlag = contentFlag.Hidden() + } + + p.path = fileFlag.String() + p.content = contentFlag.String() + return p +} + +// Content returns the content of the file when given or directly the content that has been passed to the flag. +// It returns an error when: +// * The file and content flags are both not empty. +// * The file flag is not empty but the file can't be read. +// * The content is empty and the flag has been defined as required. +func (p *PathOrContent) Content() ([]byte, error) { + fileFlagName := fmt.Sprintf("%s-file", p.flagName) + + if len(*p.path) > 0 && len(*p.content) > 0 { + return nil, errors.Errorf("both %s and %s flags set.", fileFlagName, p.flagName) + } + + var content []byte + if len(*p.path) > 0 { + c, err := ioutil.ReadFile(*p.path) + if err != nil { + return nil, errors.Wrapf(err, "loading file %s for %s", *p.path, fileFlagName) + } + content = c + } else { + content = []byte(*p.content) + } + + if len(content) == 0 && p.required { + return nil, errors.Errorf("flag %s or %s is required for running this command and content cannot be empty.", fileFlagName, p.flagName) + } + if p.envSubstitution { + replace, err := expandEnv(content) + if err != nil { + return nil, err + } + content = replace + } + return content, nil +} + +// Path returns the file's path. 
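For a sense of how this flag type is consumed, here is a minimal sketch of registering a PathOrContent pair on a kingpin application and reading it back; the application name, flag name, and help text are illustrative placeholders, not part of the vendored code.

```go
// Illustrative sketch only: wiring up a PathOrContent flag pair.
package main

import (
	"fmt"
	"os"

	"github.com/efficientgo/tools/extkingpin"
	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example-app", "PathOrContent demo")
	objStoreConfig := extkingpin.RegisterPathOrContent(app, "objstore.config",
		"YAML that contains object store configuration.",
		extkingpin.WithEnvSubstitution(), extkingpin.WithRequired())

	kingpin.MustParse(app.Parse(os.Args[1:]))

	// Content() returns the file contents (--objstore.config-file) or the
	// inline value (--objstore.config); setting both is an error, and
	// $(ENV_VAR) references are expanded because WithEnvSubstitution is set.
	cfg, err := objStoreConfig.Content()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("loaded %d bytes of config\n", len(cfg))
}
```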
+func (p *PathOrContent) Path() string { + return *p.path +} + +// WithRequired allows you to override default required option. +func WithRequired() Option { + return func(p *PathOrContent) { + p.required = true + } +} + +// WithEnvSubstitution allows you to override default envSubstitution option. +func WithEnvSubstitution() Option { + return func(p *PathOrContent) { + p.envSubstitution = true + } +} + +// WithHidden allows you to override the default option and keeps the flag hidden. +func WithHidden() Option { + return func(p *PathOrContent) { + p.hidden = true + } +} + +// expandEnv returns content of YAML file with substituted environment variables. +// Follows K8s convention, i.e $(...), as mentioned here https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/. +func expandEnv(b []byte) (r []byte, err error) { + var envRe = regexp.MustCompile(`\$\(([a-zA-Z_0-9]+)\)`) + r = envRe.ReplaceAllFunc(b, func(n []byte) []byte { + if err != nil { + return nil + } + n = n[2 : len(n)-1] + + v, ok := os.LookupEnv(string(n)) + if !ok { + err = errors.Errorf("found reference to unset environment variable %q", n) + return nil + } + return []byte(v) + }) + return r, err +} diff --git a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md index 1a9a27bcf6e..8f349c4b8f5 100644 --- a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md +++ b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md @@ -1,48 +1,82 @@ # Changelog + All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.6.0] - 2023-01-30 + +[0.6.0]: https://github.com/go-logfmt/logfmt/compare/v0.5.1...v0.6.0 + +### Added + +- NewDecoderSize by [@alexanderjophus] + +## [0.5.1] - 2021-08-18 + +[0.5.1]: https://github.com/go-logfmt/logfmt/compare/v0.5.0...v0.5.1 + +### Changed + +- Update the `go.mod` file for Go 1.17 as described in the [Go 1.17 release + notes](https://golang.org/doc/go1.17#go-command) + ## [0.5.0] - 2020-01-03 +[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0 + ### Changed + - Remove the dependency on github.com/kr/logfmt by [@ChrisHines] - Move fuzz code to github.com/go-logfmt/fuzzlogfmt by [@ChrisHines] ## [0.4.0] - 2018-11-21 +[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0 + ### Added + - Go module support by [@ChrisHines] - CHANGELOG by [@ChrisHines] ### Changed + - Drop invalid runes from keys instead of returning ErrInvalidKey by [@ChrisHines] - On panic while printing, attempt to print panic value by [@bboreham] ## [0.3.0] - 2016-11-15 + +[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0 + ### Added + - Pool buffers for quoted strings and byte slices by [@nussjustin] + ### Fixed + - Fuzz fix, quote invalid UTF-8 values by [@judwhite] ## [0.2.0] - 2016-05-08 + +[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0 + ### Added + - Encoder.EncodeKeyvals by [@ChrisHines] ## [0.1.0] - 2016-03-28 + +[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0 + ### Added + - Encoder by [@ChrisHines] - Decoder by [@ChrisHines] - MarshalKeyvals by [@ChrisHines] -[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0 -[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0 -[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0 -[0.2.0]: 
https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0 -[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0 - [@ChrisHines]: https://github.com/ChrisHines [@bboreham]: https://github.com/bboreham [@judwhite]: https://github.com/judwhite [@nussjustin]: https://github.com/nussjustin +[@alexanderjophus]: https://github.com/alexanderjophus diff --git a/vendor/github.com/go-logfmt/logfmt/README.md b/vendor/github.com/go-logfmt/logfmt/README.md index 8e48fcd3ab7..71c57944e23 100644 --- a/vendor/github.com/go-logfmt/logfmt/README.md +++ b/vendor/github.com/go-logfmt/logfmt/README.md @@ -1,20 +1,25 @@ +# logfmt + [![Go Reference](https://pkg.go.dev/badge/github.com/go-logfmt/logfmt.svg)](https://pkg.go.dev/github.com/go-logfmt/logfmt) [![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt) [![Github Actions](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml/badge.svg)](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=master) - -# logfmt +[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=main) Package logfmt implements utilities to marshal and unmarshal data in the [logfmt -format](https://brandur.org/logfmt). It provides an API similar to -[encoding/json](http://golang.org/pkg/encoding/json/) and -[encoding/xml](http://golang.org/pkg/encoding/xml/). +format][fmt]. It provides an API similar to [encoding/json][json] and +[encoding/xml][xml]. + +[fmt]: https://brandur.org/logfmt +[json]: https://pkg.go.dev/encoding/json +[xml]: https://pkg.go.dev/encoding/xml The logfmt format was first documented by Brandur Leach in [this -article](https://brandur.org/logfmt). The format has not been formally -standardized. The most authoritative public specification to date has been the -documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt) -written by Blake Mizerany and Keith Rarick. +article][origin]. The format has not been formally standardized. The most +authoritative public specification to date has been the documentation of a Go +Language [package][parser] written by Blake Mizerany and Keith Rarick. + +[origin]: https://brandur.org/logfmt +[parser]: https://pkg.go.dev/github.com/kr/logfmt ## Goals @@ -30,4 +35,7 @@ standard as a goal. ## Versioning -Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'. +This project publishes releases according to the Go language guidelines for +[developing and publishing modules][pub]. + +[pub]: https://go.dev/doc/modules/developing diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go index 2013708e485..a1c22dcbda9 100644 --- a/vendor/github.com/go-logfmt/logfmt/decode.go +++ b/vendor/github.com/go-logfmt/logfmt/decode.go @@ -29,6 +29,23 @@ func NewDecoder(r io.Reader) *Decoder { return dec } +// NewDecoderSize returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r beyond +// the logfmt records requested. +// The size argument specifies the size of the initial buffer that the +// Decoder will use to read records from r. +// If a log line is longer than the size argument, the Decoder will return +// a bufio.ErrTooLong error. 
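A short sketch of how the new constructor might be used when log lines can exceed bufio.Scanner's default 64KiB token limit; the buffer size and sample record below are arbitrary.

```go
// Illustrative sketch only: decoding logfmt with a larger line buffer.
package main

import (
	"fmt"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	r := strings.NewReader(`level=info msg="hello world" caller=main.go:10`)
	dec := logfmt.NewDecoderSize(r, 1024*1024) // 1MiB buffer; the size here is an assumption
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			fmt.Printf("%s=%s ", dec.Key(), dec.Value())
		}
		fmt.Println()
	}
	if err := dec.Err(); err != nil {
		panic(err)
	}
}
```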
+func NewDecoderSize(r io.Reader, size int) *Decoder { + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, size), size) + dec := &Decoder{ + s: scanner, + } + return dec +} + // ScanRecord advances the Decoder to the next record, which can then be // parsed with the ScanKeyval method. It returns false when decoding stops, // either by reaching the end of the input or an error. After ScanRecord diff --git a/vendor/github.com/golang-jwt/jwt/.travis.yml b/vendor/github.com/golang-jwt/jwt/.travis.yml deleted file mode 100644 index 036a862f87f..00000000000 --- a/vendor/github.com/golang-jwt/jwt/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... - -go: - - 1.7 - - 1.8 - - 1.9 - - 1.10 diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md deleted file mode 100644 index c4efbd2a8c5..00000000000 --- a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md +++ /dev/null @@ -1,22 +0,0 @@ -## Migration Guide (v3.2.1) - -Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1]), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. - -### go.mod replacement - -In a first step, the easiest way is to use `go mod edit` to issue a replacement. - -``` -go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible -go mod tidy -``` - -This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency to `github.com/golang-jwt/jwt`. Try to compile your project; it should still work. - -### Cleanup - -If your code still consistently builds, you can replace all occurences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed. - -## Older releases (before v3.2.0) - -The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. \ No newline at end of file diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go deleted file mode 100644 index f1dba3cb916..00000000000 --- a/vendor/github.com/golang-jwt/jwt/claims.go +++ /dev/null @@ -1,146 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". 
-// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. - if !c.VerifyExpiresAt(now, false) { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if !c.VerifyNotBefore(now, false) { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud([]string{c.Audience}, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud []string, cmp string, required bool) bool { - if len(aud) == 0 { - return !required - } - // use a var here to keep constant time compare when looping over a number of claims - result := false - - var stringClaims string - for _, a := range aud { - if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { - result = true - } - stringClaims = stringClaims + a - } - - // case where "" is sent in one or many aud claims - if len(stringClaims) == 0 { - return !required - } - - return result -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go deleted file mode 100644 index 1c93024aad2..00000000000 --- a/vendor/github.com/golang-jwt/jwt/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... 
constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go deleted file mode 100644 index 99868d29b9f..00000000000 --- a/vendor/github.com/golang-jwt/jwt/token.go +++ /dev/null @@ -1,108 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore similarity index 100% rename from vendor/github.com/golang-jwt/jwt/.gitignore rename to vendor/github.com/golang-jwt/jwt/v4/.gitignore diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE similarity index 100% rename from vendor/github.com/golang-jwt/jwt/LICENSE rename to vendor/github.com/golang-jwt/jwt/v4/LICENSE diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md new file mode 100644 index 00000000000..32966f59818 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md @@ -0,0 +1,22 @@ +## Migration Guide (v4.0.0) + +Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be: + + "github.com/golang-jwt/jwt/v4" + +The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as +`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having +troubles migrating, please open an issue. + +You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. + +And then you'd typically run: + +``` +go get github.com/golang-jwt/jwt/v4 +go mod tidy +``` + +## Older releases (before v3.2.0) + +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md similarity index 61% rename from vendor/github.com/golang-jwt/jwt/README.md rename to vendor/github.com/golang-jwt/jwt/v4/README.md index 13c31c09b6f..30f2f2a6f70 100644 --- a/vendor/github.com/golang-jwt/jwt/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -1,18 +1,27 @@ # jwt-go [![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) -[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). -**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. 
After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. +Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. +See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. -Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. +> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. -**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. **SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. +### Supported Go versions + +Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). +So we will support a major version of Go until there are two newer major releases. +We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities +which will not be fixed. + ## What the heck is a JWT? JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. @@ -27,23 +36,45 @@ The part in the middle is the interesting bit. It's called the Claims and conta This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. +## Installation Guidelines + +1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program. + +```sh +go get -u github.com/golang-jwt/jwt/v4 +``` + +2. 
Import it in your code: + +```go +import "github.com/golang-jwt/jwt/v4" +``` + ## Examples -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: -* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) -* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) -* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples) ## Extensions -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. +This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`. + +A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards. -Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go +| Extension | Purpose | Repo | +| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | +| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | + +*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers ## Compliance -This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: * In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. @@ -53,10 +84,8 @@ This library is considered production ready. Feedback and feature requests are This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). 
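To make the installation and example links above concrete, a minimal sketch of signing and then validating an HS256 token with the v4 API; the signing key and claim values are placeholders.

```go
// Illustrative sketch only: creating and validating an HS256 token with jwt/v4.
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("a-placeholder-secret") // HMAC methods expect []byte keys.

	claims := jwt.RegisteredClaims{
		Issuer:    "example-issuer",
		Subject:   "user-123",
		IssuedAt:  jwt.NewNumericDate(time.Now()),
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}

	parsed, err := jwt.ParseWithClaims(signed, &jwt.RegisteredClaims{}, func(t *jwt.Token) (interface{}, error) {
		// Always check the signing method before trusting the token,
		// per the security notice above.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token valid:", parsed.Valid)
}
```

The same shape applies to the asymmetric methods listed below; only the key types passed to SignedString and returned from the keyfunc change.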
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning. - **BREAKING CHANGES:*** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. +A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. ## Usage Tips @@ -67,7 +96,7 @@ A token is simply a JSON object that is signed by its author. this tells you exa * The author of the token was in the possession of the signing secret * The data has not been modified since it was signed -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims at a (very) experimental implementation of the JWE standard. ### Choosing a Signing Method @@ -81,9 +110,10 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: -* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation ### JWT and OAuth @@ -101,6 +131,8 @@ This library uses descriptive error messages whenever possible. If you are not g ## More -Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). 
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. + +[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md new file mode 100644 index 00000000000..b08402c3427 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +As of February 2022 (and until this document is updated), the latest version `v4` is supported. + +## Reporting a Vulnerability + +If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s). + +You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. + +## Public Discussions + +Please avoid publicly discussing a potential security vulnerability. + +Let's take this offline and find a solution first, this limits the potential impact as much as possible. + +We appreciate your help! diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md similarity index 89% rename from vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md rename to vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md index dac737bcaa4..afbfc4e408d 100644 --- a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md +++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md @@ -1,5 +1,16 @@ ## `jwt-go` Version History +#### 4.0.0 + +* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. + +#### 3.2.2 + +* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). +* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). 
+ #### 3.2.1 * **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go new file mode 100644 index 00000000000..364cec8773c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -0,0 +1,269 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// Claims must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// RegisteredClaims are a structured version of the JWT Claims Set, +// restricted to Registered Claim Names, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 +// +// This type can be used on its own, but then additional private and +// public claims embedded in the JWT will not be parsed. The typical usecase +// therefore is to embedded this in a user-defined claim type. +// +// See examples for how to use this with your own claim types. +type RegisteredClaims struct { + // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 + Issuer string `json:"iss,omitempty"` + + // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + Subject string `json:"sub,omitempty"` + + // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 + Audience ClaimStrings `json:"aud,omitempty"` + + // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + ExpiresAt *NumericDate `json:"exp,omitempty"` + + // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. 
+func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. 
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { + return !required + } + return now.After(*iat) || now.Equal(*iat) +} + +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { + return !required + } + return now.After(*nbf) || now.Equal(*nbf) +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go similarity index 100% rename from vendor/github.com/golang-jwt/jwt/doc.go rename to vendor/github.com/golang-jwt/jwt/v4/doc.go diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go similarity index 81% rename from vendor/github.com/golang-jwt/jwt/ecdsa.go rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa.go index d310af1c7c0..eac023fc6c8 100644 --- a/vendor/github.com/golang-jwt/jwt/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go @@ -13,7 +13,7 @@ var ( ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") ) -// Implements the ECDSA family of signing methods signing methods +// SigningMethodECDSA implements the ECDSA family of signing methods. // Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification type SigningMethodECDSA struct { Name string @@ -53,7 +53,7 @@ func (m *SigningMethodECDSA) Alg() string { return m.Name } -// Implements the Verify method from SigningMethod +// Verify implements token verification for the SigningMethod. 
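The RegisteredClaims doc comment above suggests embedding it in a user-defined claims type; a small sketch of that pattern follows, with the OrgClaims type and its field invented purely for illustration.

```go
// Illustrative sketch only: embedding RegisteredClaims in custom claims.
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

type OrgClaims struct {
	OrgID string `json:"orgID"` // private claim, invented for this example
	jwt.RegisteredClaims
}

func main() {
	claims := OrgClaims{
		OrgID: "tenant-1",
		RegisteredClaims: jwt.RegisteredClaims{
			Issuer:    "example-issuer",
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(30 * time.Minute)),
		},
	}
	// The embedded RegisteredClaims provides Valid(), so OrgClaims satisfies jwt.Claims
	// and can be passed to jwt.NewWithClaims or jwt.ParseWithClaims.
	fmt.Println("claims currently valid:", claims.Valid() == nil)
}
```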
// For this verify method, key must be an ecdsa.PublicKey struct func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -95,7 +95,7 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa return ErrECDSAVerification } -// Implements the Sign method from SigningMethod +// Sign implements token signing for the SigningMethod. // For this signing method, key must be an ecdsa.PrivateKey struct func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { // Get the key @@ -128,18 +128,12 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string keyBytes += 1 } - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. return EncodeSegment(out), nil } else { diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go similarity index 81% rename from vendor/github.com/golang-jwt/jwt/ecdsa_utils.go rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go index db9f4be7d8e..5700636d35b 100644 --- a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go @@ -8,11 +8,11 @@ import ( ) var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") ) -// Parse PEM encoded Elliptic Curve Private Key Structure +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { var err error @@ -39,7 +39,7 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { return pkey, nil } -// Parse PEM encoded PKCS1 or PKCS8 public key +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { var err error diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go new file mode 100644 index 00000000000..07d3aacd631 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -0,0 +1,85 @@ +package jwt + +import ( + "errors" + + "crypto" + "crypto/ed25519" + "crypto/rand" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. 
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return "", ErrInvalidKeyType + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go new file mode 100644 index 00000000000..cdb5e68e876 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err 
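A hedged usage sketch for the new EdDSA method. NewWithClaims, SignedString, and Parse are the token helpers added later in this patch; key generation uses the standard library.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign: ed25519.PrivateKey satisfies crypto.Signer, as SigningMethodEd25519.Sign requires.
	token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{"sub": "1234567890"})
	signed, err := token.SignedString(priv)
	if err != nil {
		panic(err)
	}

	// Verify: the key func hands back the ed25519.PublicKey expected by Verify.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return pub, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil>
}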
+ } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go new file mode 100644 index 00000000000..10ac8835cc8 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -0,0 +1,112 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") + + ErrTokenMalformed = errors.New("token is malformed") + ErrTokenUnverifiable = errors.New("token is unverifiable") + ErrTokenSignatureInvalid = errors.New("token signature is invalid") + + ErrTokenInvalidAudience = errors.New("token has invalid audience") + ErrTokenExpired = errors.New("token is expired") + ErrTokenUsedBeforeIssued = errors.New("token used before issued") + ErrTokenInvalidIssuer = errors.New("token has invalid issuer") + ErrTokenNotValidYet = errors.New("token is not valid yet") + ErrTokenInvalidId = errors.New("token has invalid id") + ErrTokenInvalidClaims = errors.New("token has invalid claims") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// NewValidationError is a helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// ValidationError represents an error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Error is the implementation of the err interface. +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// Unwrap gives errors.Is and errors.As access to the inner error. +func (e *ValidationError) Unwrap() error { + return e.Inner +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} + +// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message +// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use +// custom error messages (mainly for backwards compatability) and still leverage errors.Is using the global error variables. 
+func (e *ValidationError) Is(err error) bool { + // Check, if our inner error is a direct match + if errors.Is(errors.Unwrap(e), err) { + return true + } + + // Otherwise, we need to match using our error flags + switch err { + case ErrTokenMalformed: + return e.Errors&ValidationErrorMalformed != 0 + case ErrTokenUnverifiable: + return e.Errors&ValidationErrorUnverifiable != 0 + case ErrTokenSignatureInvalid: + return e.Errors&ValidationErrorSignatureInvalid != 0 + case ErrTokenInvalidAudience: + return e.Errors&ValidationErrorAudience != 0 + case ErrTokenExpired: + return e.Errors&ValidationErrorExpired != 0 + case ErrTokenUsedBeforeIssued: + return e.Errors&ValidationErrorIssuedAt != 0 + case ErrTokenInvalidIssuer: + return e.Errors&ValidationErrorIssuer != 0 + case ErrTokenNotValidYet: + return e.Errors&ValidationErrorNotValidYet != 0 + case ErrTokenInvalidId: + return e.Errors&ValidationErrorId != 0 + case ErrTokenInvalidClaims: + return e.Errors&ValidationErrorClaimsInvalid != 0 + } + + return false +} diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go similarity index 90% rename from vendor/github.com/golang-jwt/jwt/hmac.go rename to vendor/github.com/golang-jwt/jwt/v4/hmac.go index addbe5d4018..011f68a2744 100644 --- a/vendor/github.com/golang-jwt/jwt/hmac.go +++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go @@ -6,7 +6,7 @@ import ( "errors" ) -// Implements the HMAC-SHA family of signing methods signing methods +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. // Expects key type of []byte for both signing and validation type SigningMethodHMAC struct { Name string @@ -45,7 +45,7 @@ func (m *SigningMethodHMAC) Alg() string { return m.Name } -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { // Verify the key is the right type keyBytes, ok := key.([]byte) @@ -77,7 +77,7 @@ func (m *SigningMethodHMAC) Verify(signingString, signature string, key interfac return nil } -// Implements the Sign method from SigningMethod for this signing method. +// Sign implements token signing for the SigningMethod. // Key must be []byte func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { if keyBytes, ok := key.([]byte); ok { diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go similarity index 50% rename from vendor/github.com/golang-jwt/jwt/map_claims.go rename to vendor/github.com/golang-jwt/jwt/v4/map_claims.go index ba290f429af..2700d64a0d0 100644 --- a/vendor/github.com/golang-jwt/jwt/map_claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -3,10 +3,11 @@ package jwt import ( "encoding/json" "errors" + "time" // "fmt" ) -// Claims type that uses the map[string]interface{} for JSON decoding +// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding. // This is the default claims type if you don't supply one type MapClaims map[string]interface{} @@ -31,53 +32,92 @@ func (m MapClaims) VerifyAudience(cmp string, req bool) bool { return verifyAud(aud, cmp, req) } -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). 
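Because ValidationError.Is falls back to the error-flag bitfield, callers can branch on the sentinel errors above with errors.Is even when the inner message differs; a minimal sketch with placeholder inputs:

package main

import (
	"errors"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// classify is illustrative; tokenString and keyFunc come from the caller.
func classify(tokenString string, keyFunc jwt.Keyfunc) {
	token, err := jwt.Parse(tokenString, keyFunc)
	switch {
	case err == nil && token.Valid:
		fmt.Println("token accepted")
	case errors.Is(err, jwt.ErrTokenMalformed):
		fmt.Println("not a JWT at all")
	case errors.Is(err, jwt.ErrTokenExpired):
		fmt.Println("exp check failed")
	case errors.Is(err, jwt.ErrTokenSignatureInvalid):
		fmt.Println("signature did not verify")
	default:
		fmt.Println("other validation failure:", err)
	}
}

func main() {
	classify("not-a-real-token", func(t *jwt.Token) (interface{}, error) {
		return []byte("placeholder-key"), nil // illustrative HMAC key
	})
}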
+// If req is false, it will return true, if exp is unset. func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["exp"] + if !ok { + return !req + } + + switch exp := v.(type) { case float64: - return verifyExp(int64(exp), cmp, req) + if exp == 0 { + return verifyExp(nil, cmpTime, req) + } + + return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) + v, _ := exp.Float64() + + return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) } - return !req + + return false } -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["iat"] + if !ok { + return !req + } + + switch iat := v.(type) { case float64: - return verifyIat(int64(iat), cmp, req) + if iat == 0 { + return verifyIat(nil, cmpTime, req) + } + + return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) + v, _ := iat.Float64() + + return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) } - return !req -} -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) + return false } -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["nbf"] + if !ok { + return !req + } + + switch nbf := v.(type) { case float64: - return verifyNbf(int64(nbf), cmp, req) + if nbf == 0 { + return verifyNbf(nil, cmpTime, req) + } + + return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) + v, _ := nbf.Float64() + + return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) } - return !req + + return false +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) } -// Validates time based claims "exp, iat, nbf". +// Valid validates time based claims "exp, iat, nbf". // There is no accounting for clock skew. // As well, if any of the above claims are not in the token, it will still // be considered a valid claim. 
@@ -86,16 +126,19 @@ func (m MapClaims) Valid() error { now := TimeFunc().Unix() if !m.VerifyExpiresAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenExpired vErr.Inner = errors.New("Token is expired") vErr.Errors |= ValidationErrorExpired } if !m.VerifyIssuedAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued vErr.Inner = errors.New("Token used before issued") vErr.Errors |= ValidationErrorIssuedAt } if !m.VerifyNotBefore(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenNotValidYet vErr.Inner = errors.New("Token is not valid yet") vErr.Errors |= ValidationErrorNotValidYet } diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go similarity index 94% rename from vendor/github.com/golang-jwt/jwt/none.go rename to vendor/github.com/golang-jwt/jwt/v4/none.go index f04d189d067..f19835d2078 100644 --- a/vendor/github.com/golang-jwt/jwt/none.go +++ b/vendor/github.com/golang-jwt/jwt/v4/none.go @@ -1,6 +1,6 @@ package jwt -// Implements the none signing method. This is required by the spec +// SigningMethodNone implements the none signing method. This is required by the spec // but you probably should never use it. var SigningMethodNone *signingMethodNone diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go similarity index 70% rename from vendor/github.com/golang-jwt/jwt/parser.go rename to vendor/github.com/golang-jwt/jwt/v4/parser.go index d6901d9adb5..c0a6f692791 100644 --- a/vendor/github.com/golang-jwt/jwt/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -8,18 +8,47 @@ import ( ) type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing + // If populated, only these methods will be considered valid. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + ValidMethods []string + + // Use JSON Number format in JSON decoder. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + UseJSONNumber bool + + // Skip claims validation during token parsing. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + SkipClaimsValidation bool } -// Parse, validate, and return a token. +// NewParser creates a new Parser with the specified options +func NewParser(options ...ParserOption) *Parser { + p := &Parser{} + + // loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token. // keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. 
+// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { @@ -87,12 +116,12 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, vErr } -// WARNING: Don't use this method unless you know what you're doing +// ParseUnverified parses the token but doesn't validate the signature. +// +// WARNING: Don't use this method unless you know what you're doing. // -// This method parses the token but doesn't validate the signature. It's only -// ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from -// it. +// It's only ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from it. func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { parts = strings.Split(tokenString, ".") if len(parts) != 3 { diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go new file mode 100644 index 00000000000..6ea6f9527de --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -0,0 +1,29 @@ +package jwt + +// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add +// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that +// takes a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. +// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.ValidMethods = methods + } +} + +// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.UseJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you exactly know +// what you are doing. +func WithoutClaimsValidation() ParserOption { + return func(p *Parser) { + p.SkipClaimsValidation = true + } +} diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go similarity index 92% rename from vendor/github.com/golang-jwt/jwt/rsa.go rename to vendor/github.com/golang-jwt/jwt/v4/rsa.go index e4caf1ca4a1..b910b19c0b5 100644 --- a/vendor/github.com/golang-jwt/jwt/rsa.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go @@ -6,7 +6,7 @@ import ( "crypto/rsa" ) -// Implements the RSA family of signing methods signing methods +// SigningMethodRSA implements the RSA family of signing methods. 
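A short sketch of the functional-options flow added in parser_option.go; the token string and HMAC key are placeholders.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Restricting accepted algorithms up front guards against "alg" confusion attacks.
	parser := jwt.NewParser(
		jwt.WithValidMethods([]string{"HS256"}),
		jwt.WithJSONNumber(),
	)

	tokenString := "placeholder.token.string" // illustrative input
	token, err := parser.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		return []byte("shared-secret"), nil // illustrative HMAC key
	})
	fmt.Println(token != nil, err)
}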
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation type SigningMethodRSA struct { Name string @@ -44,7 +44,7 @@ func (m *SigningMethodRSA) Alg() string { return m.Name } -// Implements the Verify method from SigningMethod +// Verify implements token verification for the SigningMethod // For this signing method, must be an *rsa.PublicKey structure. func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) } -// Implements the Sign method from SigningMethod +// Sign implements token signing for the SigningMethod // For this signing method, must be an *rsa.PrivateKey structure. func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go similarity index 93% rename from vendor/github.com/golang-jwt/jwt/rsa_pss.go rename to vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go index c0147086480..4fd6f9e610b 100644 --- a/vendor/github.com/golang-jwt/jwt/rsa_pss.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -1,3 +1,4 @@ +//go:build go1.4 // +build go1.4 package jwt @@ -8,7 +9,7 @@ import ( "crypto/rsa" ) -// Implements the RSAPSS family of signing methods signing methods +// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods type SigningMethodRSAPSS struct { *SigningMethodRSA Options *rsa.PSSOptions @@ -79,7 +80,7 @@ func init() { }) } -// Implements the Verify method from SigningMethod +// Verify implements token verification for the SigningMethod. // For this verify method, key must be an rsa.PublicKey struct func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { var err error @@ -113,7 +114,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interf return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) } -// Implements the Sign method from SigningMethod +// Sign implements token signing for the SigningMethod. 
// For this signing method, key must be an rsa.PrivateKey struct func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go similarity index 72% rename from vendor/github.com/golang-jwt/jwt/rsa_utils.go rename to vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go index 14c78c292a9..1966c450bf8 100644 --- a/vendor/github.com/golang-jwt/jwt/rsa_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go @@ -8,12 +8,12 @@ import ( ) var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") ) -// Parse PEM encoded PKCS1 or PKCS8 private key +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { var err error @@ -39,7 +39,11 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return pkey, nil } -// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860. func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { var err error @@ -71,7 +75,7 @@ func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.Pr return pkey, nil } -// Parse PEM encoded PKCS1 or PKCS8 public key +// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { var err error diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go similarity index 66% rename from vendor/github.com/golang-jwt/jwt/signing_method.go rename to vendor/github.com/golang-jwt/jwt/v4/signing_method.go index ed1f212b21e..241ae9c60d0 100644 --- a/vendor/github.com/golang-jwt/jwt/signing_method.go +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -7,14 +7,14 @@ import ( var signingMethods = map[string]func() SigningMethod{} var signingMethodLock = new(sync.RWMutex) -// Implement SigningMethod to add new methods for signing or verifying tokens. +// SigningMethod can be used add new methods for signing or verifying tokens. type SigningMethod interface { Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error Alg() string // returns the alg identifier for this method (example: 'HS256') } -// Register the "alg" name and a factory function for signing method. +// RegisterSigningMethod registers the "alg" name and a factory function for signing method. 
// This is typically done during init() in the method's implementation func RegisterSigningMethod(alg string, f func() SigningMethod) { signingMethodLock.Lock() @@ -23,7 +23,7 @@ func RegisterSigningMethod(alg string, f func() SigningMethod) { signingMethods[alg] = f } -// Get a signing method from an "alg" string +// GetSigningMethod retrieves a signing method from an "alg" string func GetSigningMethod(alg string) (method SigningMethod) { signingMethodLock.RLock() defer signingMethodLock.RUnlock() @@ -33,3 +33,14 @@ func GetSigningMethod(alg string) (method SigningMethod) { } return } + +// GetAlgorithms returns a list of registered "alg" names +func GetAlgorithms() (algs []string) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + for alg := range signingMethods { + algs = append(algs, alg) + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf new file mode 100644 index 00000000000..53745d51d7c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go new file mode 100644 index 00000000000..71e909ea653 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -0,0 +1,132 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515 +// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations +// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global +// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. +// To use the non-recommended decoding, set this boolean to `true` prior to using this package. +var DecodePaddingAllowed bool + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Keyfunc will be used by the Parse methods as a callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// Token represents a JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// New creates a new Token with the specified signing method and an empty map of claims. +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +// NewWithClaims creates a new Token with the specified signing method and claims. 
+func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// SignedString creates and returns a complete, signed JWT. +// The token is signed using the SigningMethod specified in the token. +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// SigningString generates the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + var jsonValue []byte + + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + header := EncodeSegment(jsonValue) + + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + claim := EncodeSegment(jsonValue) + + return strings.Join([]string{header, claim}, "."), nil +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. +// The caller is strongly encouraged to set the WithValidMethods option to +// validate the 'alg' claim in the token matches the expected algorithm. +// For more details about the importance of validating the 'alg' claim, +// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) +} + +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. 
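A sketch of the embedding pattern described in the note above, using a hypothetical OrderClaims type with a non-pointer RegisteredClaims; the token string and key are placeholders.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// OrderClaims is a hypothetical claims type: it embeds a non-pointer
// RegisteredClaims (per the note above) and adds one custom field.
type OrderClaims struct {
	OrderID string `json:"order_id"`
	jwt.RegisteredClaims
}

func main() {
	tokenString := "placeholder.token.string" // illustrative input
	claims := &OrderClaims{}
	token, err := jwt.ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
		return []byte("shared-secret"), nil // illustrative HMAC key
	})
	if err == nil && token.Valid {
		fmt.Println("order:", claims.OrderID, "issuer:", claims.Issuer)
	}
}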
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) +} + +// EncodeSegment encodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} + +// DecodeSegment decodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func DecodeSegment(seg string) ([]byte, error) { + if DecodePaddingAllowed { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + return base64.URLEncoding.DecodeString(seg) + } + + return base64.RawURLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go new file mode 100644 index 00000000000..ac8e140eb11 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -0,0 +1,145 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. +// This has an influence on the precision of times when comparing expiry or +// other related time fields. Furthermore, it is also the precision of times +// when serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. +var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially +// its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the behaviour +// of the underlying []string. If it is set to false, it will serialize to a single +// string, if it contains one element. Otherwise, it will serialize to an array of strings. +var MarshalSingleStringAsArray = true + +// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. +type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + round, frac := math.Modf(f) + return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) +} + +// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision. 
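A small sketch of the padding opt-in behaviour of DecodeSegment described above; the segment used here is the unpadded base64url encoding of {"sub":"1"}.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// "eyJzdWIiOiIxIn0" is the unpadded base64url encoding of `{"sub":"1"}`.
	b, err := jwt.DecodeSegment("eyJzdWIiOiIxIn0")
	fmt.Println(string(b), err) // {"sub":"1"} <nil>

	// The same segment with '=' padding is rejected by the default strict codec...
	_, err = jwt.DecodeSegment("eyJzdWIiOiIxIn0=")
	fmt.Println(err != nil) // true

	// ...and accepted after the global, non-goroutine-safe opt-in described above.
	jwt.DecodePaddingAllowed = true
	b, err = jwt.DecodeSegment("eyJzdWIiOiIxIn0=")
	fmt.Println(string(b), err) // {"sub":"1"} <nil>
}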
+func (date NumericDate) MarshalJSON() (b []byte, err error) { + var prec int + if TimePrecision < time.Second { + prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) + } + truncatedDate := date.Truncate(TimePrecision) + + // For very large timestamps, UnixNano would overflow an int64, but this + // function requires nanosecond level precision, so we have to use the + // following technique to get round the issue: + // 1. Take the normal unix timestamp to form the whole number part of the + // output, + // 2. Take the result of the Nanosecond function, which retuns the offset + // within the second of the particular unix time instance, to form the + // decimal part of the output + // 3. Concatenate them to produce the final result + seconds := strconv.FormatInt(truncatedDate.Unix(), 10) + nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) + + output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) + + return output, nil +} + +// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a +// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch +// with either integer or non-integer seconds. +func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericData: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. +// This type is necessary, since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, + // only contains one element, it MAY be serialized as a single string. This may or may not be + // desired based on the ecosystem of other JWT library used, so we make it configurable by the + // variable MarshalSingleStringAsArray. 
+ if len(s) == 1 && !MarshalSingleStringAsArray { + return json.Marshal(s[0]) + } + + return json.Marshal([]string(s)) +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index 0ba9da7d6bf..aecaff59ee8 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -15,6 +15,8 @@ import ( "encoding/gob" "fmt" "io" + "io/ioutil" + "log" "net/rpc" "os" "os/exec" @@ -42,6 +44,17 @@ func (c *Connection) Close() error { return werr } +// If ECP Logging is enabled return true +// Otherwise return false +func enableECPLogging() bool { + if os.Getenv("ENABLE_ENTERPRISE_CERTIFICATE_LOGS") != "" { + return true + } + + log.SetOutput(ioutil.Discard) + return false +} + func init() { gob.Register(crypto.SHA256) gob.Register(&rsa.PSSOptions{}) @@ -72,9 +85,9 @@ func (k *Key) Close() error { if err := k.cmd.Process.Kill(); err != nil { return fmt.Errorf("failed to kill signer process: %w", err) } - if err := k.cmd.Wait(); err.Error() != "signal: killed" { - return fmt.Errorf("signer process was not killed: %w", err) - } + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + k.cmd.Wait() // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. if err := k.client.Close(); err.Error() != "close |0: file already closed" { @@ -105,6 +118,7 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ // // The config file also specifies which certificate the signer should use. func Cred(configFilePath string) (*Key, error) { + enableECPLogging() if configFilePath == "" { configFilePath = util.GetDefaultConfigFilePath() } diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 543399e36e6..9eb440413fd 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -63,7 +63,7 @@ func (s Sample) String() string { if s.Histogram != nil { return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ Timestamp: s.Timestamp, - Histogram: *s.Histogram, + Histogram: s.Histogram, }) } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ @@ -82,7 +82,7 @@ func (s Sample) MarshalJSON() ([]byte, error) { Metric: s.Metric, Histogram: SampleHistogramPair{ Timestamp: s.Timestamp, - Histogram: *s.Histogram, + Histogram: s.Histogram, }, } return json.Marshal(&v) @@ -100,37 +100,19 @@ func (s Sample) MarshalJSON() ([]byte, error) { return json.Marshal(&v) } -type sampleHistogramPairPtr struct { - Timestamp Time - Histogram *SampleHistogram -} - -func (s *sampleHistogramPairPtr) UnmarshalJSON(buf []byte) error { - tmp := []interface{}{&s.Timestamp, &s.Histogram} - wantLen := len(tmp) - if err := json.Unmarshal(buf, &tmp); err != nil { - return err - } - if gotLen := len(tmp); gotLen != wantLen { - return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) - } - return nil -} - // UnmarshalJSON implements json.Unmarshaler. 
-// TODO: simplify and remove the need for both sampleHistogramPairPtr and SampleHistogramPair func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - Histogram sampleHistogramPairPtr `json:"histogram"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, - Histogram: sampleHistogramPairPtr{ + Histogram: SampleHistogramPair{ Timestamp: s.Timestamp, Histogram: s.Histogram, }, diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 9266fb9854c..8b59571a3f7 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -18,8 +18,15 @@ import ( "fmt" "math" "strconv" + "unsafe" + + jsoniter "github.com/json-iterator/go" ) +func init() { + jsoniter.RegisterTypeEncoderFunc("model.SamplePair", marshalSamplePairJSON, marshalJSONIsEmpty) +} + var ( // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a // non-existing sample pair. It is a SamplePair with timestamp Earliest and @@ -71,17 +78,18 @@ type SamplePair struct { Value SampleValue } -// MarshalJSON implements json.Marshaler. +// marshalSamplePairJSON writes `[ts, "val"]`. +func marshalSamplePairJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*SamplePair)(ptr)) + stream.WriteArrayStart() + MarshalTimestamp(int64(p.Timestamp), stream) + stream.WriteMore() + MarshalValue(float64(p.Value), stream) + stream.WriteArrayEnd() +} + func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil + return jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(s) } // UnmarshalJSON implements json.Unmarshaler. diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index a4be0bca311..cc221a8868e 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -18,8 +18,16 @@ import ( "fmt" "strconv" "strings" + "unsafe" + + jsoniter "github.com/json-iterator/go" ) +func init() { + jsoniter.RegisterTypeEncoderFunc("model.HistogramBucket", marshalHistogramBucketJSON, marshalJSONIsEmpty) + jsoniter.RegisterTypeEncoderFunc("model.SampleHistogramPair", marshalSampleHistogramPairJSON, marshalJSONIsEmpty) +} + type FloatString float64 func (v FloatString) String() string { @@ -43,30 +51,16 @@ func (v *FloatString) UnmarshalJSON(b []byte) error { } type HistogramBucket struct { - Boundaries int + Boundaries int32 Lower FloatString Upper FloatString Count FloatString } -func (s HistogramBucket) MarshalJSON() ([]byte, error) { - b, err := json.Marshal(s.Boundaries) - if err != nil { - return nil, err - } - l, err := json.Marshal(s.Lower) - if err != nil { - return nil, err - } - u, err := json.Marshal(s.Upper) - if err != nil { - return nil, err - } - c, err := json.Marshal(s.Count) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +// marshalHistogramBucketJSON writes fmt.Sprintf("[%s,%s,%s,%s]", b.Boundaries, b.Lower, b.Upper, b.Count). 
+func marshalHistogramBucketJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + b := *((*HistogramBucket)(ptr)) + MarshalHistogramBucket(b, stream) } func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { @@ -135,19 +129,25 @@ func (s *SampleHistogram) Equal(o *SampleHistogram) bool { type SampleHistogramPair struct { Timestamp Time - Histogram SampleHistogram + // Histogram should never be nil, it's only stored as pointer for efficiency. + Histogram *SampleHistogram +} + +// marshalSampleHistogramPairJSON writes `[ts, "val"]`. +func marshalSampleHistogramPairJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + p := *((*SampleHistogramPair)(ptr)) + stream.WriteArrayStart() + MarshalTimestamp(int64(p.Timestamp), stream) + stream.WriteMore() + MarshalHistogram(*p.Histogram, stream) + stream.WriteArrayEnd() } func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Histogram) - if err != nil { - return nil, err + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil + return jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(s) } func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { @@ -159,6 +159,9 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { if gotLen := len(tmp); gotLen != wantLen { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } return nil } @@ -167,5 +170,5 @@ func (s SampleHistogramPair) String() string { } func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { - return s == o || (s.Histogram.Equal(&o.Histogram) && s.Timestamp.Equal(o.Timestamp)) + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) } diff --git a/vendor/github.com/prometheus/common/model/value_marshal.go b/vendor/github.com/prometheus/common/model/value_marshal.go new file mode 100644 index 00000000000..df193bcb31e --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_marshal.go @@ -0,0 +1,131 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "math" + "strconv" + "unsafe" + + jsoniter "github.com/json-iterator/go" +) + +func marshalJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +// MarshalTimestamp marshals a point timestamp using the passed jsoniter stream. +func MarshalTimestamp(t int64, stream *jsoniter.Stream) { + // Write out the timestamp as a float divided by 1000. + // This is ~3x faster than converting to a float. 
+ if t < 0 { + stream.WriteRaw(`-`) + t = -t + } + stream.WriteInt64(t / 1000) + fraction := t % 1000 + if fraction != 0 { + stream.WriteRaw(`.`) + if fraction < 100 { + stream.WriteRaw(`0`) + } + if fraction < 10 { + stream.WriteRaw(`0`) + } + stream.WriteInt64(fraction) + } +} + +// MarshalValue marshals a point value using the passed jsoniter stream. +func MarshalValue(v float64, stream *jsoniter.Stream) { + stream.WriteRaw(`"`) + // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround + // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan). + buf := stream.Buffer() + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + buf = strconv.AppendFloat(buf, v, fmt, -1, 64) + stream.SetBuffer(buf) + stream.WriteRaw(`"`) +} + +// MarshalHistogramBucket writes something like: [ 3, "-0.25", "0.25", "3"] +// See MarshalHistogram to understand what the numbers mean +func MarshalHistogramBucket(b HistogramBucket, stream *jsoniter.Stream) { + stream.WriteArrayStart() + stream.WriteInt32(b.Boundaries) + stream.WriteMore() + MarshalValue(float64(b.Lower), stream) + stream.WriteMore() + MarshalValue(float64(b.Upper), stream) + stream.WriteMore() + MarshalValue(float64(b.Count), stream) + stream.WriteArrayEnd() +} + +// MarshalHistogram writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. +func MarshalHistogram(h SampleHistogram, stream *jsoniter.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + MarshalValue(float64(h.Count), stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + MarshalValue(float64(h.Sum), stream) + + bucketFound := false + for _, bucket := range h.Buckets { + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + MarshalHistogramBucket(*bucket, stream) + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6c8e3e21979..e358db69c5d 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.45.2 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. 
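For reference, a hedged sketch of the wire format produced by the jsoniter encoders above; the millisecond timestamp is illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	p := model.SamplePair{
		Timestamp: model.Time(1477043083123), // milliseconds since epoch, illustrative
		Value:     model.SampleValue(1),
	}
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	// Expected shape: [1477043083.123,"1"], i.e. seconds with a millisecond
	// fraction, and the value rendered as a quoted string.
	fmt.Println(string(b))
}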
# windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. - ifeq (,$(CIRCLE_JOB)) + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index ff6b927da15..06968ca2ed4 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go similarity index 64% rename from vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go rename to vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index d6892709437..d88442f0edf 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -1,10 +1,9 @@ -// Copyright The OpenTelemetry Authors -// +// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,9 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" +//go:build linux +// +build linux + +package procfs -const ( - // ExceptionEventName is the name of the Span event representing an exception. 
- ExceptionEventName = "exception" -) +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index ea41bf2ca1e..a6b2b3127cb 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a82600f6..f9d961e4417 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1da..0c482c18ccf 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index a94f86dc4ae..06b7b8f2163 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,8 +27,9 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 
https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 // SoftnetStat contains a single row of data from /proc/net/softnet_stat. type SoftnetStat struct { @@ -38,6 +39,18 @@ type SoftnetStat struct { Dropped uint32 // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. - us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index dcea9c5a671..5cc40aef55b 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// 
parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) + ) + + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index cca03327c3f..ea83a75ffc4 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 00000000000..9df79c23799 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. 
+ Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. +func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 48b5238194e..6a43bb24595 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -33,139 +33,140 @@ type ProcNetstat struct { } type TcpExt struct { // nolint:revive - SyncookiesSent float64 - SyncookiesRecv float64 - SyncookiesFailed float64 - EmbryonicRsts float64 - PruneCalled float64 - RcvPruned float64 - OfoPruned float64 - OutOfWindowIcmps float64 - LockDroppedIcmps float64 - ArpFilter float64 - TW float64 - TWRecycled float64 - TWKilled float64 - PAWSActive float64 - PAWSEstab float64 - DelayedACKs float64 - DelayedACKLocked float64 - DelayedACKLost float64 - ListenOverflows float64 - ListenDrops float64 - TCPHPHits float64 - TCPPureAcks float64 - TCPHPAcks float64 - TCPRenoRecovery float64 - TCPSackRecovery float64 - TCPSACKReneging float64 - TCPSACKReorder float64 - TCPRenoReorder float64 - TCPTSReorder float64 - TCPFullUndo float64 - TCPPartialUndo float64 - TCPDSACKUndo float64 - TCPLossUndo float64 - TCPLostRetransmit float64 - TCPRenoFailures float64 - TCPSackFailures float64 - TCPLossFailures float64 - TCPFastRetrans float64 - TCPSlowStartRetrans float64 - TCPTimeouts float64 - TCPLossProbes float64 - TCPLossProbeRecovery float64 - TCPRenoRecoveryFail float64 - TCPSackRecoveryFail float64 - TCPRcvCollapsed float64 - TCPDSACKOldSent float64 - TCPDSACKOfoSent float64 - TCPDSACKRecv float64 - TCPDSACKOfoRecv float64 - TCPAbortOnData float64 - TCPAbortOnClose float64 - TCPAbortOnMemory float64 - TCPAbortOnTimeout float64 - TCPAbortOnLinger float64 - TCPAbortFailed float64 - TCPMemoryPressures float64 - TCPMemoryPressuresChrono float64 - TCPSACKDiscard float64 - TCPDSACKIgnoredOld float64 - 
TCPDSACKIgnoredNoUndo float64 - TCPSpuriousRTOs float64 - TCPMD5NotFound float64 - TCPMD5Unexpected float64 - TCPMD5Failure float64 - TCPSackShifted float64 - TCPSackMerged float64 - TCPSackShiftFallback float64 - TCPBacklogDrop float64 - PFMemallocDrop float64 - TCPMinTTLDrop float64 - TCPDeferAcceptDrop float64 - IPReversePathFilter float64 - TCPTimeWaitOverflow float64 - TCPReqQFullDoCookies float64 - TCPReqQFullDrop float64 - TCPRetransFail float64 - TCPRcvCoalesce float64 - TCPOFOQueue float64 - TCPOFODrop float64 - TCPOFOMerge float64 - TCPChallengeACK float64 - TCPSYNChallenge float64 - TCPFastOpenActive float64 - TCPFastOpenActiveFail float64 - TCPFastOpenPassive float64 - TCPFastOpenPassiveFail float64 - TCPFastOpenListenOverflow float64 - TCPFastOpenCookieReqd float64 - TCPFastOpenBlackhole float64 - TCPSpuriousRtxHostQueues float64 - BusyPollRxPackets float64 - TCPAutoCorking float64 - TCPFromZeroWindowAdv float64 - TCPToZeroWindowAdv float64 - TCPWantZeroWindowAdv float64 - TCPSynRetrans float64 - TCPOrigDataSent float64 - TCPHystartTrainDetect float64 - TCPHystartTrainCwnd float64 - TCPHystartDelayDetect float64 - TCPHystartDelayCwnd float64 - TCPACKSkippedSynRecv float64 - TCPACKSkippedPAWS float64 - TCPACKSkippedSeq float64 - TCPACKSkippedFinWait2 float64 - TCPACKSkippedTimeWait float64 - TCPACKSkippedChallenge float64 - TCPWinProbe float64 - TCPKeepAlive float64 - TCPMTUPFail float64 - TCPMTUPSuccess float64 - TCPWqueueTooBig float64 + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop 
*float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 } type IpExt struct { // nolint:revive - InNoRoutes float64 - InTruncatedPkts float64 - InMcastPkts float64 - OutMcastPkts float64 - InBcastPkts float64 - OutBcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InCsumErrors float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 - ReasmOverlaps float64 + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 } func (p Proc) Netstat() (ProcNetstat, error) { @@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) { if err != nil { return ProcNetstat{PID: p.PID}, err } - procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) procNetstat.PID = p.PID return procNetstat, err } -// parseNetstat parses the metrics from proc//net/netstat file +// parseProcNetstat parses the metrics from proc//net/netstat file // and returns a ProcNetstat structure. 
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { var ( scanner = bufio.NewScanner(r) procNetstat = ProcNetstat{} @@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = value + procNetstat.TcpExt.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = value + procNetstat.TcpExt.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = value + procNetstat.TcpExt.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = value + procNetstat.TcpExt.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = value + procNetstat.TcpExt.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = value + procNetstat.TcpExt.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = value + procNetstat.TcpExt.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = value + procNetstat.TcpExt.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = value + procNetstat.TcpExt.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = value + procNetstat.TcpExt.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = value + procNetstat.TcpExt.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = value + procNetstat.TcpExt.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = value + procNetstat.TcpExt.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = value + procNetstat.TcpExt.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = value + procNetstat.TcpExt.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = value + procNetstat.TcpExt.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = value + procNetstat.TcpExt.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = value + procNetstat.TcpExt.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = value + procNetstat.TcpExt.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = value + procNetstat.TcpExt.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = value + procNetstat.TcpExt.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = value + procNetstat.TcpExt.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = value + procNetstat.TcpExt.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = value + procNetstat.TcpExt.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = value + procNetstat.TcpExt.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = value + procNetstat.TcpExt.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = value + procNetstat.TcpExt.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = value + procNetstat.TcpExt.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = value + procNetstat.TcpExt.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = value + procNetstat.TcpExt.TCPFullUndo = &value case "TCPPartialUndo": - 
procNetstat.TcpExt.TCPPartialUndo = value + procNetstat.TcpExt.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = value + procNetstat.TcpExt.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = value + procNetstat.TcpExt.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = value + procNetstat.TcpExt.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = value + procNetstat.TcpExt.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = value + procNetstat.TcpExt.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = value + procNetstat.TcpExt.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = value + procNetstat.TcpExt.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = value + procNetstat.TcpExt.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = value + procNetstat.TcpExt.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = value + procNetstat.TcpExt.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = value + procNetstat.TcpExt.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = value + procNetstat.TcpExt.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = value + procNetstat.TcpExt.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = value + procNetstat.TcpExt.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = value + procNetstat.TcpExt.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = value + procNetstat.TcpExt.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = value + procNetstat.TcpExt.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = value + procNetstat.TcpExt.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = value + procNetstat.TcpExt.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = value + procNetstat.TcpExt.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = value + procNetstat.TcpExt.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = value + procNetstat.TcpExt.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = value + procNetstat.TcpExt.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = value + procNetstat.TcpExt.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = value + procNetstat.TcpExt.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = value + procNetstat.TcpExt.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = value + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = value + procNetstat.TcpExt.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = value + procNetstat.TcpExt.TCPOFODrop = &value case "TCPOFOMerge": - 
procNetstat.TcpExt.TCPOFOMerge = value + procNetstat.TcpExt.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = value + procNetstat.TcpExt.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = value + procNetstat.TcpExt.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = value + procNetstat.TcpExt.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = value + procNetstat.TcpExt.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = value + procNetstat.TcpExt.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = value + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = value + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = value + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = value + procNetstat.TcpExt.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = value + procNetstat.TcpExt.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = value + procNetstat.TcpExt.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = value + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = value + procNetstat.TcpExt.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = value + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = value + procNetstat.TcpExt.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = value + procNetstat.TcpExt.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = value + procNetstat.TcpExt.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = value + procNetstat.TcpExt.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = value + procNetstat.TcpExt.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = value + procNetstat.TcpExt.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = value + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = value + procNetstat.TcpExt.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = value + procNetstat.TcpExt.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = value + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = value + procNetstat.TcpExt.TCPACKSkippedChallenge = &value case "TCPWinProbe": - 
procNetstat.TcpExt.TCPWinProbe = value + procNetstat.TcpExt.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = value + procNetstat.TcpExt.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = value + procNetstat.TcpExt.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = value + procNetstat.TcpExt.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = value + procNetstat.TcpExt.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = value + procNetstat.IpExt.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = value + procNetstat.IpExt.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = value + procNetstat.IpExt.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = value + procNetstat.IpExt.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = value + procNetstat.IpExt.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = value + procNetstat.IpExt.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = value + procNetstat.IpExt.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = value + procNetstat.IpExt.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = value + procNetstat.IpExt.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = value + procNetstat.IpExt.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = value + procNetstat.IpExt.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = value + procNetstat.IpExt.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = value + procNetstat.IpExt.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = value + procNetstat.IpExt.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = value + procNetstat.IpExt.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = value + procNetstat.IpExt.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = value + procNetstat.IpExt.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = value + procNetstat.IpExt.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index ae191896cbd..6c46b718849 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -37,100 +37,100 @@ type ProcSnmp struct { } type Ip struct { // nolint:revive - Forwarding float64 - DefaultTTL float64 - InReceives float64 - InHdrErrors float64 - InAddrErrors float64 - ForwDatagrams float64 - InUnknownProtos float64 - InDiscards float64 - InDelivers float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + 
FragCreates *float64 } -type Icmp struct { - InMsgs float64 - InErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InTimeExcds float64 - InParmProbs float64 - InSrcQuenchs float64 - InRedirects float64 - InEchos float64 - InEchoReps float64 - InTimestamps float64 - InTimestampReps float64 - InAddrMasks float64 - InAddrMaskReps float64 - OutMsgs float64 - OutErrors float64 - OutDestUnreachs float64 - OutTimeExcds float64 - OutParmProbs float64 - OutSrcQuenchs float64 - OutRedirects float64 - OutEchos float64 - OutEchoReps float64 - OutTimestamps float64 - OutTimestampReps float64 - OutAddrMasks float64 - OutAddrMaskReps float64 +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 } type IcmpMsg struct { - InType3 float64 - OutType3 float64 + InType3 *float64 + OutType3 *float64 } type Tcp struct { // nolint:revive - RtoAlgorithm float64 - RtoMin float64 - RtoMax float64 - MaxConn float64 - ActiveOpens float64 - PassiveOpens float64 - AttemptFails float64 - EstabResets float64 - CurrEstab float64 - InSegs float64 - OutSegs float64 - RetransSegs float64 - InErrs float64 - OutRsts float64 - InCsumErrors float64 + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 } type Udp struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } func (p Proc) Snmp() (ProcSnmp, error) { @@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = value + procSnmp.Ip.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = value + procSnmp.Ip.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = value + procSnmp.Ip.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = value + procSnmp.Ip.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = value + procSnmp.Ip.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = value + procSnmp.Ip.ForwDatagrams = &value 
case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = value + procSnmp.Ip.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = value + procSnmp.Ip.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = value + procSnmp.Ip.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = value + procSnmp.Ip.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = value + procSnmp.Ip.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = value + procSnmp.Ip.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = value + procSnmp.Ip.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = value + procSnmp.Ip.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = value + procSnmp.Ip.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = value + procSnmp.Ip.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = value + procSnmp.Ip.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = value + procSnmp.Ip.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = value + procSnmp.Ip.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = value + procSnmp.Icmp.InMsgs = &value case "InErrors": - procSnmp.Icmp.InErrors = value + procSnmp.Icmp.InErrors = &value case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = value + procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = value + procSnmp.Icmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = value + procSnmp.Icmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = value + procSnmp.Icmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = value + procSnmp.Icmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = value + procSnmp.Icmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = value + procSnmp.Icmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = value + procSnmp.Icmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = value + procSnmp.Icmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = value + procSnmp.Icmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = value + procSnmp.Icmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = value + procSnmp.Icmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = value + procSnmp.Icmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = value + procSnmp.Icmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = value + procSnmp.Icmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = value + procSnmp.Icmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = value + procSnmp.Icmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = value + procSnmp.Icmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = value + procSnmp.Icmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = value + procSnmp.Icmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = value + procSnmp.Icmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = value + procSnmp.Icmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = value + procSnmp.Icmp.OutTimestampReps = &value case "OutAddrMasks": - 
procSnmp.Icmp.OutAddrMasks = value + procSnmp.Icmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = value + procSnmp.Icmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = value + procSnmp.IcmpMsg.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = value + procSnmp.IcmpMsg.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = value + procSnmp.Tcp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = value + procSnmp.Tcp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = value + procSnmp.Tcp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = value + procSnmp.Tcp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = value + procSnmp.Tcp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = value + procSnmp.Tcp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = value + procSnmp.Tcp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = value + procSnmp.Tcp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = value + procSnmp.Tcp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = value + procSnmp.Tcp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = value + procSnmp.Tcp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = value + procSnmp.Tcp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = value + procSnmp.Tcp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = value + procSnmp.Tcp.OutRsts = &value case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = value + procSnmp.Tcp.InCsumErrors = &value } case "Udp": switch key { case "InDatagrams": - procSnmp.Udp.InDatagrams = value + procSnmp.Udp.InDatagrams = &value case "NoPorts": - procSnmp.Udp.NoPorts = value + procSnmp.Udp.NoPorts = &value case "InErrors": - procSnmp.Udp.InErrors = value + procSnmp.Udp.InErrors = &value case "OutDatagrams": - procSnmp.Udp.OutDatagrams = value + procSnmp.Udp.OutDatagrams = &value case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = value + procSnmp.Udp.RcvbufErrors = &value case "SndbufErrors": - procSnmp.Udp.SndbufErrors = value + procSnmp.Udp.SndbufErrors = &value case "InCsumErrors": - procSnmp.Udp.InCsumErrors = value + procSnmp.Udp.InCsumErrors = &value case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = value + procSnmp.Udp.IgnoredMulti = &value } case "UdpLite": switch key { case "InDatagrams": - procSnmp.UdpLite.InDatagrams = value + procSnmp.UdpLite.InDatagrams = &value case "NoPorts": - procSnmp.UdpLite.NoPorts = value + procSnmp.UdpLite.NoPorts = &value case "InErrors": - procSnmp.UdpLite.InErrors = value + procSnmp.UdpLite.InErrors = &value case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = value + procSnmp.UdpLite.OutDatagrams = &value case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = value + procSnmp.UdpLite.RcvbufErrors = &value case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = value + procSnmp.UdpLite.SndbufErrors = &value case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = value + procSnmp.UdpLite.InCsumErrors = &value case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = value + procSnmp.UdpLite.IgnoredMulti = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index f611992d52c..3059cc6a136 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -36,106 +36,106 @@ type 
ProcSnmp6 struct { } type Ip6 struct { // nolint:revive - InReceives float64 - InHdrErrors float64 - InTooBigErrors float64 - InNoRoutes float64 - InAddrErrors float64 - InUnknownProtos float64 - InTruncatedPkts float64 - InDiscards float64 - InDelivers float64 - OutForwDatagrams float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 - InMcastPkts float64 - OutMcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 } type Icmp6 struct { - InMsgs float64 - InErrors float64 - OutMsgs float64 - OutErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InPktTooBigs float64 - InTimeExcds float64 - InParmProblems float64 - InEchos float64 - InEchoReplies float64 - InGroupMembQueries float64 - InGroupMembResponses float64 - InGroupMembReductions float64 - InRouterSolicits float64 - InRouterAdvertisements float64 - InNeighborSolicits float64 - InNeighborAdvertisements float64 - InRedirects float64 - InMLDv2Reports float64 - OutDestUnreachs float64 - OutPktTooBigs float64 - OutTimeExcds float64 - OutParmProblems float64 - OutEchos float64 - OutEchoReplies float64 - OutGroupMembQueries float64 - OutGroupMembResponses float64 - OutGroupMembReductions float64 - OutRouterSolicits float64 - OutRouterAdvertisements float64 - OutNeighborSolicits float64 - OutNeighborAdvertisements float64 - OutRedirects float64 - OutMLDv2Reports float64 - InType1 float64 - InType134 float64 - InType135 float64 - InType136 float64 - InType143 float64 - OutType133 float64 - OutType135 float64 - OutType136 float64 - OutType143 float64 + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + 
OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 } type Udp6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 } func (p Proc) Snmp6() (ProcSnmp6, error) { @@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = value + procSnmp6.Ip6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = value + procSnmp6.Ip6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = value + procSnmp6.Ip6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = value + procSnmp6.Ip6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = value + procSnmp6.Ip6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = value + procSnmp6.Ip6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = value + procSnmp6.Ip6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = value + procSnmp6.Ip6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = value + procSnmp6.Ip6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = value + procSnmp6.Ip6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = value + procSnmp6.Ip6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = value + procSnmp6.Ip6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = value + procSnmp6.Ip6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = value + procSnmp6.Ip6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = value + procSnmp6.Ip6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = value + procSnmp6.Ip6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = value + procSnmp6.Ip6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = value + procSnmp6.Ip6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = value + procSnmp6.Ip6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = value + procSnmp6.Ip6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = value + procSnmp6.Ip6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = value + procSnmp6.Ip6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = value + procSnmp6.Ip6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = value + procSnmp6.Ip6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = value + procSnmp6.Ip6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = value + procSnmp6.Ip6.OutMcastOctets = &value case 
"InBcastOctets": - procSnmp6.Ip6.InBcastOctets = value + procSnmp6.Ip6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = value + procSnmp6.Ip6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = value + procSnmp6.Ip6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = value + procSnmp6.Ip6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = value + procSnmp6.Ip6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = value + procSnmp6.Ip6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = value + procSnmp6.Icmp6.InMsgs = &value case "InErrors": - procSnmp6.Icmp6.InErrors = value + procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = value + procSnmp6.Icmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = value + procSnmp6.Icmp6.OutErrors = &value case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = value + procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = value + procSnmp6.Icmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = value + procSnmp6.Icmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = value + procSnmp6.Icmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = value + procSnmp6.Icmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = value + procSnmp6.Icmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = value + procSnmp6.Icmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = value + procSnmp6.Icmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = value + procSnmp6.Icmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = value + procSnmp6.Icmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = value + procSnmp6.Icmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = value + procSnmp6.Icmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = value + procSnmp6.Icmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = value + procSnmp6.Icmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = value + procSnmp6.Icmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = value + procSnmp6.Icmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = value + procSnmp6.Icmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = value + procSnmp6.Icmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = value + procSnmp6.Icmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = value + procSnmp6.Icmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = value + procSnmp6.Icmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = value + procSnmp6.Icmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = value + procSnmp6.Icmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = value + procSnmp6.Icmp6.OutGroupMembResponses = 
&value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = value + procSnmp6.Icmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = value + procSnmp6.Icmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = value + procSnmp6.Icmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = value + procSnmp6.Icmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = value + procSnmp6.Icmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = value + procSnmp6.Icmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = value + procSnmp6.Icmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = value + procSnmp6.Icmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = value + procSnmp6.Icmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = value + procSnmp6.Icmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = value + procSnmp6.Icmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = value + procSnmp6.Icmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = value + procSnmp6.Icmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = value + procSnmp6.Icmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = value + procSnmp6.Icmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = value + procSnmp6.Icmp6.OutType143 = &value } case "Udp6": switch key { case "InDatagrams": - procSnmp6.Udp6.InDatagrams = value + procSnmp6.Udp6.InDatagrams = &value case "NoPorts": - procSnmp6.Udp6.NoPorts = value + procSnmp6.Udp6.NoPorts = &value case "InErrors": - procSnmp6.Udp6.InErrors = value + procSnmp6.Udp6.InErrors = &value case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = value + procSnmp6.Udp6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = value + procSnmp6.Udp6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = value + procSnmp6.Udp6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = value + procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = value + procSnmp6.Udp6.IgnoredMulti = &value } case "UdpLite6": switch key { case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = value + procSnmp6.UdpLite6.InDatagrams = &value case "NoPorts": - procSnmp6.UdpLite6.NoPorts = value + procSnmp6.UdpLite6.NoPorts = &value case "InErrors": - procSnmp6.UdpLite6.InErrors = value + procSnmp6.UdpLite6.InErrors = &value case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = value + procSnmp6.UdpLite6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = value + procSnmp6.UdpLite6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = value + procSnmp6.UdpLite6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = value + procSnmp6.UdpLite6.InCsumErrors = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06c556ef962..b278eb2c2df 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the 
rss of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 594022ded48..3d8c06439a9 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 33f97caa08d..586af48af9f 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) { if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 00000000000..f08bfc769db --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 20ceb77e2df..cdedcae996d 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -26,7 +26,9 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array // and numa_zonelist_order (deprecated) which is a string. diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 8e8460d4c59..8bc4bf34a09 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -80,7 +80,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return cfg, nil } - for i, v := range cfg.GlobalConfig.ExternalLabels { + b := labels.ScratchBuilder{} + cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { if s == "$" { return "$" @@ -93,10 +94,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro }) if newV != v.Value { level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) - v.Value = newV - cfg.GlobalConfig.ExternalLabels[i] = v } - } + b.Add(v.Name, newV) + }) + cfg.GlobalConfig.ExternalLabels = b.Labels() return cfg, nil } @@ -112,10 +113,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log. 
} if agentMode { - if len(cfg.RemoteWriteConfigs) == 0 { - return nil, errors.New("at least one remote_write target must be specified in agent mode") - } - if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { return nil, errors.New("field alerting is not allowed in agent mode") } @@ -361,13 +358,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } - for _, l := range gc.ExternalLabels { + if err := gc.ExternalLabels.Validate(func(l labels.Label) error { if !model.LabelName(l.Name).IsValid() { return fmt.Errorf("%q is not a valid label name", l.Name) } if !model.LabelValue(l.Value).IsValid() { return fmt.Errorf("%q is not a valid label value", l.Value) } + return nil + }); err != nil { + return err } // First set the correct scrape interval, then check that the timeout @@ -394,7 +394,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // isZero returns true iff the global config is the zero value. func (c *GlobalConfig) isZero() bool { - return c.ExternalLabels == nil && + return c.ExternalLabels.IsEmpty() && c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index b7357fa6cde..8b304a0faf8 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -428,11 +428,11 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ), + Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ) + level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index d75afd10ed2..256679a8c96 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -27,6 +27,8 @@ import ( // used to represent a histogram with integer counts and thus serves as a more // generalized representation. type FloatHistogram struct { + // Counter reset information. + CounterResetHint CounterResetHint // Currently valid schema numbers are -4 <= n <= 8. They are all for // base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. Or @@ -244,6 +246,37 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { return h } +// Equals returns true if the given float histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. 
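As an aside to the hunk above: the new Equals helper gives callers an exact-match check on layout and counts. A minimal sketch of using it, assuming only the FloatHistogram API visible in this diff; all field values are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        a := &histogram.FloatHistogram{
            Schema:          0,
            ZeroThreshold:   0.001,
            ZeroCount:       1,
            Count:           5,
            Sum:             12.5,
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
            PositiveBuckets: []float64{2, 2},
        }
        b := *a                   // shallow copy is enough here; Equals only reads fields
        fmt.Println(a.Equals(&b)) // true: identical layout and counts

        b.PositiveBuckets = []float64{2, 3}
        fmt.Println(a.Equals(&b)) // false: one bucket count differs
    }
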
+func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || + h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + + if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + return true +} + // addBucket takes the "coordinates" of the last bucket that was handled and // adds the provided bucket after it. If a corresponding bucket exists, the // count is added. If not, the bucket is inserted. The updated slices and the diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go index c62be0b08cf..e1de5ffb52d 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -25,14 +25,14 @@ type BucketCount interface { float64 | uint64 } -// internalBucketCount is used internally by Histogram and FloatHistogram. The +// InternalBucketCount is used internally by Histogram and FloatHistogram. The // difference to the BucketCount above is that Histogram internally uses deltas // between buckets rather than absolute counts (while FloatHistogram uses // absolute counts directly). Go type parameters don't allow type // specialization. Therefore, where special treatment of deltas between buckets // vs. absolute counts is important, this information has to be provided as a // separate boolean parameter "deltaBuckets" -type internalBucketCount interface { +type InternalBucketCount interface { float64 | int64 } @@ -86,7 +86,7 @@ type BucketIterator[BC BucketCount] interface { // implementations, together with an implementation of the At method. This // iterator can be embedded in full implementations of BucketIterator to save on // code replication. -type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct { +type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { schema int32 spans []Span buckets []IBC @@ -121,7 +121,7 @@ func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { // compactBuckets is a generic function used by both Histogram.Compact and // FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are // deltas. Set it to false if the buckets contain absolute counts. -func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { +func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { // Fast path: If there are no empty buckets AND no offset in any span is // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return // immediately. 
We check that first because it's cheap and presumably @@ -327,6 +327,18 @@ func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmp return buckets, spans } +func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if b != b2[i] { + return false + } + } + return true +} + func getBound(idx, schema int32) float64 { // Here a bit of context about the behavior for the last bucket counting // regular numbers (called simply "last bucket" below) and the bucket diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go index 934c4dde9c2..6d425307c55 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -19,6 +19,17 @@ import ( "strings" ) +// CounterResetHint contains the known information about a counter reset, +// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply. +type CounterResetHint byte + +const ( + UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not. + CounterReset // CounterReset means there was definitely a counter reset starting from this histogram. + NotCounterReset // NotCounterReset means there was definitely no counter reset with this histogram. + GaugeType // GaugeType means this is a gauge histogram, where counter resets do not happen. +) + // Histogram encodes a sparse, high-resolution histogram. See the design // document for full details: // https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit# @@ -35,6 +46,8 @@ import ( // // Which bucket indices are actually used is determined by the spans. type Histogram struct { + // Counter reset information. + CounterResetHint CounterResetHint // Currently valid schema numbers are -4 <= n <= 8. They are all for // base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. Or @@ -250,18 +263,6 @@ func allEmptySpans(s []Span) bool { return true } -func bucketsMatch(b1, b2 []int64) bool { - if len(b1) != len(b2) { - return false - } - for i, b := range b1 { - if b != b2[i] { - return false - } - } - return true -} - // Compact works like FloatHistogram.Compact. See there for detailed // explanations. 
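Stepping back from the hunk above: the CounterResetHint field just added to Histogram is copied over by ToFloat (see the ToFloat change further down). A small sketch under that assumption, with made-up bucket values:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.Histogram{
            CounterResetHint: histogram.GaugeType, // gauge histogram: counter resets do not apply
            Schema:           0,
            Count:            4,
            Sum:              10,
            PositiveSpans:    []histogram.Span{{Offset: 0, Length: 2}},
            PositiveBuckets:  []int64{2, 0}, // delta-encoded absolute counts 2, 2
        }

        fh := h.ToFloat()
        fmt.Println(fh.CounterResetHint == histogram.GaugeType) // true once this change is applied
    }
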
func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { @@ -307,15 +308,16 @@ func (h *Histogram) ToFloat() *FloatHistogram { } return &FloatHistogram{ - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: float64(h.ZeroCount), - Count: float64(h.Count), - Sum: h.Sum, - PositiveSpans: positiveSpans, - NegativeSpans: negativeSpans, - PositiveBuckets: positiveBuckets, - NegativeBuckets: negativeBuckets, + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.ZeroCount), + Count: float64(h.Count), + Sum: h.Sum, + PositiveSpans: positiveSpans, + NegativeSpans: negativeSpans, + PositiveBuckets: positiveBuckets, + NegativeBuckets: negativeBuckets, } } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index aafba218aa9..36a0e6cb358 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -357,9 +357,7 @@ func EmptyLabels() Labels { // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) - for _, l := range ls { - set = append(set, l) - } + set = append(set, ls...) sort.Sort(set) return set @@ -414,6 +412,49 @@ func Compare(a, b Labels) int { return len(a) - len(b) } +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + (*ls) = append((*ls)[:0], b...) +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls) == 0 +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for _, l := range ls { + f(l) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + // Builder allows modifying Labels. type Builder struct { base Labels @@ -470,7 +511,7 @@ Outer: return b } -// Set the name/value pair as a label. +// Set the name/value pair as a label. A value of "" means delete that label. func (b *Builder) Set(n, v string) *Builder { if v == "" { // Empty labels are the same as missing labels. @@ -525,3 +566,40 @@ Outer: } return res } + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add Labels +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. 
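Before the remaining ScratchBuilder methods, a brief usage sketch. It assumes only the API added in this hunk (NewScratchBuilder, Add, Sort, Reset, Labels); the label names and values are illustrative:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/labels"
    )

    func main() {
        b := labels.NewScratchBuilder(3)
        b.Add("__name__", "http_requests_total")
        b.Add("job", "api")
        b.Add("instance", "host:9090")
        b.Sort()           // labels must stay sorted by name
        lset := b.Labels() // returns a copy, so the builder can be reused

        b.Reset() // ready for the next series
        fmt.Println(lset)
    }
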
+func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + sort.Sort(b.add) +} + +// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(ls Labels) { + b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. +} + +// Return the name/value pairs added so far as a Labels object. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. + return append([]Label{}, b.add...) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go index a683588d166..05b8168825b 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "os" - "sort" "strings" ) @@ -51,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) { defer f.Close() scanner := bufio.NewScanner(f) + b := ScratchBuilder{} var mets []Labels hashes := map[uint64]struct{}{} i := 0 for scanner.Scan() && i < n { - m := make(Labels, 0, 10) + b.Reset() r := strings.NewReplacer("\"", "", "{", "", "}", "") s := r.Replace(scanner.Text()) @@ -65,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) { labelChunks := strings.Split(s, ",") for _, labelChunk := range labelChunks { split := strings.Split(labelChunk, ":") - m = append(m, Label{Name: split[0], Value: split[1]}) + b.Add(split[0], split[1]) } // Order of the k/v labels matters, don't assume we'll always receive them already sorted. - sort.Sort(m) + b.Sort() + m := b.Labels() h := m.Hash() if _, ok := hashes[h]; ok { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index c731f6e0d3b..0cc6eeeb7ed 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -203,20 +203,20 @@ func (re Regexp) String() string { // Process returns a relabeled copy of the given label set. The relabel configurations // are applied in order of input. -// If a label set is dropped, nil is returned. +// If a label set is dropped, EmptyLabels and false is returned. // May return the input labelSet modified. 
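The implementation change follows; on the caller side the old nil check becomes a boolean, as in this hedged sketch (the relabel config and label values are hypothetical):

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
        "github.com/prometheus/prometheus/model/labels"
        "github.com/prometheus/prometheus/model/relabel"
    )

    func main() {
        cfg := &relabel.Config{
            SourceLabels: model.LabelNames{"job"},
            Regex:        relabel.MustNewRegexp("secret.*"),
            Action:       relabel.Drop,
        }

        lbls := labels.FromStrings("__name__", "up", "job", "secret-scrape")

        out, keep := relabel.Process(lbls, cfg)
        if !keep {
            fmt.Println("series dropped") // previously signalled by a nil return
            return
        }
        fmt.Println(out)
    }
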
-func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { - lb := labels.NewBuilder(nil) +func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) { + lb := labels.NewBuilder(labels.EmptyLabels()) for _, cfg := range cfgs { - lbls = relabel(lbls, cfg, lb) - if lbls == nil { - return nil + lbls, keep = relabel(lbls, cfg, lb) + if !keep { + return labels.EmptyLabels(), false } } - return lbls + return lbls, true } -func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels { +func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.Labels, keep bool) { var va [16]string values := va[:0] if len(cfg.SourceLabels) > cap(values) { @@ -232,19 +232,19 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels switch cfg.Action { case Drop: if cfg.Regex.MatchString(val) { - return nil + return labels.EmptyLabels(), false } case Keep: if !cfg.Regex.MatchString(val) { - return nil + return labels.EmptyLabels(), false } case DropEqual: if lset.Get(cfg.TargetLabel) == val { - return nil + return labels.EmptyLabels(), false } case KeepEqual: if lset.Get(cfg.TargetLabel) != val { - return nil + return labels.EmptyLabels(), false } case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) @@ -271,29 +271,29 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) case LabelMap: - for _, l := range lset { + lset.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) lb.Set(res, l.Value) } - } + }) case LabelDrop: - for _, l := range lset { + lset.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) case LabelKeep: - for _, l := range lset { + lset.Range(func(l labels.Label) { if !cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) default: panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action)) } - return lb.Labels(lset) + return lb.Labels(lset), true } // sum64 sums the md5 hash to an uint64. diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index f1d5f39257e..30b3face0de 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -143,22 +143,24 @@ type RuleGroup struct { // Rule describes an alerting or recording rule. type Rule struct { - Record string `yaml:"record,omitempty"` - Alert string `yaml:"alert,omitempty"` - Expr string `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record string `yaml:"record,omitempty"` + Alert string `yaml:"alert,omitempty"` + Expr string `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules. 
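An aside on the keep_firing_for field added above: it only applies to alerting rules (the validation change below rejects it on recording rules). A minimal sketch of populating it programmatically; the rule itself is invented for the example:

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/common/model"
        "github.com/prometheus/prometheus/model/rulefmt"
    )

    func main() {
        r := rulefmt.Rule{
            Alert:         "HighErrorRate",
            Expr:          `rate(http_errors_total[5m]) > 0.5`,
            For:           model.Duration(10 * time.Minute),
            KeepFiringFor: model.Duration(15 * time.Minute), // keep the alert firing after the condition clears
            Labels:        map[string]string{"severity": "page"},
        }
        fmt.Printf("%+v\n", r)
    }
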
type RuleNode struct { - Record yaml.Node `yaml:"record,omitempty"` - Alert yaml.Node `yaml:"alert,omitempty"` - Expr yaml.Node `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record yaml.Node `yaml:"record,omitempty"` + Alert yaml.Node `yaml:"alert,omitempty"` + Expr yaml.Node `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // Validate the rule and return a list of encountered errors. @@ -208,6 +210,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { node: &r.Record, }) } + if r.KeepFiringFor != 0 { + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"), + node: &r.Record, + }) + } if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value), diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 932a3d96db1..15a95a95924 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "math" - "sort" "strings" "unicode/utf8" @@ -82,6 +81,7 @@ func (l *openMetricsLexer) Error(es string) { // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { l *openMetricsLexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -113,8 +113,8 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } -// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support -// sparse histograms. +// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not +// support sparse histograms yet. func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { return nil, nil, nil, nil } @@ -158,14 +158,11 @@ func (p *OpenMetricsParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -173,16 +170,16 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. 
if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -204,17 +201,18 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { e.Ts = p.exemplarTs } + p.builder.Reset() for i := 0; i < len(p.eOffsets); i += 4 { a := p.eOffsets[i] - p.start b := p.eOffsets[i+1] - p.start c := p.eOffsets[i+2] - p.start d := p.eOffsets[i+3] - p.start - e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], s[c:d]) } - // Sort the labels. - sort.Sort(e.Labels) + p.builder.Sort() + e.Labels = p.builder.Labels() return true } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index a3bb8bb9bfd..b0c963392d2 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -21,7 +21,6 @@ import ( "fmt" "io" "math" - "sort" "strconv" "strings" "unicode/utf8" @@ -144,6 +143,7 @@ func (l *promlexer) Error(es string) { // Prometheus text exposition format. type PromParser struct { l *promlexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -168,8 +168,8 @@ func (p *PromParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } -// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format -// does not support sparse histograms. +// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text +// format does not support sparse histograms yet. func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { return nil, nil, nil, nil } @@ -212,14 +212,11 @@ func (p *PromParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *PromParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -227,16 +224,16 @@ func (p *PromParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels to maintain the sorted labels invariant. 
- sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -343,7 +340,7 @@ func (p *PromParser) Next() (Entry, error) { t2 = p.nextToken() } if t2 != tValue { - return EntryInvalid, parseError("expected value after metric", t) + return EntryInvalid, parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { return EntryInvalid, err @@ -353,7 +350,7 @@ func (p *PromParser) Next() (Entry, error) { p.val = math.Float64frombits(value.NormalNaN) } p.hasTS = false - switch p.nextToken() { + switch t := p.nextToken(); t { case tLinebreak: break case tTimestamp: @@ -362,7 +359,7 @@ func (p *PromParser) Next() (Entry, error) { return EntryInvalid, err } if t2 := p.nextToken(); t2 != tLinebreak { - return EntryInvalid, parseError("expected next entry after timestamp", t) + return EntryInvalid, parseError("expected next entry after timestamp", t2) } default: return EntryInvalid, parseError("expected timestamp or new record", t) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index a9c940879eb..eca145955e8 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -19,7 +19,6 @@ import ( "fmt" "io" "math" - "sort" "strings" "unicode/utf8" @@ -59,6 +58,8 @@ type ProtobufParser struct { // that we have to decode the next MetricFamily. state Entry + builder labels.ScratchBuilder // held here to reduce allocations when building Labels + mf *dto.MetricFamily // The following are just shenanigans to satisfy the Parser interface. @@ -104,7 +105,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { default: v = s.GetQuantile()[p.fieldPos].GetValue() } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // This should only happen for a legacy histogram. h := m.GetHistogram() switch p.fieldPos { @@ -169,6 +170,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his fh.NegativeSpans[i].Offset = span.GetOffset() fh.NegativeSpans[i].Length = span.GetLength() } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + fh.CounterResetHint = histogram.GaugeType + } fh.Compact(0) if ts != 0 { return p.metricBytes.Bytes(), &ts, nil, &fh @@ -198,6 +202,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his sh.NegativeSpans[i].Offset = span.GetOffset() sh.NegativeSpans[i].Length = span.GetLength() } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + sh.CounterResetHint = histogram.GaugeType + } sh.Compact(0) if ts != 0 { return p.metricBytes.Bytes(), &ts, &sh, nil @@ -224,6 +231,8 @@ func (p *ProtobufParser) Type() ([]byte, MetricType) { return n, MetricTypeGauge case dto.MetricType_HISTOGRAM: return n, MetricTypeHistogram + case dto.MetricType_GAUGE_HISTOGRAM: + return n, MetricTypeGaugeHistogram case dto.MetricType_SUMMARY: return n, MetricTypeSummary } @@ -245,23 +254,19 @@ func (p *ProtobufParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. 
func (p *ProtobufParser) Metric(l *labels.Labels) string { - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: p.getMagicName(), - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, p.getMagicName()) for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { - *l = append(*l, labels.Label{ - Name: lp.GetName(), - Value: lp.GetValue(), - }) + p.builder.Add(lp.GetName(), lp.GetValue()) } if needed, name, value := p.getMagicLabel(); needed { - *l = append(*l, labels.Label{Name: name, Value: value}) + p.builder.Add(name, value) } // Sort labels to maintain the sorted labels invariant. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return p.metricBytes.String() } @@ -276,7 +281,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { switch p.mf.GetType() { case dto.MetricType_COUNTER: exProto = m.GetCounter().GetExemplar() - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: bb := m.GetHistogram().GetBucket() if p.fieldPos < 0 { if p.state == EntrySeries { @@ -305,12 +310,12 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { ex.HasTs = true ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000) } + p.builder.Reset() for _, lp := range exProto.GetLabel() { - ex.Labels = append(ex.Labels, labels.Label{ - Name: lp.GetName(), - Value: lp.GetValue(), - }) + p.builder.Add(lp.GetName(), lp.GetValue()) } + p.builder.Sort() + ex.Labels = p.builder.Labels() return true } @@ -334,7 +339,7 @@ func (p *ProtobufParser) Next() (Entry, error) { } // We are at the beginning of a metric family. Put only the name - // into metricBytes and validate only name and help for now. + // into metricBytes and validate only name, help, and type for now. name := p.mf.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { return EntryInvalid, errors.Errorf("invalid metric name: %s", name) @@ -342,6 +347,17 @@ func (p *ProtobufParser) Next() (Entry, error) { if help := p.mf.GetHelp(); !utf8.ValidString(help) { return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) } + switch p.mf.GetType() { + case dto.MetricType_COUNTER, + dto.MetricType_GAUGE, + dto.MetricType_HISTOGRAM, + dto.MetricType_GAUGE_HISTOGRAM, + dto.MetricType_SUMMARY, + dto.MetricType_UNTYPED: + // All good. 
+ default: + return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + } p.metricBytes.Reset() p.metricBytes.WriteString(name) @@ -349,7 +365,8 @@ func (p *ProtobufParser) Next() (Entry, error) { case EntryHelp: p.state = EntryType case EntryType: - if p.mf.GetType() == dto.MetricType_HISTOGRAM && + t := p.mf.GetType() + if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { p.state = EntryHistogram } else { @@ -359,8 +376,11 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } case EntryHistogram, EntrySeries: + t := p.mf.GetType() if p.state == EntrySeries && !p.fieldsDone && - (p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) { + (t == dto.MetricType_SUMMARY || + t == dto.MetricType_HISTOGRAM || + t == dto.MetricType_GAUGE_HISTOGRAM) { p.fieldPos++ } else { p.metricPos++ @@ -421,7 +441,7 @@ func (p *ProtobufParser) getMagicName() string { if p.fieldPos == -1 { return p.mf.GetName() + "_sum" } - if t == dto.MetricType_HISTOGRAM { + if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { return p.mf.GetName() + "_bucket" } return p.mf.GetName() @@ -439,7 +459,7 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) { q := qq[p.fieldPos] p.fieldsDone = p.fieldPos == len(qq)-1 return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile()) - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket() if p.fieldPos >= len(bb) { p.fieldsDone = true diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index fd89a029c7d..79697d07966 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -353,11 +353,11 @@ func (n *Manager) Send(alerts ...*Alert) { for _, a := range alerts { lb := labels.NewBuilder(a.Labels) - for _, l := range n.opts.ExternalLabels { + n.opts.ExternalLabels.Range(func(l labels.Label) { if a.Labels.Get(l.Name) == "" { lb.Set(l.Name, l.Value) } - } + }) a.Labels = lb.Labels(a.Labels) } @@ -394,8 +394,8 @@ func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { var relabeledAlerts []*Alert for _, alert := range alerts { - labels := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) - if labels != nil { + labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) + if keep { alert.Labels = labels relabeledAlerts = append(relabeledAlerts, alert) } @@ -570,9 +570,9 @@ func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { apiLabelSet := models.LabelSet{} - for _, label := range modelLabelSet { + modelLabelSet.Range(func(label labels.Label) { apiLabelSet[label.Name] = label.Value - } + }) return apiLabelSet } @@ -719,9 +719,9 @@ func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig } } - lset := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) - if lset == nil { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{lbls}) + lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) 
+ if !keep { + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)}) continue } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index a2b43848932..ddfb26b13fd 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -653,12 +653,13 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval query.sampleStats.InitStepTracking(start, start, 1) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() - var mat Matrix switch result := val.(type) { @@ -704,10 +705,12 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() mat, ok := val.(Matrix) if !ok { @@ -1029,6 +1032,14 @@ type EvalNodeHelper struct { resultMetric map[string]labels.Labels } +func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { + if enh.lb == nil { + enh.lb = labels.NewBuilder(lbls) + } else { + enh.lb.Reset(lbls) + } +} + // DropMetricName is a cached version of DropMetricName. func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { if enh.Dmn == nil { @@ -1390,10 +1401,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) + var chkIter chunkenc.Iterator for i, s := range selVS.Series { ev.currentSamples -= len(points) points = points[:0] - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) metric := selVS.Series[i].Labels() // The last_over_time function acts like offset; thus, it // should keep the metric name. 
For all the other range @@ -1562,7 +1575,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.NumberLiteral: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil + return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1575,8 +1588,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } mat := make(Matrix, 0, len(e.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range e.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), Points: getPointSlice(numSteps), @@ -1720,8 +1735,10 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect } vec := make(Vector, 0, len(node.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range node.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) t, v, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { @@ -1809,12 +1826,14 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } + var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { if err := contextDone(ev.ctx, "expression evaluation"); err != nil { ev.error(err) } - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: series[i].Labels(), } @@ -2141,12 +2160,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V enh.resultMetric = make(map[string]labels.Labels, len(enh.Out)) } - if enh.lb == nil { - enh.lb = labels.NewBuilder(lhs) - } else { - enh.lb.Reset(lhs) - } - + enh.resetBuilder(lhs) buf := bytes.NewBuffer(enh.lblResultBuf[:0]) enh.lblBuf = lhs.Bytes(enh.lblBuf) buf.Write(enh.lblBuf) @@ -2179,7 +2193,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(nil) + ret := enh.lb.Labels(labels.EmptyLabels()) enh.resultMetric[str] = ret return ret } @@ -2219,7 +2233,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil) + return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels()) } // scalarBinop evaluates a binary operation between two Scalars. @@ -2346,15 +2360,14 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } } - lb := labels.NewBuilder(nil) var buf []byte for si, s := range vec { metric := s.Metric if op == parser.COUNT_VALUES { - lb.Reset(metric) - lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = lb.Labels(nil) + enh.resetBuilder(metric) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) + metric = enh.lb.Labels(labels.EmptyLabels()) // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2371,14 +2384,18 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group, ok := result[groupingKey] // Add a new group if it doesn't exist. 
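The engine hunks above all switch to reusing a single chunkenc.Iterator across series via the new Series.Iterator(it) signature. A hedged sketch of the same pattern from caller code; the SeriesSet is assumed to come from a storage.Querier.Select call:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/storage"
        "github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    // printAll walks every float sample in ss, reusing one chunk iterator across series.
    func printAll(ss storage.SeriesSet) error {
        var it chunkenc.Iterator
        for ss.Next() {
            s := ss.At()
            it = s.Iterator(it) // reuse to avoid one allocation per series
            for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
                if vt == chunkenc.ValFloat {
                    t, v := it.At()
                    fmt.Println(s.Labels(), t, v)
                }
            }
            if err := it.Err(); err != nil {
                return err
            }
        }
        return ss.Err()
    }

    func main() {} // printAll would be called with a SeriesSet obtained from a storage.Querier
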
if !ok { - lb.Reset(metric) + var m labels.Labels + enh.resetBuilder(metric) if without { - lb.Del(grouping...) - lb.Del(labels.MetricName) + enh.lb.Del(grouping...) + enh.lb.Del(labels.MetricName) + m = enh.lb.Labels(labels.EmptyLabels()) + } else if len(grouping) > 0 { + enh.lb.Keep(grouping...) + m = enh.lb.Labels(labels.EmptyLabels()) } else { - lb.Keep(grouping...) + m = labels.EmptyLabels() } - m := lb.Labels(nil) newAgg := &groupedAggregation{ labels: m, value: s.V, diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index d481cb7358b..c5922002b0e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -957,7 +957,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if !ok { sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). - Labels(nil) + Labels(labels.EmptyLabels()) mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb @@ -1077,7 +1077,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels(labels.EmptyLabels()) enh.Dmn[h] = outMetric } } @@ -1145,7 +1145,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels(labels.EmptyLabels()) enh.Dmn[h] = outMetric } @@ -1383,7 +1383,7 @@ func (s *vectorByReverseValueHeap) Pop() interface{} { // createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched // in a given expression. It is used in the absent functions. func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { - m := labels.Labels{} + b := labels.NewBuilder(labels.EmptyLabels()) var lm []*labels.Matcher switch n := expr.(type) { @@ -1392,25 +1392,26 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { case *parser.MatrixSelector: lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers default: - return m + return labels.EmptyLabels() } - empty := []string{} + // The 'has' map implements backwards-compatibility for historic behaviour: + // e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output. + // Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`. 
+ has := make(map[string]bool, len(lm)) for _, ma := range lm { if ma.Name == labels.MetricName { continue } - if ma.Type == labels.MatchEqual && !m.Has(ma.Name) { - m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil) + if ma.Type == labels.MatchEqual && !has[ma.Name] { + b.Set(ma.Name, ma.Value) + has[ma.Name] = true } else { - empty = append(empty, ma.Name) + b.Del(ma.Name) } } - for _, v := range empty { - m = labels.NewBuilder(m).Del(v).Labels(nil) - } - return m + return b.Labels(labels.EmptyLabels()) } func stringFromArg(e parser.Expr) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 433f45259c9..461e854ac1a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -16,13 +16,13 @@ package parser import ( "math" - "sort" "strconv" "time" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) + %} %union { @@ -32,6 +32,7 @@ import ( matcher *labels.Matcher label labels.Label labels labels.Labels + lblList []labels.Label strings []string series []SequenceValue uint uint64 @@ -138,10 +139,9 @@ START_METRIC_SELECTOR // Type definitions for grammar rules. %type label_match_list %type label_matcher - %type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors - -%type label_set label_set_list metric +%type label_set metric +%type label_set_list %type